
[Xen-devel] [PATCH v7 28/34] ARM: vITS: handle MAPTI command



The MAPTI command associates a DeviceID/EventID pair with an LPI/CPU
pair and actually instantiates LPI interrupts.
We connect the already allocated host LPI to this virtual LPI, so that
any LPI triggering on the host can be quickly forwarded to the guest.
Besides entering the VCPU and the virtual LPI number in the respective
host LPI entry, we also initialize and add the already allocated
struct pending_irq to our radix tree, so that we can now easily find it
by its virtual LPI number.
We also read the property table to update the enabled bit and the
priority of our new LPI, as we might have missed this during an earlier
INVALL command (which only covers already mapped LPIs).

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/gic-v3-its.c        | 74 ++++++++++++++++++++++++++++++
 xen/arch/arm/gic-v3-lpi.c        | 18 ++++++++
 xen/arch/arm/vgic-v3-its.c       | 99 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/gic_v3_its.h |  6 +++
 4 files changed, 197 insertions(+)
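
For reference, the flow described above boils down to roughly the
sequence below. This is only a simplified sketch of its_handle_mapti()
(further down in this patch), with the locking and error handling
stripped out:

    /* Sketch only: record the new mapping in the virtual ITS tables ... */
    write_itte_locked(its, devid, eventid, collid, intid, &vcpu);

    /*
     * ... connect the host LPI (allocated at device assignment time)
     * to the target VCPU / virtual LPI pair ...
     */
    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
                                    devid, eventid, vcpu, intid);

    /* ... seed enabled bit and priority from the guest's property table ... */
    vgic_init_pending_irq(pirq, intid);
    update_lpi_property(its->d, intid, pirq);
    pirq->lpi_vcpu_id = vcpu->vcpu_id;

    /*
     * ... and make the vLPI discoverable by inserting the pending_irq
     * into the domain's radix tree.
     */
    radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);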

diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index aebc257..793ad82 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -800,6 +800,80 @@ out:
     return ret;
 }
 
+/* Must be called with the its_device_lock held. */
+static struct its_device *get_its_device(struct domain *d, paddr_t vdoorbell,
+                                          uint32_t vdevid)
+{
+    struct rb_node *node = d->arch.vgic.its_devices.rb_node;
+    struct its_device *dev;
+
+    ASSERT(spin_is_locked(&d->arch.vgic.its_devices_lock));
+
+    while ( node )
+    {
+        int cmp;
+
+        dev = rb_entry(node, struct its_device, rbnode);
+        cmp = compare_its_guest_devices(dev, vdoorbell, vdevid);
+
+        if ( !cmp )
+            return dev;
+
+        if ( cmp > 0 )
+            node = node->rb_left;
+        else
+            node = node->rb_right;
+    }
+
+    return NULL;
+}
+
+static uint32_t get_host_lpi(struct its_device *dev, uint32_t eventid)
+{
+    uint32_t host_lpi = 0;
+
+    if ( dev && (eventid < dev->eventids) )
+        host_lpi = dev->host_lpi_blocks[eventid / LPI_BLOCK] +
+                                       (eventid % LPI_BLOCK);
+
+    return host_lpi;
+}
+
+/*
+ * Connects the event ID for an already assigned device to the given VCPU/vLPI
+ * pair. The corresponding physical LPI is already mapped on the host side
+ * (when assigning the physical device to the guest), so we just connect the
+ * target VCPU/vLPI pair to that interrupt to inject it properly if it fires.
+ * Returns a pointer to the already allocated struct pending_irq that is
+ * meant to be used by that event.
+ */
+struct pending_irq *gicv3_assign_guest_event(struct domain *d,
+                                             paddr_t vdoorbell_address,
+                                             uint32_t vdevid, uint32_t veventid,
+                                             struct vcpu *v, uint32_t virt_lpi)
+{
+    struct its_device *dev;
+    struct pending_irq *pirq = NULL;
+    uint32_t host_lpi = 0;
+
+    spin_lock(&d->arch.vgic.its_devices_lock);
+    dev = get_its_device(d, vdoorbell_address, vdevid);
+    if ( dev )
+    {
+        host_lpi = get_host_lpi(dev, veventid);
+        pirq = &dev->pend_irqs[veventid];
+    }
+    spin_unlock(&d->arch.vgic.its_devices_lock);
+
+    if ( !host_lpi || !pirq )
+        return NULL;
+
+    gicv3_lpi_update_host_entry(host_lpi, d->domain_id,
+                                v ? v->vcpu_id : INVALID_VCPU_ID, virt_lpi);
+
+    return pirq;
+}
+
 /* Scan the DT for any ITS nodes and create a list of host ITSes out of it. */
 void gicv3_its_dt_init(const struct dt_device_node *node)
 {
diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
index bfabcbb..19774d6 100644
--- a/xen/arch/arm/gic-v3-lpi.c
+++ b/xen/arch/arm/gic-v3-lpi.c
@@ -202,6 +202,24 @@ void gicv3_do_LPI(unsigned int lpi)
     rcu_unlock_domain(d);
 }
 
+void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
+                                 unsigned int vcpu_id, uint32_t virt_lpi)
+{
+    union host_lpi *hlpip, hlpi;
+
+    ASSERT(host_lpi >= LPI_OFFSET);
+
+    host_lpi -= LPI_OFFSET;
+
+    hlpip = &lpi_data.host_lpis[host_lpi / HOST_LPIS_PER_PAGE][host_lpi % HOST_LPIS_PER_PAGE];
+
+    hlpi.virt_lpi = virt_lpi;
+    hlpi.dom_id = domain_id;
+    hlpi.vcpu_id = vcpu_id;
+
+    write_u64_atomic(&hlpip->data, hlpi.data);
+}
+
 static int gicv3_lpi_allocate_pendtable(uint64_t *reg)
 {
     uint64_t val;
diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index f5a657e..171e259 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -371,6 +371,33 @@ static int its_handle_int(struct virt_its *its, uint64_t *cmdptr)
     return 0;
 }
 
+/*
+ * For a given virtual LPI read the enabled bit and priority from the virtual
+ * property table and update the virtual IRQ's state in the given pending_irq.
+ */
+static int update_lpi_property(struct domain *d, uint32_t vlpi,
+                               struct pending_irq *p)
+{
+    paddr_t addr;
+    uint8_t property;
+    int ret;
+
+    addr = d->arch.vgic.rdist_propbase & GENMASK(51, 12);
+
+    ret = vgic_access_guest_memory(d, addr + vlpi - LPI_OFFSET,
+                                   &property, sizeof(property), false);
+    if ( ret )
+        return ret;
+
+    p->lpi_priority = property & LPI_PROP_PRIO_MASK;
+    if ( property & LPI_PROP_ENABLED )
+        set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+    else
+        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+
+    return 0;
+}
+
 static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
 {
     uint32_t collid = its_cmd_get_collection(cmdptr);
@@ -435,6 +462,74 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
     return ret;
 }
 
+static int its_handle_mapti(struct virt_its *its, uint64_t *cmdptr)
+{
+    uint32_t devid = its_cmd_get_deviceid(cmdptr);
+    uint32_t eventid = its_cmd_get_id(cmdptr);
+    uint32_t intid = its_cmd_get_physical_id(cmdptr), _intid;
+    uint16_t collid = its_cmd_get_collection(cmdptr);
+    struct pending_irq *pirq;
+    struct vcpu *vcpu = NULL;
+    int ret = 0;
+
+    if ( its_cmd_get_command(cmdptr) == GITS_CMD_MAPI )
+        intid = eventid;
+
+    spin_lock(&its->its_lock);
+    /*
+     * Check whether there is a valid existing mapping. If so, the behavior
+     * is unpredictable, so we choose to ignore this command here.
+     * This makes sure we start with a pristine pending_irq below.
+     */
+    if ( read_itte_locked(its, devid, eventid, &vcpu, &_intid) &&
+         _intid != INVALID_LPI )
+    {
+        spin_unlock(&its->its_lock);
+        return -1;
+    }
+
+    /* Enter the mapping in our virtual ITS tables. */
+    if ( !write_itte_locked(its, devid, eventid, collid, intid, &vcpu) )
+    {
+        spin_unlock(&its->its_lock);
+        return -1;
+    }
+
+    spin_unlock(&its->its_lock);
+
+    /*
+     * Connect this virtual LPI to the corresponding host LPI, which is
+     * determined by the same device ID and event ID on the host side.
+     * This returns us the corresponding, still unused pending_irq.
+     */
+    pirq = gicv3_assign_guest_event(its->d, its->doorbell_address,
+                                    devid, eventid, vcpu, intid);
+    if ( !pirq )
+        return -1;
+
+    vgic_init_pending_irq(pirq, intid);
+
+    /*
+     * Now read the guest's property table to initialize our cached state.
+     * It can't fire at this time, because it is not known to the host yet.
+     */
+    ret = update_lpi_property(its->d, intid, pirq);
+    if ( ret )
+        return ret;
+
+    pirq->lpi_vcpu_id = vcpu->vcpu_id;
+
+    /*
+     * Now insert the pending_irq into the domain's LPI tree, so that
+     * it becomes live.
+     */
+    write_lock(&its->d->arch.vgic.pend_lpi_tree_lock);
+    radix_tree_insert(&its->d->arch.vgic.pend_lpi_tree, intid, pirq);
+    write_unlock(&its->d->arch.vgic.pend_lpi_tree_lock);
+
+    return 0;
+}
+
 #define ITS_CMD_BUFFER_SIZE(baser)      ((((baser) & 0xff) + 1) << 12)
 
 /*
@@ -476,6 +571,10 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
         case GITS_CMD_MAPD:
             ret = its_handle_mapd(its, command);
             break;
+        case GITS_CMD_MAPI:
+        case GITS_CMD_MAPTI:
+            ret = its_handle_mapti(its, command);
+            break;
         case GITS_CMD_SYNC:
             /* We handle ITS commands synchronously, so we ignore SYNC. */
             break;
diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
index ea574c4..0be88ac 100644
--- a/xen/include/asm-arm/gic_v3_its.h
+++ b/xen/include/asm-arm/gic_v3_its.h
@@ -169,6 +169,12 @@ int gicv3_its_map_guest_device(struct domain *d,
 int gicv3_allocate_host_lpi_block(struct domain *d, uint32_t *first_lpi);
 void gicv3_free_host_lpi_block(uint32_t first_lpi);
 
+struct pending_irq *gicv3_assign_guest_event(struct domain *d, paddr_t doorbell,
+                                             uint32_t devid, uint32_t eventid,
+                                             struct vcpu *v, uint32_t virt_lpi);
+void gicv3_lpi_update_host_entry(uint32_t host_lpi, int domain_id,
+                                 unsigned int vcpu_id, uint32_t virt_lpi);
+
 #else
 
 static inline void gicv3_its_dt_init(const struct dt_device_node *node)
-- 
2.8.2

