
[Xen-devel] [PATCH v7 31/34] ARM: vITS: handle INV command



The INV command instructs the ITS to update the configuration data for
a given LPI by re-reading its entry from the property table.
The priority value needs no special handling, but enabling or disabling
an LPI has side effects: we remove disabled virtual LPIs from their
VCPUs or push newly enabled ones to them, and we also check the virtual
pending bit when an LPI becomes enabled, injecting it if needed.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/vgic-v3-its.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
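
Not part of the patch, just background for the property table read
described above: a minimal, self-contained sketch of how the one-byte
LPI property entry is laid out according to the GICv3 architecture
(bit 0 is the enable bit, bits [7:2] hold the priority). The helper
decode_lpi_prop, the struct and the sample value are invented for this
illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* GICv3 LPI property table entry: bit[0] = Enable, bits[7:2] = Priority. */
#define LPI_PROP_ENABLED    (1U << 0)
#define LPI_PROP_PRIO_MASK  0xfcU

struct lpi_prop {
    bool enabled;
    uint8_t priority;
};

/* Hypothetical helper: decode one property table entry. */
static struct lpi_prop decode_lpi_prop(uint8_t entry)
{
    struct lpi_prop prop;

    prop.enabled  = entry & LPI_PROP_ENABLED;
    prop.priority = entry & LPI_PROP_PRIO_MASK;

    return prop;
}

int main(void)
{
    /* Example value a guest might write: enabled, priority 0xa0. */
    struct lpi_prop prop = decode_lpi_prop(0xa1);

    printf("enabled=%d priority=0x%02x\n", prop.enabled, prop.priority);

    return 0;
}

The patch below reads this byte via update_lpi_property() and then acts
on the enable bit in update_lpi_enabled_status().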

diff --git a/xen/arch/arm/vgic-v3-its.c b/xen/arch/arm/vgic-v3-its.c
index f163c09..7634a63 100644
--- a/xen/arch/arm/vgic-v3-its.c
+++ b/xen/arch/arm/vgic-v3-its.c
@@ -398,6 +398,70 @@ static int update_lpi_property(struct domain *d, uint32_t vlpi,
     return 0;
 }
 
+/*
+ * For a given virtual LPI, read the enable bit and priority from the
+ * virtual property table and update the virtual IRQ's state.
+ * This takes care of removing virtual LPIs from their VCPUs or pushing
+ * them there. Also check whether this LPI is due to be injected, and do it.
+ */
+static int update_lpi_enabled_status(struct domain *d,
+                                     struct vcpu *vcpu, uint32_t vlpi)
+{
+    struct pending_irq *p = d->arch.vgic.handler->lpi_to_pending(d, vlpi);
+    unsigned long flags;
+    int ret;
+
+    if ( !p )
+        return -EINVAL;
+
+    spin_lock_irqsave(&vcpu->arch.vgic.lock, flags);
+    ret = update_lpi_property(d, vlpi, p);
+    if ( ret ) {
+        spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+        return ret;
+    }
+
+    if ( test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
+    {
+        if ( !list_empty(&p->inflight) &&
+             !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+            gic_raise_guest_irq(vcpu, vlpi, p->lpi_priority);
+        spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+        vgic_vcpu_inject_irq(vcpu, vlpi);
+    }
+    else
+    {
+        clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
+        spin_unlock_irqrestore(&vcpu->arch.vgic.lock, flags);
+
+        gic_remove_from_queues(vcpu, vlpi);
+    }
+
+    return 0;
+}
+
+static int its_handle_inv(struct virt_its *its, uint64_t *cmdptr)
+{
+    uint32_t devid = its_cmd_get_deviceid(cmdptr);
+    uint32_t eventid = its_cmd_get_id(cmdptr);
+    struct vcpu *vcpu;
+    uint32_t vlpi;
+
+    /* Translate the event into a vCPU/vLPI pair. */
+    if ( !read_itte(its, devid, eventid, &vcpu, &vlpi) )
+        return -1;
+
+    /*
+     * Now read the property table and update our cached status. This
+     * also takes care of injecting or removing this LPI, if needed.
+     */
+    if ( update_lpi_enabled_status(its->d, vcpu, vlpi) )
+        return -1;
+
+    return 0;
+}
+
 static int its_handle_mapc(struct virt_its *its, uint64_t *cmdptr)
 {
     uint32_t collid = its_cmd_get_collection(cmdptr);
@@ -638,6 +702,9 @@ static int vgic_its_handle_cmds(struct domain *d, struct virt_its *its)
         case GITS_CMD_INT:
             ret = its_handle_int(its, command);
             break;
+        case GITS_CMD_INV:
+            ret = its_handle_inv(its, command);
+            break;
         case GITS_CMD_MAPC:
             ret = its_handle_mapc(its, command);
             break;
-- 
2.8.2

