
[Xen-devel] [PATCH v12 09/34] ARM: GICv3: introduce separate pending_irq structs for LPIs



For the same reason that allocating a struct irq_desc for each
possible LPI is not an option, having a struct pending_irq for each LPI
is also not feasible. We only care about mapped LPIs, so we can get away
with having struct pending_irq's only for them.
Maintain a per-domain radix tree in which we store pointers to the
respective struct pending_irq; the index used is the virtual LPI number.
The memory for the actual structures has already been allocated per
device at device mapping time.
Teach the existing VGIC functions to find the right pointer when given
a virtual LPI number.
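
For illustration, the insert side could look like the sketch below. The
sketch is not part of this patch: the helper name and the first_vlpi/
nr_lpis parameters are assumptions made for the example; the patch itself
only introduces pend_lpi_tree, its lock and the lookup side.

    /*
     * Hypothetical sketch: storing the pointers at device mapping time.
     * The struct pending_irq's are assumed to have been allocated as one
     * array per device; each is inserted into the per-domain radix tree,
     * indexed by its virtual LPI number, under the write lock.
     */
    static int example_map_device_lpis(struct domain *d,
                                       struct pending_irq *pirqs,
                                       uint32_t first_vlpi,
                                       unsigned int nr_lpis)
    {
        unsigned int i;
        int ret = 0;

        write_lock(&d->arch.vgic.pend_lpi_tree_lock);
        for ( i = 0; i < nr_lpis; i++ )
        {
            ret = radix_tree_insert(&d->arch.vgic.pend_lpi_tree,
                                    first_vlpi + i, &pirqs[i]);
            if ( ret )
                break;      /* a real caller would unwind earlier inserts */
        }
        write_unlock(&d->arch.vgic.pend_lpi_tree_lock);

        return ret;
    }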

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
Acked-by: Julien Grall <julien.grall@xxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 xen/arch/arm/vgic-v2.c       |  8 ++++++++
 xen/arch/arm/vgic-v3.c       | 30 ++++++++++++++++++++++++++++++
 xen/arch/arm/vgic.c          |  2 ++
 xen/include/asm-arm/domain.h |  2 ++
 xen/include/asm-arm/vgic.h   |  2 ++
 5 files changed, 44 insertions(+)

diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 9fa42e1..488e6fa 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -705,10 +705,18 @@ static void vgic_v2_domain_free(struct domain *d)
     /* Nothing to be cleaned up for this driver */
 }
 
+static struct pending_irq *vgic_v2_lpi_to_pending(struct domain *d,
+                                                  unsigned int vlpi)
+{
+    /* Dummy function, no LPIs on a VGICv2. */
+    BUG();
+}
+
 static const struct vgic_ops vgic_v2_ops = {
     .vcpu_init   = vgic_v2_vcpu_init,
     .domain_init = vgic_v2_domain_init,
     .domain_free = vgic_v2_domain_free,
+    .lpi_to_pending = vgic_v2_lpi_to_pending,
     .max_vcpus = 8,
 };
 
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 474cca7..9dee2df 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -1457,6 +1457,9 @@ static int vgic_v3_domain_init(struct domain *d)
     d->arch.vgic.nr_regions = rdist_count;
     d->arch.vgic.rdist_regions = rdist_regions;
 
+    rwlock_init(&d->arch.vgic.pend_lpi_tree_lock);
+    radix_tree_init(&d->arch.vgic.pend_lpi_tree);
+
     /*
      * Domain 0 gets the hardware address.
      * Guests get the virtual platform layout.
@@ -1545,14 +1548,41 @@ static int vgic_v3_domain_init(struct domain *d)
 static void vgic_v3_domain_free(struct domain *d)
 {
     vgic_v3_its_free_domain(d);
+    /*
+     * It is expected that at this point all actual ITS devices have been
+     * cleaned up already. The struct pending_irq's, for which the pointers
+     * have been stored in the radix tree, are allocated and freed per
+     * device. On device unmapping, all the entries are removed from the
+     * tree and the backing memory is freed.
+     */
+    radix_tree_destroy(&d->arch.vgic.pend_lpi_tree, NULL);
     xfree(d->arch.vgic.rdist_regions);
 }
 
+/*
+ * Looks up a virtual LPI number in our tree of mapped LPIs. This will return
+ * the corresponding struct pending_irq, which we also use to store the
+ * enabled and pending bits plus the priority.
+ * Returns NULL if an LPI cannot be found (or no LPIs are supported).
+ */
+static struct pending_irq *vgic_v3_lpi_to_pending(struct domain *d,
+                                                  unsigned int lpi)
+{
+    struct pending_irq *pirq;
+
+    read_lock(&d->arch.vgic.pend_lpi_tree_lock);
+    pirq = radix_tree_lookup(&d->arch.vgic.pend_lpi_tree, lpi);
+    read_unlock(&d->arch.vgic.pend_lpi_tree_lock);
+
+    return pirq;
+}
+
 static const struct vgic_ops v3_ops = {
     .vcpu_init   = vgic_v3_vcpu_init,
     .domain_init = vgic_v3_domain_init,
     .domain_free = vgic_v3_domain_free,
     .emulate_reg  = vgic_v3_emulate_reg,
+    .lpi_to_pending = vgic_v3_lpi_to_pending,
     /*
      * We use both AFF1 and AFF0 in (v)MPIDR. Thus, the max number of CPU
      * that can be supported is up to 4096(==256*16) in theory.
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 9cc9563..cb7ab3b 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -469,6 +469,8 @@ struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
      * are used for SPIs; the rest are used for per cpu irqs */
     if ( irq < 32 )
         n = &v->arch.vgic.pending_irqs[irq];
+    else if ( is_lpi(irq) )
+        n = v->domain->arch.vgic.handler->lpi_to_pending(v->domain, irq);
     else
         n = &v->domain->arch.vgic.pending_irqs[irq - 32];
     return n;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 7c3829d..3d8e84c 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -111,6 +111,8 @@ struct arch_domain
         uint32_t rdist_stride;              /* Re-Distributor stride */
         struct rb_root its_devices;         /* Devices mapped to an ITS */
         spinlock_t its_devices_lock;        /* Protects the its_devices tree */
+        struct radix_tree_root pend_lpi_tree; /* Stores struct pending_irq's */
+        rwlock_t pend_lpi_tree_lock;        /* Protects the pend_lpi_tree */
         unsigned int intid_bits;
 #endif
     } vgic;
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 3af7a24..65d2322 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -134,6 +134,8 @@ struct vgic_ops {
     void (*domain_free)(struct domain *d);
     /* vGIC sysreg/cpregs emulate */
     bool (*emulate_reg)(struct cpu_user_regs *regs, union hsr hsr);
+    /* look up the struct pending_irq for a given LPI */
+    struct pending_irq *(*lpi_to_pending)(struct domain *d, unsigned int vlpi);
     /* Maximum number of vCPU supported */
     const unsigned int max_vcpus;
 };
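
Note that radix_tree_lookup() returns NULL for an index that was never
inserted, so with this patch irq_to_pending() can return NULL when given
an unmapped LPI. A hypothetical caller sketch (the helper name and the
handling shown are assumptions, not taken from this series):

    /*
     * Hypothetical sketch: unlike SGIs/PPIs/SPIs, an LPI lookup can
     * fail, so the result of irq_to_pending() must be checked before
     * it is dereferenced.
     */
    static void example_mark_queued(struct vcpu *v, unsigned int virq)
    {
        struct pending_irq *p = irq_to_pending(v, virq);

        if ( !p )               /* unmapped LPI: nothing to inject */
            return;

        set_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
    }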
-- 
2.9.0

