
[Xen-changelog] [xen master] x86/vioapic: allow holes in the GSI range for PVH Dom0



commit 53e482266e8c552a47c1c14630b357dbedd86c3c
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Wed Apr 19 13:29:51 2017 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Apr 19 13:29:51 2017 +0200

    x86/vioapic: allow holes in the GSI range for PVH Dom0
    
    The current vIO APIC implementation for PVH Dom0 doesn't allow
    non-contiguous GSIs, i.e. it assumes every GSI belongs to an IO APIC.
    This doesn't match reality: there are systems whose GSI space contains
    holes.
    
    To fix this, add a base_gsi field to each hvm_vioapic struct to store
    the base GSI of each emulated IO APIC. For PVH Dom0 these values are
    populated from the hardware ones (see the illustrative sketch after
    the commit tags below).
    
    Reported-by: Chao Gao <chao.gao@xxxxxxxxx>
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Tested-by: Chao Gao <chao.gao@xxxxxxxxx>
    Release-acked-by: Julien Grall <julien.grall@xxxxxxx>
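
As a concrete illustration of both the problem and the fix, here is a
standalone sketch (not part of the patch) that mirrors the logic of the
reworked gsi_vioapic() on a hypothetical machine with two IO APICs and a
hole at GSIs 24-31. The fake_vioapic struct, the lookup() helper and the
pin counts are all invented for this example; the real structure is
struct hvm_vioapic, modified at the end of this patch.

#include <stdio.h>

struct fake_vioapic {
    unsigned int base_gsi;   /* first GSI handled by this IO APIC */
    unsigned int nr_pins;    /* number of redirection entries */
};

/* Hypothetical layout: GSIs 0-23 and 32-39 exist, 24-31 are a hole. */
static const struct fake_vioapic vioapics[] = {
    { .base_gsi = 0,  .nr_pins = 24 },
    { .base_gsi = 32, .nr_pins = 8  },
};

/* Mirrors the reworked gsi_vioapic(): find the vIO APIC owning a GSI. */
static const struct fake_vioapic *lookup(unsigned int gsi, unsigned int *pin)
{
    unsigned int i;

    for ( i = 0; i < sizeof(vioapics) / sizeof(vioapics[0]); i++ )
        if ( gsi >= vioapics[i].base_gsi &&
             gsi < vioapics[i].base_gsi + vioapics[i].nr_pins )
        {
            *pin = gsi - vioapics[i].base_gsi;
            return &vioapics[i];
        }

    return NULL; /* the GSI falls into a hole */
}

int main(void)
{
    static const unsigned int gsis[] = { 5, 28, 35 };
    unsigned int i, pin;

    for ( i = 0; i < sizeof(gsis) / sizeof(gsis[0]); i++ )
    {
        const struct fake_vioapic *v = lookup(gsis[i], &pin);

        if ( v )
            printf("GSI %u -> IO APIC at base %u, pin %u\n",
                   gsis[i], v->base_gsi, pin);
        else
            printf("GSI %u -> hole, no IO APIC\n", gsis[i]);
    }

    return 0;
}

Built with any C99 compiler, this reports GSI 5 as pin 5 of the first
IO APIC, GSI 28 as unmapped, and GSI 35 as pin 3 of the second, which is
exactly the behaviour the old sum-of-pins scheme could not express.
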
---
 xen/arch/x86/hvm/dom0_build.c     |  2 +-
 xen/arch/x86/hvm/vioapic.c        | 63 ++++++++++++++++++++-------------------
 xen/include/asm-x86/hvm/vioapic.h |  1 +
 3 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index db9be87..020c355 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -719,7 +719,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
         io_apic->header.length = sizeof(*io_apic);
         io_apic->id = domain_vioapic(d, i)->id;
         io_apic->address = domain_vioapic(d, i)->base_address;
-        io_apic->global_irq_base = io_apic_gsi_base(i);
+        io_apic->global_irq_base = domain_vioapic(d, i)->base_gsi;
         io_apic++;
     }
 
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 5157db7..abcc473 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -64,37 +64,23 @@ static struct hvm_vioapic *addr_vioapic(const struct domain *d,
 struct hvm_vioapic *gsi_vioapic(const struct domain *d, unsigned int gsi,
                                 unsigned int *pin)
 {
-    unsigned int i, base_gsi = 0;
+    unsigned int i;
 
     for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
 
-        if ( gsi >= base_gsi && gsi < base_gsi + vioapic->nr_pins )
+        if ( gsi >= vioapic->base_gsi &&
+             gsi < vioapic->base_gsi + vioapic->nr_pins )
         {
-            *pin = gsi - base_gsi;
+            *pin = gsi - vioapic->base_gsi;
             return vioapic;
         }
-
-        base_gsi += vioapic->nr_pins;
     }
 
     return NULL;
 }
 
-static unsigned int base_gsi(const struct domain *d,
-                             const struct hvm_vioapic *vioapic)
-{
-    unsigned int nr_vioapics = d->arch.hvm_domain.nr_vioapics;
-    unsigned int base_gsi = 0, i = 0;
-    const struct hvm_vioapic *tmp;
-
-    while ( i < nr_vioapics && (tmp = domain_vioapic(d, i++)) != vioapic )
-        base_gsi += tmp->nr_pins;
-
-    return base_gsi;
-}
-
 static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
 {
     uint32_t result = 0;
@@ -180,7 +166,7 @@ static void vioapic_write_redirent(
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     union vioapic_redir_entry *pent, ent;
     int unmasked = 0;
-    unsigned int gsi = base_gsi(d, vioapic) + idx;
+    unsigned int gsi = vioapic->base_gsi + idx;
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
@@ -340,7 +326,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
     struct domain *d = vioapic_domain(vioapic);
     struct vlapic *target;
     struct vcpu *v;
-    unsigned int irq = base_gsi(d, vioapic) + pin;
+    unsigned int irq = vioapic->base_gsi + pin;
 
     ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
 
@@ -451,7 +437,7 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
 {
     struct hvm_irq *hvm_irq = hvm_domain_irq(d);
     union vioapic_redir_entry *ent;
-    unsigned int i, base_gsi = 0;
+    unsigned int i;
 
     ASSERT(has_vioapic(d));
 
@@ -473,19 +459,18 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
             if ( iommu_enabled )
             {
                 spin_unlock(&d->arch.hvm_domain.irq_lock);
-                hvm_dpci_eoi(d, base_gsi + pin, ent);
+                hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
                 spin_lock(&d->arch.hvm_domain.irq_lock);
             }
 
             if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
                  !ent->fields.mask &&
-                 hvm_irq->gsi_assert_count[base_gsi + pin] )
+                 hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
             {
                 ent->fields.remote_irr = 1;
                 vioapic_deliver(vioapic, pin);
             }
         }
-        base_gsi += vioapic->nr_pins;
     }
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
@@ -538,7 +523,8 @@ void vioapic_reset(struct domain *d)
     for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
     {
         struct hvm_vioapic *vioapic = domain_vioapic(d, i);
-        unsigned int pin, nr_pins = vioapic->nr_pins;
+        unsigned int nr_pins = vioapic->nr_pins, base_gsi = vioapic->base_gsi;
+        unsigned int pin;
 
         memset(vioapic, 0, hvm_vioapic_size(nr_pins));
         for ( pin = 0; pin < nr_pins; pin++ )
@@ -546,7 +532,7 @@ void vioapic_reset(struct domain *d)
 
         if ( !is_hardware_domain(d) )
         {
-            ASSERT(!i);
+            ASSERT(!i && !base_gsi);
             vioapic->base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
             vioapic->id = 0;
         }
@@ -555,6 +541,7 @@ void vioapic_reset(struct domain *d)
             vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
             vioapic->id = mp_ioapics[i].mpc_apicid;
         }
+        vioapic->base_gsi = base_gsi;
         vioapic->nr_pins = nr_pins;
         vioapic->domain = d;
     }
@@ -588,8 +575,18 @@ int vioapic_init(struct domain *d)
 
     for ( i = 0; i < nr_vioapics; i++ )
     {
-        unsigned int nr_pins = is_hardware_domain(d) ? nr_ioapic_entries[i] :
-            ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+        unsigned int nr_pins, base_gsi;
+
+        if ( is_hardware_domain(d) )
+        {
+            nr_pins = nr_ioapic_entries[i];
+            base_gsi = io_apic_gsi_base(i);
+        }
+        else
+        {
+            nr_pins = ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+            base_gsi = 0;
+        }
 
         if ( (domain_vioapic(d, i) =
               xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
@@ -598,10 +595,16 @@ int vioapic_init(struct domain *d)
             return -ENOMEM;
         }
         domain_vioapic(d, i)->nr_pins = nr_pins;
-        nr_gsis += nr_pins;
+        domain_vioapic(d, i)->base_gsi = base_gsi;
+        nr_gsis = max(nr_gsis, base_gsi + nr_pins);
     }
 
-    ASSERT(hvm_domain_irq(d)->nr_gsis == nr_gsis);
+    /*
+     * NB: hvm_domain_irq(d)->nr_gsis is actually the highest GSI + 1, but
+     * there might be holes in this range (ie: GSIs that don't belong to any
+     * vIO APIC).
+     */
+    ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);
 
     d->arch.hvm_domain.nr_vioapics = nr_vioapics;
     vioapic_reset(d);
diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h
index 8ec91d2..2ceb60e 100644
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -50,6 +50,7 @@
 struct hvm_vioapic {
     struct domain *domain;
     uint32_t nr_pins;
+    unsigned int base_gsi;
     union {
         XEN_HVM_VIOAPIC(,);
         struct hvm_hw_vioapic domU;
--
generated by git-patchbot for /home/xen/git/xen.git#master
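
One more worked example: in vioapic_init(), summing nr_pins across all
vIO APICs (the old computation of nr_gsis) is only correct when the GSI
range is contiguous. With holes, the right quantity is the maximum of
base_gsi + nr_pins, i.e. the highest GSI + 1, which is why the final
ASSERT is relaxed from == to >=. A minimal sketch, reusing the same
hypothetical two-IO APIC layout as above (again, not part of the patch):

#include <stdio.h>

int main(void)
{
    /* Hypothetical layout: 24 pins at GSI 0, 8 pins at GSI 32. */
    static const struct { unsigned int base_gsi, nr_pins; } v[] = {
        { 0, 24 }, { 32, 8 },
    };
    unsigned int i, sum = 0, highest = 0;

    for ( i = 0; i < sizeof(v) / sizeof(v[0]); i++ )
    {
        sum += v[i].nr_pins;                 /* old scheme: 24 + 8 = 32 */
        if ( v[i].base_gsi + v[i].nr_pins > highest )
            highest = v[i].base_gsi + v[i].nr_pins; /* new: max(24, 40) */
    }

    /* Prints: old sum = 32, highest GSI + 1 = 40 */
    printf("old sum = %u, highest GSI + 1 = %u\n", sum, highest);
    return 0;
}

With the hole at GSIs 24-31, the old sum (32) would undercount the GSI
space and misplace the second IO APIC's pins, while the new computation
accounts for GSIs up to 39.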
