
[Xen-devel] [PATCH 8/8] xen/x86: use keyhandler locks when dumping data to console



Instead of using the normal locks, use the trylock-with-timeout
variants provided by the keyhandler infrastructure. This way dumping
data to the console via a debug key cannot block indefinitely on a
lock already held elsewhere, e.g. by a stuck remote CPU.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
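Note: the keyhandler_spin_lock_irqsave(), keyhandler_spin_lock() and
keyhandler_read_lock() helpers used below are not part of this patch;
they are presumably introduced earlier in this series. As a rough
sketch of the intended semantics (the timeout value and the exact
implementation details here are assumptions, not taken from the
series), such a helper could look like:

    #include <xen/lib.h>          /* printk() */
    #include <xen/spinlock.h>
    #include <xen/time.h>         /* NOW(), MILLISECS() */
    #include <asm/processor.h>    /* cpu_relax() */

    /*
     * Sketch only: retry the trylock until a timeout expires, then
     * print the supplied diagnostic message and give up.
     */
    bool keyhandler_spin_lock_irqsave(spinlock_t *lock,
                                      unsigned long *flags,
                                      const char *msg)
    {
        /* The 100ms timeout is an assumption. */
        s_time_t deadline = NOW() + MILLISECS(100);

        do {
            /* spin_trylock_irqsave() is a macro, so *flags is fine. */
            if ( spin_trylock_irqsave(lock, *flags) )
                return true;
            cpu_relax();
        } while ( NOW() < deadline );

        printk("%s\n", msg);
        return false;
    }

keyhandler_spin_lock() and keyhandler_read_lock() would follow the
same pattern based on spin_trylock() and read_trylock(). Note also
that get_ioapic_lock() below keeps the plain blocking lock for the
boot time invocation of __print_IO_APIC(), where waiting is harmless,
and uses the trylock variant only for the runtime debug key path.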
 xen/arch/x86/io_apic.c | 53 +++++++++++++++++++++++++++++++++++++-------------
 xen/arch/x86/irq.c     |  5 ++++-
 xen/arch/x86/msi.c     |  4 +++-
 xen/arch/x86/numa.c    | 16 +++++++++------
 4 files changed, 57 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index e98e08e9c8..4acdc566b9 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1098,6 +1098,18 @@ static inline void UNEXPECTED_IO_APIC(void)
 {
 }
 
+static bool get_ioapic_lock(unsigned long *flags, bool boot)
+{
+    if ( boot )
+    {
+        spin_lock_irqsave(&ioapic_lock, *flags);
+        return true;
+    }
+
+    return keyhandler_spin_lock_irqsave(&ioapic_lock, flags,
+                                        "could not get ioapic lock");
+}
+
 static void /*__init*/ __print_IO_APIC(bool boot)
 {
     int apic, i;
@@ -1125,13 +1137,16 @@ static void /*__init*/ __print_IO_APIC(bool boot)
         if (!nr_ioapic_entries[apic])
             continue;
 
-       spin_lock_irqsave(&ioapic_lock, flags);
+        if ( !get_ioapic_lock(&flags, boot) )
+            continue;
+
        reg_00.raw = io_apic_read(apic, 0);
        reg_01.raw = io_apic_read(apic, 1);
        if (reg_01.bits.version >= 0x10)
             reg_02.raw = io_apic_read(apic, 2);
        if (reg_01.bits.version >= 0x20)
             reg_03.raw = io_apic_read(apic, 3);
+
        spin_unlock_irqrestore(&ioapic_lock, flags);
 
        printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
@@ -1201,7 +1216,12 @@ static void /*__init*/ __print_IO_APIC(bool boot)
        for (i = 0; i <= reg_01.bits.entries; i++) {
             struct IO_APIC_route_entry entry;
 
-            entry = ioapic_read_entry(apic, i, 0);
+            if ( !get_ioapic_lock(&flags, boot) )
+                continue;
+
+            entry = __ioapic_read_entry(apic, i, 0);
+
+            spin_unlock_irqrestore(&ioapic_lock, flags);
 
             if ( x2apic_enabled && iommu_intremap )
                 printk(KERN_DEBUG " %02x %08x", i, entry.dest.dest32);
@@ -2495,21 +2515,28 @@ void dump_ioapic_irq_info(void)
 
         for ( ; ; )
         {
+            unsigned long flags;
+
             pin = entry->pin;
 
             printk("      Apic 0x%02x, Pin %2d: ", entry->apic, pin);
 
-            rte = ioapic_read_entry(entry->apic, pin, 0);
-
-            printk("vec=%02x delivery=%-5s dest=%c status=%d "
-                   "polarity=%d irr=%d trig=%c mask=%d dest_id:%0*x\n",
-                   rte.vector, delivery_mode_2_str(rte.delivery_mode),
-                   rte.dest_mode ? 'L' : 'P',
-                   rte.delivery_status, rte.polarity, rte.irr,
-                   rte.trigger ? 'L' : 'E', rte.mask,
-                   (x2apic_enabled && iommu_intremap) ? 8 : 2,
-                   (x2apic_enabled && iommu_intremap) ?
-                       rte.dest.dest32 : rte.dest.logical.logical_dest);
+            if ( keyhandler_spin_lock_irqsave(&ioapic_lock, &flags,
+                                              "could not get ioapic lock") )
+            {
+                rte = __ioapic_read_entry(entry->apic, pin, 0);
+                spin_unlock_irqrestore(&ioapic_lock, flags);
+
+                printk("vec=%02x delivery=%-5s dest=%c status=%d "
+                       "polarity=%d irr=%d trig=%c mask=%d dest_id:%0*x\n",
+                       rte.vector, delivery_mode_2_str(rte.delivery_mode),
+                       rte.dest_mode ? 'L' : 'P',
+                       rte.delivery_status, rte.polarity, rte.irr,
+                       rte.trigger ? 'L' : 'E', rte.mask,
+                       (x2apic_enabled && iommu_intremap) ? 8 : 2,
+                       (x2apic_enabled && iommu_intremap) ?
+                           rte.dest.dest32 : rte.dest.logical.logical_dest);
+            }
 
             if ( entry->next == 0 )
                 break;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index cc2eb8e925..f3d931b121 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2470,7 +2470,9 @@ static void dump_irqs(unsigned char key)
 
         ssid = in_irq() ? NULL : xsm_show_irq_sid(irq);
 
-        spin_lock_irqsave(&desc->lock, flags);
+        if ( !keyhandler_spin_lock_irqsave(&desc->lock, &flags,
+                                           "could not get irq lock") )
+            goto free_ssid;
 
         printk("   IRQ:%4d vec:%02x %-15s status=%03x aff:{%*pbl}/{%*pbl} ",
                irq, desc->arch.vector, desc->handler->typename, desc->status,
@@ -2506,6 +2508,7 @@ static void dump_irqs(unsigned char key)
 
         spin_unlock_irqrestore(&desc->lock, flags);
 
+ free_ssid:
         xfree(ssid);
     }
 
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index c85cf9f85a..d10b856179 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -1470,7 +1470,9 @@ static void dump_msi(unsigned char key)
         if ( !irq_desc_initialized(desc) )
             continue;
 
-        spin_lock_irqsave(&desc->lock, flags);
+        if ( !keyhandler_spin_lock_irqsave(&desc->lock, &flags,
+                                           "could not get irq lock") )
+            continue;
 
         entry = desc->msi_desc;
         if ( !entry )
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 6ef15b34d5..d21ed8737f 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -425,18 +425,22 @@ static void dump_numa(unsigned char key)
         for_each_online_node ( i )
             page_num_node[i] = 0;
 
-        spin_lock(&d->page_alloc_lock);
-        page_list_for_each(page, &d->page_list)
+        if ( keyhandler_spin_lock(&d->page_alloc_lock,
+                                  "could not get page_alloc lock") )
         {
-            i = phys_to_nid(page_to_maddr(page));
-            page_num_node[i]++;
+            page_list_for_each(page, &d->page_list)
+            {
+                i = phys_to_nid(page_to_maddr(page));
+                page_num_node[i]++;
+            }
+            spin_unlock(&d->page_alloc_lock);
         }
-        spin_unlock(&d->page_alloc_lock);
 
         for_each_online_node ( i )
             printk("    Node %u: %u\n", i, page_num_node[i]);
 
-        if ( !read_trylock(&d->vnuma_rwlock) )
+        if ( !keyhandler_read_lock(&d->vnuma_rwlock,
+                                   "could not get vnuma lock") )
             continue;
 
         if ( !d->vnuma )
-- 
2.16.4

