
[Xen-devel] [PATCH 7/8] xen/drivers: use keyhandler locks when dumping data to console



Instead of using the normal locks, use the keyhandler-provided trylocks
with timeouts. This requires adding a special primitive for the pcidevs
lock.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/drivers/passthrough/amd/iommu_intr.c | 14 ++++++++++----
 xen/drivers/passthrough/pci.c            | 14 +++++++++++---
 xen/drivers/vpci/msi.c                   |  5 ++++-
 3 files changed, 25 insertions(+), 8 deletions(-)
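
[Editor's note: the keyhandler_*lock helpers used in this patch are introduced
by an earlier patch in this series and are not part of this diff. A minimal
sketch of their assumed shape follows, for illustration only; the timeout
value, the macro body and the use of spin_trylock_irqsave() are assumptions,
not the series' actual implementation. The idea is to retry a trylock for a
bounded time and, on failure, print the supplied message and report failure
instead of risking a deadlock inside a key handler.]

    /*
     * Sketch only -- assumed shape of the helpers from the earlier patch
     * (e.g. xen/include/xen/keyhandler.h).  Timeout value is a guess.
     * Relies on xen/time.h, xen/lib.h and xen/spinlock.h.
     */
    #define KEYHANDLER_LOCK_TIMEOUT MILLISECS(100)

    #define keyhandler_lock_body(type, lockfunc, ...)        \
        s_time_t end = NOW() + KEYHANDLER_LOCK_TIMEOUT;      \
                                                              \
        do {                                                  \
            type got = (lockfunc);                            \
                                                              \
            if ( got )                                        \
                return got;                                   \
            cpu_relax();                                      \
        } while ( NOW() < end );                              \
                                                              \
        printk(__VA_ARGS__);                                  \
        return 0;

    static inline bool keyhandler_spin_lock(spinlock_t *lock, const char *msg)
    {
        keyhandler_lock_body(bool, spin_trylock(lock), "%s\n", msg);
    }

    static inline bool keyhandler_spin_lock_irqsave(spinlock_t *lock,
                                                    unsigned long *flags,
                                                    const char *msg)
    {
        keyhandler_lock_body(bool, spin_trylock_irqsave(lock, *flags),
                             "%s\n", msg);
    }

[The point of a bounded retry rather than a plain trylock is that dumping is
best effort: brief contention should not make the handler silently skip data,
but a lock held indefinitely must not hang the console either.]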

diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
index e1cc13b873..753aaf3679 100644
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -16,6 +16,7 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/keyhandler.h>
 #include <xen/softirq.h>
 
 #include <asm/io_apic.h>
@@ -886,9 +887,12 @@ static int dump_intremap_mapping(const struct amd_iommu *iommu,
     if ( !ivrs_mapping )
         return 0;
 
-    spin_lock_irqsave(&(ivrs_mapping->intremap_lock), flags);
-    dump_intremap_table(iommu, ivrs_mapping->intremap_table, ivrs_mapping);
-    spin_unlock_irqrestore(&(ivrs_mapping->intremap_lock), flags);
+    if ( keyhandler_spin_lock_irqsave(&(ivrs_mapping->intremap_lock), &flags,
+                                      "could not get intremap lock") )
+    {
+        dump_intremap_table(iommu, ivrs_mapping->intremap_table, ivrs_mapping);
+        spin_unlock_irqrestore(&(ivrs_mapping->intremap_lock), flags);
+    }
 
     process_pending_softirqs();
 
@@ -909,7 +913,9 @@ void amd_iommu_dump_intremap_tables(unsigned char key)
 
         printk("--- Dumping Shared IOMMU Interrupt Remapping Table ---\n");
 
-        spin_lock_irqsave(&shared_intremap_lock, flags);
+        if ( !keyhandler_spin_lock_irqsave(&shared_intremap_lock, &flags,
+                                           "could not get lock") )
+            return;
         dump_intremap_table(list_first_entry(&amd_iommu_head, struct amd_iommu,
                                              list),
                             shared_intremap_table, NULL);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 5660f7e1c2..1fd998af3a 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1356,12 +1356,20 @@ static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
     return 0;
 }
 
+static bool keyhandler_pcidevs_lock(void)
+{
+    keyhandler_lock_body(bool_t, pcidevs_trylock(),
+                         "could not get pcidevs lock\n");
+}
+
 static void dump_pci_devices(unsigned char ch)
 {
     printk("==== PCI devices ====\n");
-    pcidevs_lock();
-    pci_segments_iterate(_dump_pci_devices, NULL);
-    pcidevs_unlock();
+    if ( keyhandler_pcidevs_lock() )
+    {
+        pci_segments_iterate(_dump_pci_devices, NULL);
+        pcidevs_unlock();
+    }
 }
 
 static int __init setup_dump_pcidevs(void)
diff --git a/xen/drivers/vpci/msi.c b/xen/drivers/vpci/msi.c
index 75010762ed..31ea99b62e 100644
--- a/xen/drivers/vpci/msi.c
+++ b/xen/drivers/vpci/msi.c
@@ -16,6 +16,7 @@
  * License along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <xen/keyhandler.h>
 #include <xen/sched.h>
 #include <xen/softirq.h>
 #include <xen/vpci.h>
@@ -283,7 +284,9 @@ void vpci_dump_msi(void)
             const struct vpci_msi *msi;
             const struct vpci_msix *msix;
 
-            if ( !pdev->vpci || !spin_trylock(&pdev->vpci->lock) )
+            if ( !pdev->vpci ||
+                 !keyhandler_spin_lock(&pdev->vpci->lock,
+                                       "could not get vpci lock") )
                 continue;
 
             msi = pdev->vpci->msi;
-- 
2.16.4

