
[PATCH 1/4] pci: add rwlock to pcidevs_lock machinery

From: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>

Currently the pcidevs lock is a global recursive spinlock, which is fine
for the existing use cases. It is used both to protect pdev instances
themselves from being removed while in use and to make sure that updates
of the relevant pdev properties are synchronized.

As we move towards using vPCI for guests, this becomes problematic in
terms of lock contention. For example, during vpci_{read|write} the
access to a pdev must be protected to prevent the pdev from disappearing
under our feet, which needs to be done with the help of
pcidevs_{lock|unlock}. On the other hand, it is highly undesirable for
all other pdev accesses which only use pdevs in read mode, i.e. those
which do not add or remove pdevs, to contend on the same global lock.

For the above reasons, introduce a read/write lock which helps prevent
lock contention between pdev readers and writers:
- make pci_{add|remove}_device and setup_hwdom_pci_devices use the
  new write lock;
- keep all the rest using the existing API (pcidevs_{lock|unlock}),
  but extend the latter to also acquire the rwlock in read mode
  (an illustrative usage sketch follows below the '---' separator).

This is in preparation for vPCI to be used for guests.

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
---
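Illustrative usage sketch, not part of the patch: how a read-side and a
write-side caller are expected to look under the new scheme. Both example
functions below are made up for illustration (assuming xen/pci.h);
pci_get_pdev() is the existing lookup helper, and the locking calls are
the ones introduced or extended by this patch:

    /*
     * Read-side caller, e.g. a config space access: the pdev must not
     * disappear under our feet, but no pdevs are added or removed.
     */
    static void example_reader(int seg, int bus, int devfn)
    {
        const struct pci_dev *pdev;

        pcidevs_lock();      /* rwlock in read mode + recursive spinlock */
        pdev = pci_get_pdev(seg, bus, devfn);
        if ( pdev )
        {
            /* ... read pdev state ... */
        }
        pcidevs_unlock();
    }

    /*
     * Write-side caller, e.g. device add/remove: must exclude all
     * readers while the pdev lists are modified.
     */
    static void example_writer(void)
    {
        pcidevs_write_lock();
        /* ... add or remove a struct pci_dev ... */
        pcidevs_write_unlock();
    }

Note that with this patch readers still serialize among themselves on the
recursive spinlock; the new rwlock only guarantees that the write-side
paths (pci_{add|remove}_device and hwdom setup) cannot run concurrently
with any reader.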
 xen/drivers/passthrough/pci.c | 45 ++++++++++++++++++++++++++---------
 xen/include/xen/pci.h         |  4 ++++
 2 files changed, 38 insertions(+), 11 deletions(-)

diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index e8b09d77d880..2a0d3d37a69f 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -51,20 +51,38 @@ struct pci_seg {
 };
 
 static spinlock_t _pcidevs_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(_pcidevs_rwlock);
 
 void pcidevs_lock(void)
 {
+    read_lock(&_pcidevs_rwlock);
     spin_lock_recursive(&_pcidevs_lock);
 }
 
 void pcidevs_unlock(void)
 {
     spin_unlock_recursive(&_pcidevs_lock);
+    read_unlock(&_pcidevs_rwlock);
 }
 
 bool_t pcidevs_locked(void)
 {
-    return !!spin_is_locked(&_pcidevs_lock);
+    return !!spin_is_locked(&_pcidevs_lock) || pcidevs_write_locked();
+}
+
+void pcidevs_write_lock(void)
+{
+    write_lock(&_pcidevs_rwlock);
+}
+
+void pcidevs_write_unlock(void)
+{
+    write_unlock(&_pcidevs_rwlock);
+}
+
+bool pcidevs_write_locked(void)
+{
+    return !!rw_is_write_locked(&_pcidevs_rwlock);
 }
 
 static struct radix_tree_root pci_segments;
@@ -758,7 +776,7 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn,
 
     ret = -ENOMEM;
 
-    pcidevs_lock();
+    pcidevs_write_lock();
     pseg = alloc_pseg(seg);
     if ( !pseg )
         goto out;
@@ -854,7 +872,7 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn,
     pci_enable_acs(pdev);
 
 out:
-    pcidevs_unlock();
+    pcidevs_write_unlock();
     if ( !ret )
     {
         printk(XENLOG_DEBUG "PCI add %s %pp\n", pdev_type,  &pdev->sbdf);
@@ -885,7 +903,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
     if ( !pseg )
         return -ENODEV;
 
-    pcidevs_lock();
+    pcidevs_write_lock();
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
         {
@@ -899,7 +917,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
             break;
         }
 
-    pcidevs_unlock();
+    pcidevs_write_unlock();
     return ret;
 }
 
@@ -1176,6 +1194,11 @@ static void __hwdom_init setup_one_hwdom_device(const struct setup_hwdom *ctxt,
                ctxt->d->domain_id, err);
 }
 
+/*
+ * It's safe to drop and re-acquire the write lock in this context without
+ * risking pdev disappearing because devices cannot be removed until the
+ * initial domain has been started.
+ */
 static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg)
 {
     struct setup_hwdom *ctxt = arg;
@@ -1208,17 +1231,17 @@ static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg
 
             if ( iommu_verbose )
             {
-                pcidevs_unlock();
+                pcidevs_write_unlock();
                 process_pending_softirqs();
-                pcidevs_lock();
+                pcidevs_write_lock();
             }
         }
 
         if ( !iommu_verbose )
         {
-            pcidevs_unlock();
+            pcidevs_write_unlock();
             process_pending_softirqs();
-            pcidevs_lock();
+            pcidevs_write_lock();
         }
     }
 
@@ -1230,9 +1253,9 @@ void __hwdom_init setup_hwdom_pci_devices(
 {
     struct setup_hwdom ctxt = { .d = d, .handler = handler };
 
-    pcidevs_lock();
+    pcidevs_write_lock();
     pci_segments_iterate(_setup_hwdom_pci_devices, &ctxt);
-    pcidevs_unlock();
+    pcidevs_write_unlock();
 }
 
 /* APEI not supported on ARM yet. */
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index b6d7e454f814..e814d9542bfc 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -152,6 +152,10 @@ void pcidevs_lock(void);
 void pcidevs_unlock(void);
 bool_t __must_check pcidevs_locked(void);
 
+void pcidevs_write_lock(void);
+void pcidevs_write_unlock(void);
+bool __must_check pcidevs_write_locked(void);
+
 bool_t pci_known_segment(u16 seg);
 bool_t pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func);
 int scan_pci_devices(void);
-- 
2.25.1
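
A further illustrative note, also not part of the patch: existing lock
assertions keep working, because pcidevs_locked() is extended to also
report the write-locked case, e.g.:

    /* In a path reachable from both readers and writers: */
    ASSERT(pcidevs_locked());

This holds whether the caller entered via pcidevs_lock() or via
pcidevs_write_lock().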