
[RFC PATCH 02/10] xen: pci: add pci_seg->alldevs_lock


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Date: Wed, 31 Aug 2022 14:10:59 +0000
  • Cc: Oleksandr Andrushchenko <Oleksandr_Andrushchenko@xxxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Paul Durrant <paul@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Wed, 31 Aug 2022 14:11:22 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This lock protects the alldevs_list field of struct pci_seg. As such,
it should be held when adding, removing, or enumerating PCI devices
assigned to a PCI segment.

The radix tree that stores PCI segments has its own locking mechanism,
and pci_seg structures are only ever allocated, never freed, so no
additional locking is needed to access pci_seg structures themselves.
But we do need a lock that protects the alldevs_list field.
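
To illustrate the resulting pattern (a sketch only, not part of this
patch; it mirrors the pci_get_pdev() change below):

    struct pci_dev *pdev;
    /* No lock is needed for the segment lookup itself: the radix
     * tree is internally locked and pseg objects are never freed. */
    struct pci_seg *pseg = get_pseg(seg);

    if ( !pseg )
        return NULL;

    /* Walking or modifying the per-segment device list does need
     * the new lock. */
    spin_lock(&pseg->alldevs_lock);
    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
    {
        /* ... inspect or match pdev ... */
    }
    spin_unlock(&pseg->alldevs_lock);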

This enables more granular locking instead of the one huge
pcidevs_lock that serializes the entire PCI subsystem. Please note
that pcidevs_lock() is still used; it will be removed in subsequent
patches.
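
Where both locks are held (as in pci_remove_device() below), the
nesting is pcidevs_lock() outside and alldevs_lock inside, released
in reverse order:

    pcidevs_lock();                    /* outer: subsystem-wide lock */
    spin_lock(&pseg->alldevs_lock);    /* inner: per-segment list lock */
    /* ... look up and unlink the device ... */
    spin_unlock(&pseg->alldevs_lock);
    pcidevs_unlock();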

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
 xen/drivers/passthrough/pci.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 4366f8f965..2dfa1c2875 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -38,6 +38,7 @@
 
 struct pci_seg {
     struct list_head alldevs_list;
+    spinlock_t alldevs_lock;
     u16 nr;
     unsigned long *ro_map;
     /* bus2bridge_lock protects bus2bridge array */
@@ -93,6 +94,7 @@ static struct pci_seg *alloc_pseg(u16 seg)
     pseg->nr = seg;
     INIT_LIST_HEAD(&pseg->alldevs_list);
     spin_lock_init(&pseg->bus2bridge_lock);
+    spin_lock_init(&pseg->alldevs_lock);
 
     if ( radix_tree_insert(&pci_segments, seg, pseg) )
     {
@@ -385,10 +387,17 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
     unsigned int pos;
     int rc;
 
+    spin_lock(&pseg->alldevs_lock);
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
+        {
+            spin_unlock(&pseg->alldevs_lock);
             return pdev;
+        }
 
     pdev = xzalloc(struct pci_dev);
     if ( !pdev )
+    {
+        spin_unlock(&pseg->alldevs_lock);
         return NULL;
+    }
@@ -404,10 +413,12 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
     if ( rc )
     {
         xfree(pdev);
+        spin_unlock(&pseg->alldevs_lock);
         return NULL;
     }
 
     list_add(&pdev->alldevs_list, &pseg->alldevs_list);
+    spin_unlock(&pseg->alldevs_lock);
 
     /* update bus2bridge */
     switch ( pdev->type = pdev_type(pseg->nr, bus, devfn) )
@@ -611,15 +622,20 @@ struct pci_dev *pci_get_pdev(struct domain *d, pci_sbdf_t sbdf)
      */
     if ( !d || is_hardware_domain(d) )
     {
-        const struct pci_seg *pseg = get_pseg(sbdf.seg);
+        struct pci_seg *pseg = get_pseg(sbdf.seg);
 
         if ( !pseg )
             return NULL;
 
+        spin_lock(&pseg->alldevs_lock);
         list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
             if ( pdev->sbdf.bdf == sbdf.bdf &&
                  (!d || pdev->domain == d) )
+            {
+                spin_unlock(&pseg->alldevs_lock);
                 return pdev;
+            }
+        spin_unlock(&pseg->alldevs_lock);
     }
     else
     {
@@ -893,6 +909,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
         return -ENODEV;
 
     pcidevs_lock();
+    spin_lock(&pseg->alldevs_lock);
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
         {
@@ -907,10 +924,12 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
             }
             printk(XENLOG_DEBUG "PCI remove device %pp\n", &pdev->sbdf);
+            list_del(&pdev->alldevs_list);
             free_pdev(pseg, pdev);
             break;
         }
 
+    spin_unlock(&pseg->alldevs_lock);
     pcidevs_unlock();
     return ret;
 }
 
@@ -1363,6 +1382,7 @@ static int cf_check _dump_pci_devices(struct pci_seg *pseg, void *arg)
 
     printk("==== segment %04x ====\n", pseg->nr);
 
+    spin_lock(&pseg->alldevs_lock);
     list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
     {
         printk("%pp - ", &pdev->sbdf);
@@ -1376,6 +1396,7 @@ static int cf_check _dump_pci_devices(struct pci_seg *pseg, void *arg)
         pdev_dump_msi(pdev);
         printk("\n");
     }
+    spin_unlock(&pseg->alldevs_lock);
 
     return 0;
 }
-- 
2.36.1