
[Xen-changelog] [xen-unstable] VT-d: eliminate a mis-use of pcidevs_lock



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1316712704 -3600
# Node ID 571b6e90dfb463b4a56b2a1c8d7d00f7741ca692
# Parent  47ec1d405af9e9c0491681c50bb622d1c1f0904d
VT-d: eliminate a mis-use of pcidevs_lock

dma_pte_clear_one() shouldn't acquire this global lock for the purpose
of processing a per-domain list. Furthermore, a few lines earlier the
function has a comment stating that acquiring pcidevs_lock isn't
necessary here (whether that's really correct is another question).

Use the domain's mapping_lock instead to protect the mapped_rmrrs list.
Fold domain_rmrr_mapped() into its sole caller so that the otherwise
implicit dependency on pcidevs_lock becomes more obvious (see the
comment added there).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 47ec1d405af9 -r 571b6e90dfb4 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Thu Sep 22 18:31:02 2011 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Thu Sep 22 18:31:44 2011 +0100
@@ -589,7 +589,6 @@
     u64 pg_maddr;
     int flush_dev_iotlb;
     int iommu_domid;
-    struct list_head *rmrr_list, *tmp;
     struct mapped_rmrr *mrmrr;
 
     spin_lock(&hd->mapping_lock);
@@ -636,10 +635,9 @@
     /* if the cleared address is between mapped RMRR region,
      * remove the mapped RMRR
      */
-    spin_lock(&pcidevs_lock);
-    list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
+    spin_lock(&hd->mapping_lock);
+    list_for_each_entry ( mrmrr, &hd->mapped_rmrrs, list )
     {
-        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
         if ( addr >= mrmrr->base && addr <= mrmrr->end )
         {
             list_del(&mrmrr->list);
@@ -647,7 +645,7 @@
             break;
         }
     }
-    spin_unlock(&pcidevs_lock);
+    spin_unlock(&hd->mapping_lock);
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1824,22 +1822,6 @@
     hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
 }
 
-static int domain_rmrr_mapped(struct domain *d,
-                              struct acpi_rmrr_unit *rmrr)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(d);
-    struct mapped_rmrr *mrmrr;
-
-    list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
-    {
-        if ( mrmrr->base == rmrr->base_address &&
-             mrmrr->end == rmrr->end_address )
-            return 1;
-    }
-
-    return 0;
-}
-
 static int rmrr_identity_mapping(struct domain *d,
                                  struct acpi_rmrr_unit *rmrr)
 {
@@ -1851,8 +1833,16 @@
     ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(rmrr->base_address < rmrr->end_address);
 
-    if ( domain_rmrr_mapped(d, rmrr) )
-        return 0;
+    /*
+     * No need to acquire hd->mapping_lock, as the only theoretical race is
+     * with the insertion below (impossible due to holding pcidevs_lock).
+     */
+    list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
+    {
+        if ( mrmrr->base == rmrr->base_address &&
+             mrmrr->end == rmrr->end_address )
+            return 0;
+    }
 
     base = rmrr->base_address & PAGE_MASK_4K;
     base_pfn = base >> PAGE_SHIFT_4K;
@@ -1872,7 +1862,9 @@
         return -ENOMEM;
     mrmrr->base = rmrr->base_address;
     mrmrr->end = rmrr->end_address;
+    spin_lock(&hd->mapping_lock);
     list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
+    spin_unlock(&hd->mapping_lock);
 
     return 0;
 }
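
For readers following the locking change: the pattern the patch moves to
is that per-domain state (here the mapped_rmrrs list) is guarded by the
domain's own mapping_lock, while the global pcidevs_lock serializes only
what can actually race across the PCI device set, such as the duplicate
check in rmrr_identity_mapping() against a concurrent insertion. Below
is a minimal, self-contained sketch of that pattern in plain C, with
pthread mutexes standing in for Xen's spinlocks and a hand-rolled list
replacing list_for_each_entry(); the names domain_iommu,
clear_mapped_rmrr() and map_rmrr_once() are invented for illustration
and are not Xen interfaces.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the relevant bits of Xen's struct hvm_iommu. */
struct mapped_rmrr {
    uint64_t base, end;
    struct mapped_rmrr *next;
};

struct domain_iommu {
    pthread_mutex_t mapping_lock;       /* guards mapped_rmrrs */
    struct mapped_rmrr *mapped_rmrrs;
};

/* Global lock for cross-domain PCI device state (cf. pcidevs_lock). */
static pthread_mutex_t global_pcidevs_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Analogue of the dma_pte_clear_one() hunk: removing an entry from a
 * per-domain list needs only the per-domain lock, not the global one.
 */
static void clear_mapped_rmrr(struct domain_iommu *hd, uint64_t addr)
{
    struct mapped_rmrr **pp, *m;

    pthread_mutex_lock(&hd->mapping_lock);
    for ( pp = &hd->mapped_rmrrs; (m = *pp) != NULL; pp = &m->next )
    {
        if ( addr >= m->base && addr <= m->end )
        {
            *pp = m->next;
            free(m);
            break;
        }
    }
    pthread_mutex_unlock(&hd->mapping_lock);
}

/*
 * Analogue of the rmrr_identity_mapping() hunk: the caller holds the
 * global lock, which alone prevents two racing duplicate checks; the
 * insertion additionally takes the per-domain lock so that
 * clear_mapped_rmrr() always sees a consistent list.
 */
static int map_rmrr_once(struct domain_iommu *hd, uint64_t base, uint64_t end)
{
    struct mapped_rmrr *m;

    /* Scan without mapping_lock: only insertions (below) could race,
     * and those are excluded by the caller holding the global lock. */
    for ( m = hd->mapped_rmrrs; m != NULL; m = m->next )
        if ( m->base == base && m->end == end )
            return 0;                   /* already mapped */

    if ( (m = malloc(sizeof(*m))) == NULL )
        return -1;
    m->base = base;
    m->end = end;

    pthread_mutex_lock(&hd->mapping_lock);
    m->next = hd->mapped_rmrrs;
    hd->mapped_rmrrs = m;
    pthread_mutex_unlock(&hd->mapping_lock);

    return 0;
}

int main(void)
{
    struct domain_iommu hd = { PTHREAD_MUTEX_INITIALIZER, NULL };

    pthread_mutex_lock(&global_pcidevs_lock);
    map_rmrr_once(&hd, 0x1000, 0x1fff);
    pthread_mutex_unlock(&global_pcidevs_lock);

    clear_mapped_rmrr(&hd, 0x1800);
    printf("list empty after clear: %s\n", hd.mapped_rmrrs ? "no" : "yes");
    return 0;
}

As in the patch, taking the global lock in clear_mapped_rmrr() would add
system-wide contention without protecting anything the per-domain lock
does not already cover.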

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

