[Xen-changelog] [xen-unstable] Change the pcidevs_lock from rw_lock to spin_lock
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1229698352 0
# Node ID 738513b106fa262a11cc3254cd6dd67afb3a63e7
# Parent 2312cc25232b6a22136cae46b164bbec11be3687
Change the pcidevs_lock from rw_lock to spin_lock

As pcidevs_lock has changed from protecting only the alldevs_list to
protecting more than that, it no longer benefits much from being a rw_lock.
The previous patch 18906:2941b1a97c60 was also wrong to use read_lock to
protect some sensitive data (thanks to Espen for pointing that out).

Two minor fixes are also included in this patch:
a) deassign_device would deadlock trying to take pcidevs_lock when called
   from pci_release_devices; the lock acquisition is moved to the callers.
b) iommu_domain_teardown should not ASSERT that pcidevs_lock is held,
   because it only updates the domain's VT-d mappings.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
---
 xen/arch/x86/domctl.c                       |    2 +
 xen/arch/x86/irq.c                          |    8 +++----
 xen/arch/x86/msi.c                          |   10 ++++-----
 xen/arch/x86/physdev.c                      |   12 +++++------
 xen/drivers/passthrough/amd/pci_amd_iommu.c |   12 +++++------
 xen/drivers/passthrough/iommu.c             |   25 +++++++++---------------
 xen/drivers/passthrough/pci.c               |   22 ++++++++++-----------
 xen/drivers/passthrough/vtd/iommu.c         |   29 +++++++++++++---------------
 xen/include/xen/pci.h                       |    9 +++-----
 9 files changed, 62 insertions(+), 67 deletions(-)

diff -r 2312cc25232b -r 738513b106fa xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/domctl.c     Fri Dec 19 14:52:32 2008 +0000
@@ -708,7 +708,9 @@ long arch_do_domctl(
             break;
         }
         ret = 0;
+        spin_lock(&pcidevs_lock);
         ret = deassign_device(d, bus, devfn);
+        spin_unlock(&pcidevs_lock);
         gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
                  bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
diff -r 2312cc25232b -r 738513b106fa xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/irq.c        Fri Dec 19 14:52:32 2008 +0000
@@ -850,7 +850,7 @@ int map_domain_pirq(
     struct msi_desc *msi_desc;
     struct pci_dev *pdev = NULL;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(spin_is_locked(&d->event_lock));
     if ( !IS_PRIV(current->domain) )
@@ -930,7 +930,7 @@ int unmap_domain_pirq(struct domain *d,
     if ( !IS_PRIV(current->domain) )
         return -EINVAL;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(spin_is_locked(&d->event_lock));
     vector = d->arch.pirq_vector[pirq];
@@ -993,7 +993,7 @@ void free_domain_pirqs(struct domain *d)
 {
     int i;
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     spin_lock(&d->event_lock);
     for ( i = 0; i < NR_IRQS; i++ )
@@ -1001,7 +1001,7 @@ void free_domain_pirqs(struct domain *d)
         unmap_domain_pirq(d, i);
     spin_unlock(&d->event_lock);
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 extern void dump_ioapic_irq_info(void);
diff -r 2312cc25232b -r 738513b106fa xen/arch/x86/msi.c
--- a/xen/arch/x86/msi.c        Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/msi.c        Fri Dec 19 14:52:32 2008 +0000
@@ -440,7 +440,7 @@ static int msi_capability_init(struct pc
     u8 slot = PCI_SLOT(dev->devfn);
     u8 func = PCI_FUNC(dev->devfn);
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
     control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
     /* MSI Entry Initialization */
@@ -509,7 +509,7 @@ static int msix_capability_init(struct p
     u8 slot = PCI_SLOT(dev->devfn);
     u8 func = PCI_FUNC(dev->devfn);
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(desc);
     pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
@@ -574,7 +574,7 @@ static int __pci_enable_msi(struct msi_i
     int status;
     struct pci_dev *pdev;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(msi->bus, msi->devfn);
     if ( !pdev )
         return -ENODEV;
@@ -634,7 +634,7 @@ static int __pci_enable_msix(struct msi_
     u8 slot = PCI_SLOT(msi->devfn);
     u8 func = PCI_FUNC(msi->devfn);
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(msi->bus, msi->devfn);
     if ( !pdev )
         return -ENODEV;
@@ -688,7 +688,7 @@ static void __pci_disable_msix(struct ms
  */
 int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
 {
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     return msi->table_base ? __pci_enable_msix(msi, desc) :
         __pci_enable_msi(msi, desc);
diff -r 2312cc25232b -r 738513b106fa xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/physdev.c    Fri Dec 19 14:52:32 2008 +0000
@@ -100,7 +100,7 @@ static int physdev_map_pirq(struct physd
         goto free_domain;
     }
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     /* Verify or get pirq. */
     spin_lock(&d->event_lock);
     if ( map->pirq < 0 )
@@ -148,7 +148,7 @@ static int physdev_map_pirq(struct physd
 done:
     spin_unlock(&d->event_lock);
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
         free_irq_vector(vector);
 free_domain:
@@ -172,11 +172,11 @@ static int physdev_unmap_pirq(struct phy
     if ( d == NULL )
         return -ESRCH;
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     spin_lock(&d->event_lock);
     ret = unmap_domain_pirq(d, unmap->pirq);
     spin_unlock(&d->event_lock);
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     rcu_unlock_domain(d);
@@ -345,12 +345,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         irq_op.vector = assign_irq_vector(irq);
-        read_lock(&pcidevs_lock);
+        spin_lock(&pcidevs_lock);
         spin_lock(&dom0->event_lock);
         ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
                               MAP_PIRQ_TYPE_GSI, NULL);
         spin_unlock(&dom0->event_lock);
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         if ( copy_to_guest(arg, &irq_op, 1) != 0 )
             ret = -EFAULT;
diff -r 2312cc25232b -r 738513b106fa xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Fri Dec 19 14:52:32 2008 +0000
@@ -126,7 +126,7 @@ static void amd_iommu_setup_dom0_devices
     u32 l;
     int bdf;
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for ( bus = 0; bus < 256; bus++ )
     {
         for ( dev = 0; dev < 32; dev++ )
@@ -153,7 +153,7 @@ static void amd_iommu_setup_dom0_devices
             }
         }
     }
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 int amd_iov_detect(void)
@@ -282,11 +282,11 @@ static int reassign_device( struct domai
     struct amd_iommu *iommu;
     int bdf;
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pdev = pci_get_pdev_by_domain(source, bus, devfn);
     if ( !pdev )
     {
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         return -ENODEV;
     }
@@ -297,7 +297,7 @@ static int reassign_device( struct domai
     if ( !iommu )
     {
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         amd_iov_error("Fail to find iommu."
" %x:%x.%x cannot be assigned to domain %d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id); @@ -314,7 +314,7 @@ static int reassign_device( struct domai bus, PCI_SLOT(devfn), PCI_FUNC(devfn), source->domain_id, target->domain_id); - read_unlock(&pcidevs_lock); + spin_unlock(&pcidevs_lock); return 0; } diff -r 2312cc25232b -r 738513b106fa xen/drivers/passthrough/iommu.c --- a/xen/drivers/passthrough/iommu.c Fri Dec 19 14:44:40 2008 +0000 +++ b/xen/drivers/passthrough/iommu.c Fri Dec 19 14:52:32 2008 +0000 @@ -87,7 +87,7 @@ int iommu_add_device(struct pci_dev *pde if ( !pdev->domain ) return -EINVAL; - ASSERT(rw_is_locked(&pcidevs_lock)); + ASSERT(spin_is_locked(&pcidevs_lock)); hd = domain_hvm_iommu(pdev->domain); if ( !iommu_enabled || !hd->platform_ops ) @@ -117,7 +117,7 @@ int assign_device(struct domain *d, u8 b if ( !iommu_enabled || !hd->platform_ops ) return 0; - read_lock(&pcidevs_lock); + spin_lock(&pcidevs_lock); if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) ) goto done; @@ -128,7 +128,7 @@ int assign_device(struct domain *d, u8 b goto done; } done: - read_unlock(&pcidevs_lock); + spin_unlock(&pcidevs_lock); return rc; } @@ -211,7 +211,8 @@ int iommu_unmap_page(struct domain *d, u return hd->platform_ops->unmap_page(d, gfn); } -int deassign_device(struct domain *d, u8 bus, u8 devfn) +/* caller should hold the pcidevs_lock */ +int deassign_device(struct domain *d, u8 bus, u8 devfn) { struct hvm_iommu *hd = domain_hvm_iommu(d); struct pci_dev *pdev = NULL; @@ -219,20 +220,16 @@ int deassign_device(struct domain *d, u if ( !iommu_enabled || !hd->platform_ops ) return -EINVAL; - read_lock(&pcidevs_lock); + ASSERT(spin_is_locked(&pcidevs_lock)); pdev = pci_get_pdev(bus, devfn); if (!pdev) - { - read_unlock(&pcidevs_lock); return -ENODEV; - } if (pdev->domain != d) { - read_unlock(&pcidevs_lock); gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: deassign a device not owned\n"); - return -EINVAL; + return -EINVAL; } hd->platform_ops->reassign_device(d, dom0, bus, devfn); @@ -242,8 +239,6 @@ int deassign_device(struct domain *d, u d->need_iommu = 0; hd->platform_ops->teardown(d); } - - read_unlock(&pcidevs_lock); return 0; } @@ -288,7 +283,7 @@ int iommu_get_device_group(struct domain group_id = ops->get_device_group_id(bus, devfn); - read_lock(&pcidevs_lock); + spin_lock(&pcidevs_lock); for_each_pdev( d, pdev ) { if ( (pdev->bus == bus) && (pdev->devfn == devfn) ) @@ -302,13 +297,13 @@ int iommu_get_device_group(struct domain bdf |= (pdev->devfn & 0xff) << 8; if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) ) { - read_unlock(&pcidevs_lock); + spin_unlock(&pcidevs_lock); return -1; } i++; } } - read_unlock(&pcidevs_lock); + spin_unlock(&pcidevs_lock); return i; } diff -r 2312cc25232b -r 738513b106fa xen/drivers/passthrough/pci.c --- a/xen/drivers/passthrough/pci.c Fri Dec 19 14:44:40 2008 +0000 +++ b/xen/drivers/passthrough/pci.c Fri Dec 19 14:52:32 2008 +0000 @@ -28,7 +28,7 @@ LIST_HEAD(alldevs_list); -rwlock_t pcidevs_lock = RW_LOCK_UNLOCKED; +spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED; struct pci_dev *alloc_pdev(u8 bus, u8 devfn) { @@ -62,7 +62,7 @@ struct pci_dev *pci_get_pdev(int bus, in { struct pci_dev *pdev = NULL; - ASSERT(rw_is_locked(&pcidevs_lock)); + ASSERT(spin_is_locked(&pcidevs_lock)); list_for_each_entry ( pdev, &alldevs_list, alldevs_list ) if ( (pdev->bus == bus || bus == -1) && @@ -78,7 +78,7 @@ struct pci_dev *pci_get_pdev_by_domain(s { struct pci_dev *pdev = NULL; - ASSERT(rw_is_locked(&pcidevs_lock)); + ASSERT(spin_is_locked(&pcidevs_lock)); 
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
         if ( (pdev->bus == bus || bus == -1) &&
@@ -96,7 +96,7 @@ int pci_add_device(u8 bus, u8 devfn)
     struct pci_dev *pdev;
     int ret = -ENOMEM;
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pdev = alloc_pdev(bus, devfn);
     if ( !pdev )
         goto out;
@@ -113,7 +113,7 @@ int pci_add_device(u8 bus, u8 devfn)
     }
 out:
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     printk(XENLOG_DEBUG "PCI add device %02x:%02x.%x\n", bus,
            PCI_SLOT(devfn), PCI_FUNC(devfn));
     return ret;
@@ -124,7 +124,7 @@ int pci_remove_device(u8 bus, u8 devfn)
     struct pci_dev *pdev;
     int ret = -ENODEV;;
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
         {
@@ -138,7 +138,7 @@ int pci_remove_device(u8 bus, u8 devfn)
             break;
         }
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     return ret;
 }
@@ -187,7 +187,7 @@ void pci_release_devices(struct domain *
     struct pci_dev *pdev;
     u8 bus, devfn;
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pci_clean_dpci_irqs(d);
     while ( (pdev = pci_get_pdev_by_domain(d, -1, -1)) )
     {
@@ -195,7 +195,7 @@ void pci_release_devices(struct domain *
         bus = pdev->bus; devfn = pdev->devfn;
         deassign_device(d, bus, devfn);
     }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 #ifdef SUPPORT_MSI_REMAPPING
@@ -205,7 +205,7 @@ static void dump_pci_devices(unsigned ch
     struct msi_desc *msi;
     printk("==== PCI devices ====\n");
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
     {
@@ -217,7 +217,7 @@ static void dump_pci_devices(unsigned ch
         printk(">\n");
     }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 static int __init setup_dump_pcidevs(void)
diff -r 2312cc25232b -r 738513b106fa xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Dec 19 14:52:32 2008 +0000
@@ -1037,7 +1037,7 @@ static int domain_context_mapping_one(
     struct pci_dev *pdev = NULL;
     int agaw;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     spin_lock(&iommu->lock);
     maddr = bus_to_context_maddr(iommu, bus);
     context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
@@ -1215,7 +1215,7 @@ static int domain_context_mapping(struct
     if ( !drhd )
         return -ENODEV;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     type = pdev_type(bus, devfn);
     switch ( type )
@@ -1297,7 +1297,7 @@ static int domain_context_unmap_one(
     struct context_entry *context, *context_entries;
     u64 maddr;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     spin_lock(&iommu->lock);
     maddr = bus_to_context_maddr(iommu, bus);
@@ -1399,7 +1399,7 @@ static int reassign_device_ownership(
     struct iommu *pdev_iommu;
     int ret, found = 0;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev_by_domain(source, bus, devfn);
     if (!pdev)
@@ -1439,7 +1439,6 @@ void iommu_domain_teardown(struct domain
     if ( list_empty(&acpi_drhd_units) )
         return;
-    ASSERT(rw_is_locked(&pcidevs_lock));
     spin_lock(&hd->mapping_lock);
     iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
     hd->pgd_maddr = 0;
@@ -1529,7 +1528,7 @@ static int iommu_prepare_rmrr_dev(struct
     u64 base, end;
     unsigned long base_pfn, end_pfn;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(rmrr->base_address < rmrr->end_address);
     base = rmrr->base_address & PAGE_MASK_4K;
@@ -1554,7 +1553,7 @@ static int intel_iommu_add_device(struct
     u16 bdf;
     int ret, i;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     if ( !pdev->domain )
         return -EINVAL;
@@ -1617,7 +1616,7 @@ static void setup_dom0_devices(struct do
     hd = domain_hvm_iommu(d);
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for ( bus = 0; bus < 256; bus++ )
     {
         for ( dev = 0; dev < 32; dev++ )
@@ -1637,7 +1636,7 @@ static void setup_dom0_devices(struct do
             }
         }
     }
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 void clear_fault_bits(struct iommu *iommu)
@@ -1710,7 +1709,7 @@ static void setup_dom0_rmrr(struct domai
     u16 bdf;
     int ret, i;
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for_each_rmrr_device ( rmrr, bdf, i )
     {
         ret = iommu_prepare_rmrr_dev(d, rmrr, PCI_BUS(bdf), PCI_DEVFN2(bdf));
@@ -1718,7 +1717,7 @@ static void setup_dom0_rmrr(struct domai
             gdprintk(XENLOG_ERR VTDPREFIX,
                      "IOMMU: mapping reserved region failed\n");
     }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 int intel_vtd_setup(void)
@@ -1771,15 +1770,15 @@ int device_assigned(u8 bus, u8 devfn)
 {
     struct pci_dev *pdev;
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pdev = pci_get_pdev_by_domain(dom0, bus, devfn);
     if (!pdev)
     {
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         return -1;
     }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     return 0;
 }
@@ -1793,7 +1792,7 @@ int intel_iommu_assign_device(struct dom
     if ( list_empty(&acpi_drhd_units) )
         return -ENODEV;
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(bus, devfn);
     if (!pdev)
         return -ENODEV;
diff -r 2312cc25232b -r 738513b106fa xen/include/xen/pci.h
--- a/xen/include/xen/pci.h     Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/include/xen/pci.h     Fri Dec 19 14:52:32 2008 +0000
@@ -42,13 +42,12 @@ struct pci_dev {
     list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
 /*
- * The pcidevs_lock write-lock must be held when doing alloc_pdev() or
- * free_pdev(). Never de-reference pdev without holding pdev->lock or
- * pcidevs_lock. Always aquire pcidevs_lock before pdev->lock when
- * doing free_pdev().
+ * The pcidevs_lock protect alldevs_list, and the assignment for the
+ * devices, it also sync the access to the msi capability that is not
+ * interrupt handling related (the mask bit register).
  */
-extern rwlock_t pcidevs_lock;
+extern spinlock_t pcidevs_lock;
 struct pci_dev *alloc_pdev(u8 bus, u8 devfn);
 void free_pdev(struct pci_dev *pdev);
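
To make fix (a) concrete: with a plain, non-recursive spinlock, deassign_device() must not take pcidevs_lock itself, because pci_release_devices() already holds it across its whole sweep; instead the callers take the lock and deassign_device() only asserts that it is held (the new ASSERT and the domctl hunk above). The standalone C sketch below is an editor's illustration of that convention, not code from the patch or from Xen: a pthread mutex plus a bookkeeping flag stand in for Xen's spinlock_t and spin_is_locked(), and a toy ownership array stands in for the real pdev list.

/*
 * Editor's sketch (not part of the patch): a standalone model of the
 * "caller holds pcidevs_lock" convention from fix (a).  A pthread mutex
 * plus a flag stands in for Xen's spinlock_t and spin_is_locked(); the
 * owner[] array stands in for the real pdev list.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define NDEVS 4

static pthread_mutex_t pcidevs_lock = PTHREAD_MUTEX_INITIALIZER;
static int pcidevs_locked;                 /* stand-in for spin_is_locked() */
static int owner[NDEVS] = { 1, 1, 0, 1 };  /* toy device -> owning domain map */

static void lock_pcidevs(void)   { pthread_mutex_lock(&pcidevs_lock); pcidevs_locked = 1; }
static void unlock_pcidevs(void) { pcidevs_locked = 0; pthread_mutex_unlock(&pcidevs_lock); }

/* Caller must already hold pcidevs_lock; mirrors the ASSERT the patch adds. */
static int deassign_device(int domid, int dev)
{
    assert(pcidevs_locked);
    if (owner[dev] != domid)
        return -1;          /* "deassign a device not owned" */
    owner[dev] = 0;         /* hand the device back to domain 0 */
    return 0;
}

/* Like the domctl path: one deassign wrapped in its own lock/unlock pair. */
static void domctl_deassign(int domid, int dev)
{
    lock_pcidevs();
    deassign_device(domid, dev);
    unlock_pcidevs();
}

/*
 * Like pci_release_devices(): the whole sweep runs under one acquisition,
 * so deassign_device() must not try to take the (non-recursive) lock again.
 */
static void release_devices(int domid)
{
    lock_pcidevs();
    for (int dev = 0; dev < NDEVS; dev++)
        if (owner[dev] == domid)
            deassign_device(domid, dev);
    unlock_pcidevs();
}

int main(void)
{
    domctl_deassign(1, 0);
    release_devices(1);
    for (int dev = 0; dev < NDEVS; dev++)
        printf("device %d -> domain %d\n", dev, owner[dev]);
    return 0;
}

Building this with "cc -pthread sketch.c" and running it should show every device handed back to domain 0. If deassign_device() tried to re-acquire the lock itself, the release_devices() loop would self-deadlock on a non-recursive lock, which is the situation described in fix (a).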
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog