[Xen-changelog] [xen-unstable] VT-d: improve RMRR region handling
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261489152 0
# Node ID d30244049f7e10fd8b87b19eeace19a2e104b6a0
# Parent  bde24590c13a65a55b3bb240d47765cfd935fbc6
VT-d: improve RMRR region handling

This patch improves RMRR region handling as follows:

1) Get rid of duplicated RMRR mappings: different devices may share the
same RMRR region. When such devices are assigned to the same guest, the
region only needs to be mapped once, because an RMRR region must be
identity mapped. Add a per-domain list of mapped RMRRs to achieve this.

2) There is no need to call domain_context_mapping to map the device
again in iommu_prepare_rmrr_dev, so drop that call, and rename
iommu_prepare_rmrr_dev to rmrr_identity_mapping, which better describes
what the function now does.

3) A device may have more than one RMRR region. Remove the "break" in
intel_iommu_add_device so that all RMRR regions of a device get mapped.

Signed-off-by: Weidong Han <Weidong.han@xxxxxxxxx>
---
 xen/drivers/passthrough/iommu.c     |   11 +++
 xen/drivers/passthrough/vtd/iommu.c |  111 ++++++++++++++++++++++--------------
 xen/include/xen/hvm/iommu.h         |    7 ++
 3 files changed, 85 insertions(+), 44 deletions(-)

diff -r bde24590c13a -r d30244049f7e xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Tue Dec 22 11:33:15 2009 +0000
+++ b/xen/drivers/passthrough/iommu.c   Tue Dec 22 13:39:12 2009 +0000
@@ -95,6 +95,7 @@ int iommu_domain_init(struct domain *dom
 
     spin_lock_init(&hd->mapping_lock);
     INIT_LIST_HEAD(&hd->g2m_ioport_list);
+    INIT_LIST_HEAD(&hd->mapped_rmrrs);
 
     if ( !iommu_enabled )
         return 0;
@@ -187,8 +188,9 @@ void iommu_domain_destroy(struct domain
 void iommu_domain_destroy(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *tmp;
+    struct list_head *ioport_list, *rmrr_list, *tmp;
     struct g2m_ioport *ioport;
+    struct mapped_rmrr *mrmrr;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return;
@@ -210,6 +212,13 @@ void iommu_domain_destroy(struct domain
         ioport = list_entry(ioport_list, struct g2m_ioport, list);
         list_del(&ioport->list);
         xfree(ioport);
+    }
+
+    list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
+    {
+        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
+        list_del(&mrmrr->list);
+        xfree(mrmrr);
     }
 }
 
diff -r bde24590c13a -r d30244049f7e xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Dec 22 11:33:15 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Dec 22 13:39:12 2009 +0000
@@ -551,6 +551,8 @@ static void dma_pte_clear_one(struct dom
     u64 pg_maddr;
     int flush_dev_iotlb;
     int iommu_domid;
+    struct list_head *rmrr_list, *tmp;
+    struct mapped_rmrr *mrmrr;
 
     spin_lock(&hd->mapping_lock);
     /* get last level pte */
@@ -592,6 +594,22 @@ static void dma_pte_clear_one(struct dom
     }
 
     unmap_vtd_domain_page(page);
+
+    /* if the cleared address is between mapped RMRR region,
+     * remove the mapped RMRR
+     */
+    spin_lock(&pcidevs_lock);
+    list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
+    {
+        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
+        if ( addr >= mrmrr->base && addr <= mrmrr->end )
+        {
+            list_del(&mrmrr->list);
+            xfree(mrmrr);
+            break;
+        }
+    }
+    spin_unlock(&pcidevs_lock);
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1261,21 +1279,6 @@ static int domain_context_mapping(struct
     u8 secbus;
     struct pci_dev *pdev = pci_get_pdev(bus, devfn);
 
-    if ( pdev == NULL )
-    {
-        /* We can reach here by setup_dom0_rmrr() -> iommu_prepare_rmrr_dev()
-         * -> domain_context_mapping().
-         * In the case a user enables VT-d and disables USB (that usually needs
-         * RMRR) in BIOS, we can't discover the BDF of the USB controller in
-         * setup_dom0_devices(), but the ACPI RMRR structures may still contain
-         * the BDF and at last pci_get_pdev() returns NULL here.
-         */
-        gdprintk(XENLOG_WARNING VTDPREFIX,
-                 "domain_context_mapping: can't find bdf = %x:%x.%x\n",
-                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
-        return 0;
-    }
-
     drhd = acpi_find_matched_drhd_unit(pdev);
     if ( !drhd )
         return -ENODEV;
@@ -1601,17 +1604,36 @@ static int intel_iommu_unmap_page(struct
     return 0;
 }
 
-static int iommu_prepare_rmrr_dev(struct domain *d,
-                                  struct acpi_rmrr_unit *rmrr,
-                                  u8 bus, u8 devfn)
-{
-    int ret = 0;
+static int domain_rmrr_mapped(struct domain *d,
+                              struct acpi_rmrr_unit *rmrr)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    struct mapped_rmrr *mrmrr;
+
+    list_for_each_entry( mrmrr, &hd->mapped_rmrrs, list )
+    {
+        if ( mrmrr->base == rmrr->base_address &&
+             mrmrr->end == rmrr->end_address )
+            return 1;
+    }
+
+    return 0;
+}
+
+static int rmrr_identity_mapping(struct domain *d,
+                                 struct acpi_rmrr_unit *rmrr)
+{
     u64 base, end;
     unsigned long base_pfn, end_pfn;
+    struct mapped_rmrr *mrmrr;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
 
     ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(rmrr->base_address < rmrr->end_address);
-
+
+    if ( domain_rmrr_mapped(d, rmrr) )
+        return 0;
+
     base = rmrr->base_address & PAGE_MASK_4K;
     base_pfn = base >> PAGE_SHIFT_4K;
     end = PAGE_ALIGN_4K(rmrr->end_address);
@@ -1619,13 +1641,19 @@ static int iommu_prepare_rmrr_dev(struct
 
     while ( base_pfn < end_pfn )
     {
-        intel_iommu_map_page(d, base_pfn, base_pfn);
+        if ( intel_iommu_map_page(d, base_pfn, base_pfn) )
+            return -1;
         base_pfn++;
     }
 
-    ret = domain_context_mapping(d, bus, devfn);
-
-    return ret;
+    mrmrr = xmalloc(struct mapped_rmrr);
+    if ( !mrmrr )
+        return -ENOMEM;
+    mrmrr->base = rmrr->base_address;
+    mrmrr->end = rmrr->end_address;
+    list_add_tail(&mrmrr->list, &hd->mapped_rmrrs);
+
+    return 0;
 }
 
 static int intel_iommu_add_device(struct pci_dev *pdev)
@@ -1651,12 +1679,10 @@ static int intel_iommu_add_device(struct
     {
         if ( PCI_BUS(bdf) == pdev->bus &&
              PCI_DEVFN2(bdf) == pdev->devfn )
         {
-            ret = iommu_prepare_rmrr_dev(pdev->domain, rmrr,
-                                         pdev->bus, pdev->devfn);
+            ret = rmrr_identity_mapping(pdev->domain, rmrr);
             if ( ret )
                 gdprintk(XENLOG_ERR VTDPREFIX,
                          "intel_iommu_add_device: RMRR mapping failed\n");
-            break;
         }
     }
@@ -1690,11 +1716,8 @@ static int intel_iommu_remove_device(str
 
 static void setup_dom0_devices(struct domain *d)
 {
-    struct hvm_iommu *hd;
     struct pci_dev *pdev;
     int bus, devfn;
-
-    hd = domain_hvm_iommu(d);
 
     spin_lock(&pcidevs_lock);
     for ( bus = 0; bus < 256; bus++ )
@@ -1829,7 +1852,7 @@ static void setup_dom0_rmrr(struct domai
     spin_lock(&pcidevs_lock);
     for_each_rmrr_device ( rmrr, bdf, i )
     {
-        ret = iommu_prepare_rmrr_dev(d, rmrr, PCI_BUS(bdf), PCI_DEVFN2(bdf));
+        ret = rmrr_identity_mapping(d, rmrr);
         if ( ret )
             dprintk(XENLOG_ERR VTDPREFIX,
                     "IOMMU: mapping reserved region failed\n");
@@ -1973,25 +1996,27 @@ static int intel_iommu_assign_device(str
     if ( ret )
         goto done;
 
+    /* FIXME: Because USB RMRR conflicts with guest bios region,
+     * ignore USB RMRR temporarily.
+     */
+    if ( is_usb_device(bus, devfn) )
+    {
+        ret = 0;
+        goto done;
+    }
+
     /* Setup rmrr identity mapping */
     for_each_rmrr_device( rmrr, bdf, i )
     {
         if ( PCI_BUS(bdf) == bus && PCI_DEVFN2(bdf) == devfn )
         {
-            /* FIXME: Because USB RMRR conflicts with guest bios region,
-             * ignore USB RMRR temporarily.
-             */
-            if ( is_usb_device(bus, devfn) )
+            ret = rmrr_identity_mapping(d, rmrr);
+            if ( ret )
             {
-                ret = 0;
+                gdprintk(XENLOG_ERR VTDPREFIX,
+                         "IOMMU: mapping reserved region failed\n");
                 goto done;
             }
-
-            ret = iommu_prepare_rmrr_dev(d, rmrr, bus, devfn);
-            if ( ret )
-                gdprintk(XENLOG_ERR VTDPREFIX,
-                         "IOMMU: mapping reserved region failed\n");
-            goto done;
         }
     }
 
diff -r bde24590c13a -r d30244049f7e xen/include/xen/hvm/iommu.h
--- a/xen/include/xen/hvm/iommu.h       Tue Dec 22 11:33:15 2009 +0000
+++ b/xen/include/xen/hvm/iommu.h       Tue Dec 22 13:39:12 2009 +0000
@@ -29,12 +29,19 @@ struct g2m_ioport {
     unsigned int np;
 };
 
+struct mapped_rmrr {
+    struct list_head list;
+    u64 base;
+    u64 end;
+};
+
 struct hvm_iommu {
     u64  pgd_maddr;                 /* io page directory machine address */
     spinlock_t mapping_lock;        /* io page table lock */
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
     struct list_head g2m_ioport_list;   /* guest to machine ioport mapping */
     u64  iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
+    struct list_head mapped_rmrrs;
 
     /* amd iommu support */
     int domain_id;
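The core of change 1) is a simple dedup pattern: keep a per-domain list of
already-mapped RMRR ranges and consult it before creating identity mappings.
The standalone sketch below illustrates just that pattern in plain C; note
that rmrr_range, rmrr_mapped and rmrr_identity_map are hypothetical stand-ins
for Xen's struct mapped_rmrr, domain_rmrr_mapped and rmrr_identity_mapping,
and a singly linked list replaces Xen's list_head machinery, so this is an
illustration of the idea rather than the actual hypervisor code.

/* Minimal sketch of the patch's RMRR dedup pattern (not the Xen API). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rmrr_range {                   /* stand-in for struct mapped_rmrr */
    uint64_t base, end;
    struct rmrr_range *next;          /* singly linked, unlike list_head */
};

/* Return 1 if [base, end] is already on the per-domain list. */
static int rmrr_mapped(const struct rmrr_range *head,
                       uint64_t base, uint64_t end)
{
    for ( ; head; head = head->next )
        if ( head->base == base && head->end == end )
            return 1;
    return 0;
}

/* Identity-map an RMRR once; later requests for the same range
 * (e.g. from other devices sharing it) become no-ops. */
static int rmrr_identity_map(struct rmrr_range **head,
                             uint64_t base, uint64_t end)
{
    struct rmrr_range *r;

    if ( rmrr_mapped(*head, base, end) )
        return 0;                     /* already mapped: nothing to do */

    /* ... identity-map each 4K page in [base, end] here ... */
    printf("mapping RMRR %#llx-%#llx\n",
           (unsigned long long)base, (unsigned long long)end);

    if ( (r = malloc(sizeof(*r))) == NULL )
        return -1;
    r->base = base;
    r->end  = end;
    r->next = *head;                  /* record it for future lookups */
    *head = r;
    return 0;
}

int main(void)
{
    struct rmrr_range *mapped = NULL;

    /* Two devices sharing one RMRR: the second call is a no-op. */
    rmrr_identity_map(&mapped, 0xdb000000ULL, 0xdbffffffULL);
    rmrr_identity_map(&mapped, 0xdb000000ULL, 0xdbffffffULL);
    return 0;
}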