[Xen-changelog] [xen-3.2-testing] VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1216376993 -3600
# Node ID 4539e0ec232a931012b6d62c3318cdd176bca5b3
# Parent  f3eb066168077c721d40a9d7966a8b6e74d379c1
VT-d: flush iotlb of selective iommu when a domain's VT-d table is changed

When a domain's VT-d table is changed, only the iommus under which the
domain has assigned devices need to be flushed.

Signed-off-by: Yang, Xiaowei <xiaowei.yang@xxxxxxxxx>
xen-unstable changeset: 17725:9a7a6f729d2c0352a772aa274454dee1c96faa5a
xen-unstable date: Mon May 26 08:25:36 2008 +0100
---
 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c |   39 +++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/iommu.h        |    1 
 xen/include/asm-x86/iommu.h            |    1 
 3 files changed, 39 insertions(+), 2 deletions(-)

diff -r f3eb06616807 -r 4539e0ec232a xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Fri Jul 18 11:29:29 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Fri Jul 18 11:29:53 2008 +0100
@@ -543,6 +543,7 @@ void iommu_flush_all(void)
 /* clear one page's page table */
 static void dma_pte_clear_one(struct domain *domain, u64 addr)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *pte = NULL;
@@ -564,6 +565,10 @@ static void dma_pte_clear_one(struct dom
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                   addr, 1, 0);
@@ -956,7 +961,7 @@ struct iommu *iommu_alloc(void *hw_data)
     dprintk(XENLOG_INFO VTDPREFIX,
             "iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
             iommu->reg, drhd->address);
-    nr_iommus++;
+    iommu->index = nr_iommus++;
 
     if ( !iommu->reg )
     {
@@ -1107,6 +1112,8 @@ static int domain_context_mapping_one(
         iommu_flush_write_buffer(iommu);
     else
         iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+
+    set_bit(iommu->index, &hd->iommu_bitmap);
     spin_unlock_irqrestore(&iommu->lock, flags);
     return ret;
 }
@@ -1380,11 +1387,12 @@ void reassign_device_ownership(
 {
     struct hvm_iommu *source_hd = domain_hvm_iommu(source);
     struct hvm_iommu *target_hd = domain_hvm_iommu(target);
-    struct pci_dev *pdev;
+    struct pci_dev *pdev, *pdev2;
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int status;
     unsigned long flags;
+    int found = 0;
 
     gdprintk(XENLOG_INFO VTDPREFIX,
              "reassign_device-%x:%x:%x- source = %d target = %d\n",
@@ -1406,6 +1414,18 @@ void reassign_device_ownership(
     list_move(&pdev->list, &target_hd->pdev_list);
     spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
     spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
+
+    for_each_pdev ( source, pdev2 )
+    {
+        drhd = acpi_find_matched_drhd_unit(pdev2);
+        if ( drhd->iommu == iommu )
+        {
+            found = 1;
+            break;
+        }
+    }
+    if ( !found )
+        clear_bit(iommu->index, &source_hd->iommu_bitmap);
 
     status = domain_context_mapping(target, iommu, pdev);
     if ( status != 0 )
@@ -1498,6 +1518,7 @@ static int domain_context_mapped(struct 
 
 int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *pte = NULL;
@@ -1523,6 +1544,10 @@ int iommu_map_page(struct domain *d, pad
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   gfn << PAGE_SHIFT_4K, 1, 0);
@@ -1553,6 +1578,7 @@ int iommu_page_mapping(struct domain *do
 int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
                        void *hpa, size_t size, int prot)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     unsigned long start_pfn, end_pfn;
@@ -1587,6 +1613,10 @@ int iommu_page_mapping(struct domain *do
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                   iova, index, 0);
@@ -1606,6 +1636,7 @@ int iommu_page_unmapping(struct domain *
 
 void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu = NULL;
     struct dma_pte *pte = (struct dma_pte *) p2m_entry;
@@ -1613,6 +1644,10 @@ void iommu_flush(struct domain *d, dma_a
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
+
+        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
+            continue;
+
         if ( cap_caching_mode(iommu->cap) )
             iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   gfn << PAGE_SHIFT_4K, 1, 0);
diff -r f3eb06616807 -r 4539e0ec232a xen/include/asm-x86/hvm/iommu.h
--- a/xen/include/asm-x86/hvm/iommu.h	Fri Jul 18 11:29:29 2008 +0100
+++ b/xen/include/asm-x86/hvm/iommu.h	Fri Jul 18 11:29:53 2008 +0100
@@ -43,6 +43,7 @@ struct hvm_iommu {
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
     struct list_head g2m_ioport_list;  /* guest to machine ioport mapping */
     domid_t iommu_domid;           /* domain id stored in iommu */
+    u64 iommu_bitmap;              /* bitmap of iommu(s) that the domain uses */
 
     /* amd iommu support */
     int domain_id;
diff -r f3eb06616807 -r 4539e0ec232a xen/include/asm-x86/iommu.h
--- a/xen/include/asm-x86/iommu.h	Fri Jul 18 11:29:29 2008 +0100
+++ b/xen/include/asm-x86/iommu.h	Fri Jul 18 11:29:53 2008 +0100
@@ -54,6 +54,7 @@ struct iommu {
 struct iommu {
     struct list_head list;
     void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+    u32 index;         /* Sequence number of iommu */
     u32 gcmd;          /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
     u64 cap;
     u64 ecap;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
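[Editorial note: the patch's bookkeeping can be summarised as follows. Each IOMMU receives a small sequence number (iommu->index) when it is allocated; a per-domain u64 bitmap (iommu_bitmap) records which IOMMUs sit above the domain's assigned devices; and the mapping/unmapping paths flush only the IOMMUs whose bit is set. Below is a minimal, self-contained sketch of that pattern. The struct and function names are illustrative stand-ins, not the actual Xen definitions.]

/* Illustrative sketch only -- simplified types, not the Xen code. */
#include <stdint.h>
#include <stdio.h>

struct iommu {
    uint32_t index;            /* sequence number assigned at allocation */
};

struct domain_iommu_state {
    uint64_t iommu_bitmap;     /* bit n set => domain has a device behind iommu n */
};

/* Device assignment: remember that this domain now uses this iommu. */
static void note_device_assigned(struct domain_iommu_state *d, struct iommu *iommu)
{
    d->iommu_bitmap |= 1ULL << iommu->index;
}

/* Device reassignment away: clear the bit once no devices remain behind the iommu. */
static void note_last_device_removed(struct domain_iommu_state *d, struct iommu *iommu)
{
    d->iommu_bitmap &= ~(1ULL << iommu->index);
}

/* Mapping change: flush only the iommus the domain actually uses. */
static void flush_domain_mappings(struct domain_iommu_state *d,
                                  struct iommu *iommus, unsigned int nr_iommus)
{
    for ( unsigned int i = 0; i < nr_iommus; i++ )
    {
        if ( !(d->iommu_bitmap & (1ULL << iommus[i].index)) )
            continue;                       /* no devices behind this iommu: skip */
        printf("flush iotlb of iommu %u\n", (unsigned)iommus[i].index);
    }
}

int main(void)
{
    struct iommu iommus[2] = { { .index = 0 }, { .index = 1 } };
    struct domain_iommu_state dom = { 0 };

    note_device_assigned(&dom, &iommus[0]);
    note_device_assigned(&dom, &iommus[1]);
    note_last_device_removed(&dom, &iommus[0]); /* last device behind iommu 0 removed */
    flush_domain_mappings(&dom, iommus, 2);     /* prints only "flush iotlb of iommu 1" */
    return 0;
}

[Since the bitmap is a single u64, the scheme tracks at most 64 IOMMUs, far more than the handful of DRHD units typical platforms expose.]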