[Xen-changelog] [xen-unstable] EPT/VT-d: bug fix for EPT/VT-d table sharing
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1294648832 0
# Node ID 66e8062894641e5622aa6adc54297d5700b1cea3
# Parent  946d84529a07c81a11636f136fba0d682c939c6c
EPT/VT-d: bug fix for EPT/VT-d table sharing

This patch makes the following changes:
1) Moves the EPT/VT-d sharing initialization back to the point where it is
   actually needed, to make sure vmx_ept_vpid_cap has been initialized first.
2) Adds a page order parameter to iommu_pte_flush() to tell VT-d what size
   of page to flush.
3) Adds a hap_2mb flag to ease performance comparisons between the base 4KB
   EPT page size and the 2MB/1GB superpage support.

Signed-off-by: Allen Kay <allen.m.kay@xxxxxxxxx>
---
 xen/arch/x86/mm/hap/p2m-ept.c       |    6 +-
 xen/arch/x86/mm/p2m.c               |    5 +-
 xen/drivers/passthrough/vtd/iommu.c |   90 ++++++++++++++----------------
 xen/include/xen/iommu.h             |    2
 4 files changed, 45 insertions(+), 58 deletions(-)

diff -r 946d84529a07 -r 66e806289464 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Sat Jan 08 11:07:18 2011 +0000
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Mon Jan 10 08:40:32 2011 +0000
@@ -451,12 +451,12 @@ out:
     if ( rv && iommu_enabled && need_iommu(p2m->domain) && need_modify_vtd_table )
     {
         if ( iommu_hap_pt_share )
-            iommu_pte_flush(d, gfn, (u64*)ept_entry, vtd_pte_present);
+            iommu_pte_flush(d, gfn, (u64*)ept_entry, order, vtd_pte_present);
         else
         {
             if ( p2mt == p2m_ram_rw )
             {
-                if ( order == EPT_TABLE_ORDER )
+                if ( order > 0 )
                 {
                     for ( i = 0; i < (1 << order); i++ )
                         iommu_map_page(
@@ -469,7 +469,7 @@ out:
             }
             else
             {
-                if ( order == EPT_TABLE_ORDER )
+                if ( order > 0 )
                 {
                     for ( i = 0; i < (1 << order); i++ )
                         iommu_unmap_page(p2m->domain, gfn - offset + i);

diff -r 946d84529a07 -r 66e806289464 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Sat Jan 08 11:07:18 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Mon Jan 10 08:40:32 2011 +0000
@@ -42,6 +42,9 @@
 /* turn on/off 1GB host page table support for hap, default on */
 static bool_t __read_mostly opt_hap_1gb = 1;
 boolean_param("hap_1gb", opt_hap_1gb);
+
+static bool_t __read_mostly opt_hap_2mb = 1;
+boolean_param("hap_2mb", opt_hap_2mb);

 /* Printouts */
 #define P2M_PRINTK(_f, _a...) \
@@ -1779,7 +1782,7 @@ int set_p2m_entry(struct p2m_domain *p2m
         order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << 18) - 1)) == 0) &&
                   hvm_hap_has_1gb(d) && opt_hap_1gb ) ? 18 :
                 ((((gfn | mfn_x(mfn) | todo) & ((1ul << 9) - 1)) == 0) &&
-                 hvm_hap_has_2mb(d)) ? 9 : 0;
+                 hvm_hap_has_2mb(d) && opt_hap_2mb) ? 9 : 0;
     else
         order = 0;

diff -r 946d84529a07 -r 66e806289464 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Sat Jan 08 11:07:18 2011 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c       Mon Jan 10 08:40:32 2011 +0000
@@ -518,24 +518,9 @@ static int inline iommu_flush_iotlb_dsi(
     return status;
 }

-static int inline get_alignment(u64 base, unsigned int size)
-{
-    int t = 0;
-    u64 end;
-
-    end = base + size - 1;
-    while ( base != end )
-    {
-        t++;
-        base >>= 1;
-        end >>= 1;
-    }
-    return t;
-}
-
 static int inline iommu_flush_iotlb_psi(
     struct iommu *iommu, u16 did, u64 addr, unsigned int pages,
-    int flush_non_present_entry, int flush_dev_iotlb)
+    int order, int flush_non_present_entry, int flush_dev_iotlb)
 {
     unsigned int align;
     struct iommu_flush *flush = iommu_get_flush(iommu);
@@ -548,17 +533,12 @@ static int inline iommu_flush_iotlb_psi(
     if ( !cap_pgsel_inv(iommu->cap) )
         return iommu_flush_iotlb_dsi(iommu, did,
                    flush_non_present_entry, flush_dev_iotlb);
-    /*
-     * PSI requires page size is 2 ^ x, and the base address is naturally
-     * aligned to the size
-     */
-    align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
     /* Fallback to domain selective flush if size is too big */
-    if ( align > cap_max_amask_val(iommu->cap) )
+    if ( order > cap_max_amask_val(iommu->cap) )
         return iommu_flush_iotlb_dsi(iommu, did,
                    flush_non_present_entry, flush_dev_iotlb);
-    addr >>= PAGE_SHIFT_4K + align;
-    addr <<= PAGE_SHIFT_4K + align;
+    addr >>= PAGE_SHIFT_4K + order;
+    addr <<= PAGE_SHIFT_4K + order;

     /* apply platform specific errata workarounds */
     vtd_ops_preamble_quirk(iommu);
@@ -634,8 +614,8 @@ static void dma_pte_clear_one(struct dom
             iommu_domid= domain_iommu_domid(domain, iommu);
             if ( iommu_domid == -1 )
                 continue;
-            if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
-                                       addr, 1, 0, flush_dev_iotlb) )
+            if ( iommu_flush_iotlb_psi(iommu, iommu_domid, addr,
+                                       1, 0, 0, flush_dev_iotlb) )
                 iommu_flush_write_buffer(iommu);
         }
     }
@@ -1710,7 +1690,7 @@ static int intel_iommu_map_page(
         if ( iommu_domid == -1 )
             continue;
         if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
-                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1,
+                                   (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0,
                                    !dma_pte_present(old), flush_dev_iotlb) )
             iommu_flush_write_buffer(iommu);
     }
@@ -1729,7 +1709,8 @@ static int intel_iommu_unmap_page(struct
     return 0;
 }

-void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int present)
+void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+                     int order, int present)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu = NULL;
@@ -1751,7 +1732,7 @@ void iommu_pte_flush(struct domain *d, u
             continue;
         if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
                                    (paddr_t)gfn << PAGE_SHIFT_4K, 1,
-                                   !present, flush_dev_iotlb) )
+                                   order, !present, flush_dev_iotlb) )
             iommu_flush_write_buffer(iommu);
     }
 }
@@ -1767,6 +1748,28 @@ static int vtd_ept_page_compatible(struc
         return 0;

     return 1;
+}
+
+static bool_t vtd_ept_share(void)
+{
+    struct acpi_drhd_unit *drhd;
+    struct iommu *iommu;
+    bool_t share = TRUE;
+
+    /* sharept defaults to 0 for now, default to 1 when feature matures */
+    if ( !sharept )
+        share = FALSE;
+
+    /*
+     * Determine whether EPT and VT-d page tables can be shared or not.
+     */
+    for_each_drhd_unit ( drhd )
+    {
+        iommu = drhd->iommu;
+        if ( !vtd_ept_page_compatible(drhd->iommu) )
+            share = FALSE;
+    }
+    return share;
 }

 /*
@@ -1779,11 +1782,13 @@ void iommu_set_pgd(struct domain *d)

     ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );

-    if ( !iommu_hap_pt_share )
-        return;
-
+    iommu_hap_pt_share = vtd_ept_share();
     pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
     hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
+
+    dprintk(XENLOG_INFO VTDPREFIX,
+            "VT-d page table %s with EPT table\n",
+            iommu_hap_pt_share ? "shares" : "not sharing");
 }

 static int domain_rmrr_mapped(struct domain *d,
@@ -2036,27 +2041,6 @@ static int init_vtd_hw(void)
         }
     }
     iommu_flush_all();
-
-    /*
-     * Determine whether EPT and VT-d page tables can be shared or not.
-     */
-    iommu_hap_pt_share = TRUE;
-    for_each_drhd_unit ( drhd )
-    {
-        iommu = drhd->iommu;
-        if ( (drhd->iommu->nr_pt_levels != VTD_PAGE_TABLE_LEVEL_4) ||
-             !vtd_ept_page_compatible(drhd->iommu) )
-            iommu_hap_pt_share = FALSE;
-    }
-
-    /* keep boot flag sharept as safe fallback. remove after feature matures */
-    if ( !sharept )
-        iommu_hap_pt_share = FALSE;
-
-    dprintk(XENLOG_INFO VTDPREFIX,
-            "VT-d page table %sshared with EPT table\n",
-            iommu_hap_pt_share ? "" : "not ");
-
     return 0;
 }

diff -r 946d84529a07 -r 66e806289464 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h   Sat Jan 08 11:07:18 2011 +0000
+++ b/xen/include/xen/iommu.h   Mon Jan 10 08:40:32 2011 +0000
@@ -85,7 +85,7 @@ int iommu_map_page(struct domain *d, uns
 int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                    unsigned int flags);
 int iommu_unmap_page(struct domain *d, unsigned long gfn);
-void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int present);
+void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order, int present);
 void iommu_set_pgd(struct domain *d);
 void iommu_domain_teardown(struct domain *d);
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
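For change 2) above, the reworked iommu_flush_iotlb_psi() takes the page order straight from its caller instead of recomputing an alignment with the removed get_alignment() helper: it compares the order against the IOMMU's maximum address-mask value and, if page-selective invalidation (PSI) can cover it, masks the address down to a naturally aligned base. The fragment below is a minimal standalone sketch of that logic, not the Xen code; psi_base_sketch and MAX_AMASK_VAL are made-up names, and the real driver reads the limit from cap_max_amask_val(iommu->cap) and falls back to iommu_flush_iotlb_dsi() when the order is too large.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K  12
#define MAX_AMASK_VAL   9  /* assumed hardware limit; the real driver reads cap_max_amask_val() */

/*
 * Sketch only: returns 0 and writes the naturally aligned PSI base for a
 * flush of 2^order 4KB pages, or -1 when the order exceeds the assumed
 * address-mask limit (the point where the driver would fall back to a
 * domain-selective invalidation instead).
 */
static int psi_base_sketch(uint64_t addr, int order, uint64_t *base)
{
    if ( order > MAX_AMASK_VAL )
        return -1;

    /* PSI requires the address to be aligned to the 2^order page run. */
    addr >>= PAGE_SHIFT_4K + order;
    addr <<= PAGE_SHIFT_4K + order;
    *base = addr;
    return 0;
}

int main(void)
{
    uint64_t base;

    /* A 2MB mapping (order 9) whose address is not superpage aligned. */
    if ( psi_base_sketch(0x12345678ULL, 9, &base) == 0 )
        printf("PSI base: %#llx\n", (unsigned long long)base);

    /* A 1GB mapping (order 18) is too big for the assumed mask limit. */
    if ( psi_base_sketch(0x40000000ULL, 18, &base) != 0 )
        printf("order 18: fall back to DSI\n");

    return 0;
}

This also mirrors why dma_pte_clear_one() and intel_iommu_map_page() now pass an explicit order of 0 for their single-4KB-page flushes.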
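Change 3) hooks into the superpage-order selection in set_p2m_entry(): an entry is only given order 18 (1GB) or order 9 (2MB) when gfn, mfn and the remaining count are all aligned to the superpage size and the corresponding hap_1gb/hap_2mb option allows it. A minimal sketch of that decision follows; choose_order() and its boolean parameters are illustrative stand-ins for hvm_hap_has_1gb()/hvm_hap_has_2mb() and the opt_hap_1gb/opt_hap_2mb flags.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the order choice made by set_p2m_entry() after this patch. */
static int choose_order(unsigned long gfn, unsigned long mfn,
                        unsigned long todo,
                        bool has_1gb, bool opt_1gb,
                        bool has_2mb, bool opt_2mb)
{
    unsigned long bits = gfn | mfn | todo;

    if ( ((bits & ((1ul << 18) - 1)) == 0) && has_1gb && opt_1gb )
        return 18;               /* 1GB superpage: 2^18 * 4KB */
    if ( ((bits & ((1ul << 9) - 1)) == 0) && has_2mb && opt_2mb )
        return 9;                /* 2MB superpage: 2^9 * 4KB */
    return 0;                    /* plain 4KB page */
}

int main(void)
{
    /* A 2MB-aligned region: order 9 normally, order 0 once 2MB is disabled. */
    printf("%d\n", choose_order(0x200, 0x400, 0x200, true, true, true, true));
    printf("%d\n", choose_order(0x200, 0x400, 0x200, true, true, true, false));
    return 0;
}

Since boolean_param("hap_2mb", opt_hap_2mb) registers a boot option, passing hap_2mb=0 on the Xen command line keeps such regions at 4KB EPT mappings (hap_1gb=0 does the same for 1GB), which is the performance-comparison knob the commit message describes.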