[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] rewrite ept_change_entry_type_global in a recursive way
VMX: rewrite ept_change_entry_type_global in a recursive way signed-off-by: Xin Li <xin.li@xxxxxxxxx> diff -r 9db8fc1ca2ef xen/arch/x86/mm/hap/p2m-ept.c --- a/xen/arch/x86/mm/hap/p2m-ept.c Thu Jun 10 17:30:23 2010 +0100 +++ b/xen/arch/x86/mm/hap/p2m-ept.c Thu Jun 10 04:37:37 2010 -0700 @@ -31,6 +31,9 @@ #include <asm/hvm/cacheattr.h> #include <xen/keyhandler.h> #include <xen/softirq.h> + +#define is_epte_present(ept_entry) ((ept_entry)->epte & 0x7) +#define is_epte_superpage(ept_entry) ((ept_entry)->sp) /* Non-ept "lock-and-check" wrapper */ static int ept_pod_check_and_populate(struct domain *d, unsigned long gfn, @@ -105,7 +108,7 @@ static int ept_set_middle_entry(struct d ept_entry->emt = 0; ept_entry->ipat = 0; - ept_entry->sp_avail = 0; + ept_entry->sp = 0; ept_entry->avail1 = 0; ept_entry->mfn = page_to_mfn(pg); ept_entry->avail2 = 0; @@ -141,7 +144,7 @@ static int ept_next_level(struct domain ept_entry = (*table) + index; - if ( !(ept_entry->epte & 0x7) ) + if ( !is_epte_present(ept_entry) ) { if ( ept_entry->avail1 == p2m_populate_on_demand ) return GUEST_TABLE_POD_PAGE; @@ -153,8 +156,8 @@ static int ept_next_level(struct domain return GUEST_TABLE_MAP_FAILED; } - /* The only time sp_avail would be set here is if we had hit a superpage */ - if ( ept_entry->sp_avail ) + /* The only time sp would be set here is if we had hit a superpage */ + if ( is_epte_superpage(ept_entry) ) return GUEST_TABLE_SUPER_PAGE; else { @@ -201,7 +204,7 @@ static int ept_split_large_page(struct d split_entry->emt = ept_entry->emt; split_entry->ipat = ept_entry->ipat; - split_entry->sp_avail = (level > 1) ? 1 : 0; + split_entry->sp = (level > 1) ? 
1 : 0; split_entry->mfn = s_mfn; @@ -243,13 +246,12 @@ ept_set_entry(struct domain *d, unsigned uint8_t ipat = 0; int need_modify_vtd_table = 1; int needs_sync = 1; - struct p2m_domain *p2m = p2m_get_hostp2m(d); if ( order != 0 ) if ( (gfn & ((1UL << order) - 1)) ) return 1; - table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); + table = map_domain_page(ept_get_asr(d)); ASSERT(table != NULL); @@ -279,7 +281,7 @@ ept_set_entry(struct domain *d, unsigned /* We reached the level we're looking for */ /* No need to flush if the old entry wasn't valid */ - if ( !(ept_entry->epte & 7) ) + if ( !is_epte_present(ept_entry) ) needs_sync = 0; if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) || @@ -288,7 +290,7 @@ ept_set_entry(struct domain *d, unsigned ept_entry->emt = epte_get_entry_emt(d, gfn, mfn, &ipat, direct_mmio); ept_entry->ipat = ipat; - ept_entry->sp_avail = order ? 1 : 0; + ept_entry->sp = order ? 1 : 0; if ( ept_entry->mfn == mfn_x(mfn) ) need_modify_vtd_table = 0; @@ -385,8 +387,7 @@ static mfn_t ept_get_entry(struct domain static mfn_t ept_get_entry(struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) { - ept_entry_t *table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + ept_entry_t *table = map_domain_page(ept_get_asr(d)); unsigned long gfn_remainder = gfn; ept_entry_t *ept_entry; u32 index; @@ -479,8 +480,7 @@ out: * pass a p2m_query_t type along to distinguish. 
*/ static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level) { - ept_entry_t *table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + ept_entry_t *table = map_domain_page(ept_get_asr(d)); unsigned long gfn_remainder = gfn; ept_entry_t *ept_entry; ept_entry_t content = { .epte = 0 }; @@ -514,8 +514,7 @@ static ept_entry_t ept_get_entry_content void ept_walk_table(struct domain *d, unsigned long gfn) { - ept_entry_t *table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); + ept_entry_t *table = map_domain_page(ept_get_asr(d)); unsigned long gfn_remainder = gfn; int i; @@ -542,7 +541,8 @@ void ept_walk_table(struct domain *d, un gdprintk(XENLOG_ERR, " epte %"PRIx64"\n", ept_entry->epte); - if ( i==0 || !(ept_entry->epte & 0x7) || ept_entry->sp_avail) + if ( (i == 0) || !is_epte_present(ept_entry) || + is_epte_superpage(ept_entry) ) goto out; else { @@ -608,7 +608,7 @@ void ept_change_entry_emt_with_range(str order = 0; mfn = _mfn(e.mfn); - if ( e.sp_avail ) + if ( is_epte_superpage(&e) ) { while ( level ) { @@ -638,97 +638,44 @@ void ept_change_entry_emt_with_range(str p2m_unlock(d->arch.p2m); } -/* +/* * Walk the whole p2m table, changing any entries of the old type * to the new type. 
This is used in hardware-assisted paging to * quickly enable or disable log-dirty tracking */ -static void ept_change_entry_type_global(struct domain *d, p2m_type_t ot, - p2m_type_t nt) +static void ept_change_entry_type_page(mfn_t ept_page_mfn, int ept_page_level, + p2m_type_t ot, p2m_type_t nt) { - ept_entry_t *l4e; - ept_entry_t *l3e; - ept_entry_t *l2e; - ept_entry_t *l1e; - int i4; - int i3; - int i2; - int i1; + ept_entry_t *epte = map_domain_page(mfn_x(ept_page_mfn)); - if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 ) - return; - - l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))))); - for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ ) + for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ ) { - if ( !l4e[i4].epte ) + if ( !is_epte_present(epte + i) ) continue; - if ( !l4e[i4].sp_avail ) - { - l3e = map_domain_page(l4e[i4].mfn); - for ( i3 = 0; i3 < EPT_PAGETABLE_ENTRIES; i3++ ) - { - if ( !l3e[i3].epte ) - continue; - - if ( !l3e[i3].sp_avail ) - { - l2e = map_domain_page(l3e[i3].mfn); - for ( i2 = 0; i2 < EPT_PAGETABLE_ENTRIES; i2++ ) - { - if ( !l2e[i2].epte ) - continue; - - if ( !l2e[i2].sp_avail ) - { - l1e = map_domain_page(l2e[i2].mfn); - - for ( i1 = 0; i1 < EPT_PAGETABLE_ENTRIES; i1++ ) - { - if ( !l1e[i1].epte ) - continue; - - if ( l1e[i1].avail1 != ot ) - continue; - l1e[i1].avail1 = nt; - ept_p2m_type_to_flags(l1e+i1, nt); - } - - unmap_domain_page(l1e); - } - else - { - if ( l2e[i2].avail1 != ot ) - continue; - l2e[i2].avail1 = nt; - ept_p2m_type_to_flags(l2e+i2, nt); - } - } - - unmap_domain_page(l2e); - } - else - { - if ( l3e[i3].avail1 != ot ) - continue; - l3e[i3].avail1 = nt; - ept_p2m_type_to_flags(l3e+i3, nt); - } - } - - unmap_domain_page(l3e); - } + if ( (ept_page_level > 0) && !is_epte_superpage(epte + i) ) + ept_change_entry_type_page(_mfn(epte[i].mfn), + ept_page_level - 1, ot, nt); else { - if ( l4e[i4].avail1 != ot ) + if ( epte[i].avail1 != ot ) continue; - l4e[i4].avail1 = nt; - 
ept_p2m_type_to_flags(l4e+i4, nt); + + epte[i].avail1 = nt; + ept_p2m_type_to_flags(epte + i, nt); } } - unmap_domain_page(l4e); + unmap_domain_page(epte); +} + +static void ept_change_entry_type_global(struct domain *d, + p2m_type_t ot, p2m_type_t nt) +{ + if ( ept_get_asr(d) == 0 ) + return; + + ept_change_entry_type_page(_mfn(ept_get_asr(d)), ept_get_wl(d), ot, nt); ept_sync_domain(d); } @@ -767,8 +714,7 @@ static void ept_dump_p2m_table(unsigned { gfn_remainder = gfn; mfn = _mfn(INVALID_MFN); - table = - map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)))); + table = map_domain_page(ept_get_asr(d)); for ( i = ept_get_wl(d); i > 0; i-- ) { diff -r 9db8fc1ca2ef xen/include/asm-x86/hvm/vmx/vmx.h --- a/xen/include/asm-x86/hvm/vmx/vmx.h Thu Jun 10 17:30:23 2010 +0100 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h Thu Jun 10 04:37:37 2010 -0700 @@ -35,7 +35,7 @@ typedef union { x : 1, emt : 3, /* EPT Memory type */ ipat : 1, /* Ignore PAT memory type */ - sp_avail : 1, /* Is this a superpage? */ + sp : 1, /* Is this a superpage? */ avail1 : 4, mfn : 40, avail2 : 12; Attachment:
recursive_range.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |