[Xen-changelog] [xen-unstable] x86/mm: adjust p2m interface to return superpage sizes
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1315491186 -3600
# Node ID 04e2fd7379d07cc0d2da9dc9da50a2b54c198f13
# Parent  bdd19847ae63b5dfb036e228cb16ec3ae678e995
x86/mm: adjust p2m interface to return superpage sizes

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Signed-off-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---

diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c Thu Sep 08 15:13:06 2011 +0100
@@ -1216,7 +1216,7 @@
     }
 
     p2m = p2m_get_hostp2m(v->domain);
-    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
+    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
 
     /* Check access permissions first, then handle faults */
     if ( access_valid && (mfn_x(mfn) != INVALID_MFN) )
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Sep 08 15:13:06 2011 +0100
@@ -1160,7 +1160,7 @@
         p2m = p2m_get_p2m(v);
         _d.gpa = gpa;
         _d.qualification = 0;
-        _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query));
+        _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query, NULL));
 
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
@@ -1180,7 +1180,7 @@
     if ( p2m == NULL )
         p2m = p2m_get_p2m(v);
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
+    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
     gdprintk(XENLOG_ERR,
              "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
              gpa, mfn_x(mfn), p2mt);
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c Thu Sep 08 15:13:06 2011 +0100
@@ -95,7 +95,7 @@
     p2m_access_t a;
 
     /* Translate the gfn, unsharing if shared */
-    *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare);
+    *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare, NULL);
     if ( p2m_is_paging(*p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c Thu Sep 08 15:13:06 2011 +0100
@@ -59,7 +59,7 @@
 
     /* Get the top-level table's MFN */
     top_mfn = gfn_to_mfn_type_p2m(p2m, cr3 >> PAGE_SHIFT,
-                                  &p2mt, &p2ma, p2m_unshare);
+                                  &p2mt, &p2ma, p2m_unshare, NULL);
     if ( p2m_is_paging(p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
@@ -92,7 +92,7 @@
     if ( missing == 0 )
     {
         gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
-        gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare);
+        gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
         if ( p2m_is_paging(p2mt) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c Thu Sep 08 15:13:06 2011 +0100
@@ -136,7 +136,8 @@
     p2m_access_t p2ma;
 
     /* walk L0 P2M table */
-    mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, p2m_query);
+    mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma,
+                              p2m_query, NULL);
 
     if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
        return NESTEDHVM_PAGEFAULT_ERROR;
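The caller-side change in the hunks above is mechanical: every call into the p2m lookup path gains a trailing unsigned int *page_order argument, and callers that do not need the mapping size simply pass NULL. For readers unfamiliar with the idiom, here is a minimal standalone sketch of an optional out-parameter; the names (demo_lookup, DEMO_ORDER_*) are hypothetical and not part of Xen:

    /* Sketch only: a function that fills *page_order only when the
     * caller supplied a non-NULL pointer, as the patched hooks do. */
    #include <stdio.h>
    #include <stddef.h>

    #define DEMO_ORDER_4K 0   /* order 0: one 4K frame */
    #define DEMO_ORDER_2M 9   /* order 9: 512 frames, i.e. a 2M superpage */

    static unsigned long demo_lookup(unsigned long gfn,
                                     unsigned int *page_order)
    {
        if ( page_order )
            *page_order = (gfn >= 512) ? DEMO_ORDER_2M : DEMO_ORDER_4K;
        return gfn + 0x1000;  /* fake frame number, for illustration */
    }

    int main(void)
    {
        unsigned int order;

        demo_lookup(42, NULL);      /* caller that doesn't care */
        demo_lookup(1024, &order);  /* caller that wants the size */
        printf("mapping covers %lu 4K frames\n", 1UL << order);
        return 0;
    }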
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/mm/p2m-ept.c Thu Sep 08 15:13:06 2011 +0100
@@ -507,7 +507,7 @@
 
 /* Read ept p2m entries */
 static mfn_t ept_get_entry(struct p2m_domain *p2m,
                            unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
-                           p2m_query_t q)
+                           p2m_query_t q, unsigned int *page_order)
 {
     struct domain *d = p2m->domain;
     ept_entry_t *table = map_domain_page(ept_get_asr(d));
@@ -594,6 +594,9 @@
                            ((1 << (i * EPT_TABLE_ORDER)) - 1));
             mfn = _mfn(split_mfn);
         }
+
+        if ( page_order )
+            *page_order = i * EPT_TABLE_ORDER;
     }
 
 out:
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c Thu Sep 08 15:13:06 2011 +0100
@@ -503,7 +503,8 @@
 /* Read the current domain's p2m table (through the linear mapping). */
 static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
                                     unsigned long gfn, p2m_type_t *t,
-                                    p2m_access_t *a, p2m_query_t q)
+                                    p2m_access_t *a, p2m_query_t q,
+                                    unsigned int *page_order)
 {
     mfn_t mfn = _mfn(INVALID_MFN);
     p2m_type_t p2mt = p2m_mmio_dm;
@@ -567,6 +568,8 @@
             else
                 p2mt = p2m_mmio_dm;
 
+            if ( page_order )
+                *page_order = PAGE_ORDER_1G;
             goto out;
         }
 #endif
@@ -620,6 +623,8 @@
         else
             p2mt = p2m_mmio_dm;
 
+        if ( page_order )
+            *page_order = PAGE_ORDER_2M;
         goto out;
     }
@@ -669,6 +674,8 @@
             p2mt = p2m_mmio_dm;
     }
 
+    if ( page_order )
+        *page_order = PAGE_ORDER_4K;
 out:
     *t = p2mt;
     return mfn;
@@ -676,7 +683,8 @@
 
 static mfn_t
 p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
-               p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+               p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+               unsigned int *page_order)
 {
     mfn_t mfn;
     paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
@@ -699,7 +707,7 @@
 
     /* Use the fast path with the linear mapping if we can */
     if ( p2m == p2m_get_hostp2m(current->domain) )
-        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q);
+        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q, page_order);
 
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
@@ -753,6 +761,8 @@
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
+            if ( page_order )
+                *page_order = PAGE_ORDER_1G;
             return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
         }
@@ -787,6 +797,8 @@
         unmap_domain_page(l2e);
 
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
+        if ( page_order )
+            *page_order = PAGE_ORDER_2M;
         return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
     }
@@ -817,6 +829,8 @@
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
+    if ( page_order )
+        *page_order = PAGE_ORDER_4K;
     return (p2m_is_valid(*t) || p2m_is_grant(*t)) ?
         mfn : _mfn(INVALID_MFN);
 }
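Note how the two backends report the size: the EPT path derives the order from the level at which the walk stopped (i * EPT_TABLE_ORDER, and with 512-entry EPT tables EPT_TABLE_ORDER is 9, so levels 0/1/2 yield orders 0/9/18), while the PT path returns the fixed constants PAGE_ORDER_4K/PAGE_ORDER_2M/PAGE_ORDER_1G. Either way the order is log2 of the number of 4K frames the entry covers. A hypothetical caller could turn it into a size and alignment like this (sketch only; npages, bytes and base_gfn are illustrative locals, and PAGE_SHIFT is assumed to be 12):

    /* Sketch only: derive the mapping extent from a returned page_order. */
    unsigned long npages   = 1UL << page_order;             /* 1, 512 or 262144 frames */
    paddr_t bytes          = (paddr_t)npages << PAGE_SHIFT; /* 4K, 2M or 1G */
    unsigned long base_gfn = gfn & ~(npages - 1);           /* superpage-aligned start */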
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu Sep 08 15:13:06 2011 +0100
@@ -307,7 +307,7 @@
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query);
+        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query, NULL);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
@@ -372,7 +372,7 @@
     {
         for ( i = 0; i < (1UL << page_order); i++ )
         {
-            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query);
+            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query, NULL);
             if ( !p2m_is_grant(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
@@ -878,7 +878,7 @@
 
     /* First, handle rx2rw conversion automatically */
     p2m_lock(p2m);
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
 
     if ( access_w && p2ma == p2m_access_rx2rw )
     {
@@ -1036,7 +1036,7 @@
         return 0;
     }
 
-    mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query);
+    mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query, NULL);
 
     if ( mfn_x(mfn) == INVALID_MFN )
         return -ESRCH;
diff -r bdd19847ae63 -r 04e2fd7379d0 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Wed Sep 07 10:37:48 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Sep 08 15:13:06 2011 +0100
@@ -233,7 +233,8 @@
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,
                                        p2m_access_t *p2ma,
-                                       p2m_query_t q);
+                                       p2m_query_t q,
+                                       unsigned int *page_order);
     void               (*change_entry_type_global)(struct p2m_domain *p2m,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
@@ -303,10 +304,14 @@
 /* Read a particular P2M table, mapping pages as we go. Most callers
  * should _not_ call this directly; use the other gfn_to_mfn_* functions
  * below unless you know you want to walk a p2m that isn't a domain's
- * main one. */
+ * main one.
+ * If the lookup succeeds, the return value is != INVALID_MFN and
+ * *page_order is filled in with the order of the superpage (if any) that
+ * the entry was found in. */
 static inline mfn_t
 gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
-                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+                    unsigned int *page_order)
 {
     mfn_t mfn;
 
@@ -318,14 +323,14 @@
         return _mfn(gfn);
     }
 
-    mfn = p2m->get_entry(p2m, gfn, t, a, q);
+    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
 
 #ifdef __x86_64__
     if ( q == p2m_unshare && p2m_is_shared(*t) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         mem_sharing_unshare_page(p2m->domain, gfn, 0);
-        mfn = p2m->get_entry(p2m, gfn, t, a, q);
+        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
     }
 #endif
 
@@ -349,7 +354,7 @@
                                   p2m_query_t q)
 {
     p2m_access_t a;
-    return gfn_to_mfn_type_p2m(p2m_get_hostp2m(d), gfn, t, &a, q);
+    return gfn_to_mfn_type_p2m(p2m_get_hostp2m(d), gfn, t, &a, q, NULL);
 }
 
 /* Syntactic sugar: most callers will use one of these.
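The comment added to p2m.h states the contract: on a successful lookup the returned mfn is != INVALID_MFN and *page_order reports the order of the (super)page the translation was found in. A hypothetical caller inside Xen might use the new argument as follows (sketch only, not taken from this patch; gdprintk() is used the same way as in the svm.c hunk above):

    /* Sketch only: query a translation and log the entry size. */
    unsigned int page_order;
    p2m_type_t t;
    p2m_access_t a;
    mfn_t mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query, &page_order);

    if ( mfn_x(mfn) != INVALID_MFN )
        gdprintk(XENLOG_INFO, "gfn %#lx lies in a 2^%u-frame p2m entry\n",
                 gfn, page_order);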
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog