Re: [Xen-devel] [PATCH 3 of 6] x86/mm: make 'query type' argument to get_gfn into a set of flags
On Feb 23, 2012, at 11:34 AM, Tim Deegan wrote:

> # HG changeset patch
> # User Tim Deegan <tim@xxxxxxx>
> # Date 1330013729 0
> # Node ID 00f61d0186a6b098b349b6f94c9a11c5a18dc99a
> # Parent  ec94098841bfae20101608f3daf7a1a01ff71c49
> x86/mm: make 'query type' argument to get_gfn into a set of flags
>
> Having an enum for this won't work if we want to add any orthogonal
> options to it -- the existing code is only correct (after the removal of
> p2m_guest in the previous patch) because there are no tests anywhere for
> '== p2m_alloc', only for '!= p2m_query' and '== p2m_unshare'.
>
> Replace it with a set of flags.
>
> Signed-off-by: Tim Deegan <tim@xxxxxxx>

The P2M_UNSHARE flag should imply P2M_ALLOC. They make no sense separately, and the code is simplified a bit. Otherwise:

Acked-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
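That is, something like this minimal sketch (the bit values are the ones
from this patch; only the OR-ing of P2M_ALLOC into P2M_UNSHARE is the
suggested change, untested):

    typedef unsigned int p2m_query_t;
    #define P2M_ALLOC    (1u<<0)                /* Populate PoD and paged-out entries */
    #define P2M_UNSHARE  ((1u<<1) | P2M_ALLOC)  /* Break CoW sharing; implies alloc */

With the implication encoded in the flag itself, a caller that wants to
unshare can pass P2M_UNSHARE alone -- there is no page to unshare until it
has been populated, so unshare-without-alloc can never be satisfied -- and
sites like hvm_hap_nested_page_fault() below shrink to
(access_w ? P2M_UNSHARE : P2M_ALLOC), while every test stays a plain bit
test on q.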
>
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/hvm/emulate.c
> --- a/xen/arch/x86/hvm/emulate.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/hvm/emulate.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -696,7 +696,7 @@ static int hvmemul_rep_movs(
>
>      get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL,
>                   current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL,
> -                 p2m_alloc, &tg);
> +                 P2M_ALLOC, &tg);
>
>      if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) )
>      {
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/hvm/hvm.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -1235,7 +1235,7 @@ int hvm_hap_nested_page_fault(unsigned l
>
>      p2m = p2m_get_hostp2m(v->domain);
>      mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
> -                              access_w ? p2m_unshare : p2m_alloc, NULL);
> +                              P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
>
>      /* Check access permissions first, then handle faults */
>      if ( mfn_x(mfn) != INVALID_MFN )
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/hvm/svm/svm.c
> --- a/xen/arch/x86/hvm/svm/svm.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -1265,7 +1265,7 @@ static void svm_do_nested_pgfault(struct
>          p2m = p2m_get_p2m(v);
>          _d.gpa = gpa;
>          _d.qualification = 0;
> -        mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, p2m_query, NULL);
> +        mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL);
>          __put_gfn(p2m, gfn);
>          _d.mfn = mfn_x(mfn);
>
> @@ -1287,7 +1287,7 @@ static void svm_do_nested_pgfault(struct
>      if ( p2m == NULL )
>          p2m = p2m_get_p2m(v);
>      /* Everything else is an error. */
> -    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
> +    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
>      __put_gfn(p2m, gfn);
>      gdprintk(XENLOG_ERR,
>               "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/guest_walk.c
> --- a/xen/arch/x86/mm/guest_walk.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/guest_walk.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -98,7 +98,8 @@ static inline void *map_domain_gfn(struc
>      void *map;
>
>      /* Translate the gfn, unsharing if shared */
> -    *mfn = get_gfn_type_access(p2m, gfn_x(gfn), p2mt, &p2ma, p2m_unshare, NULL);
> +    *mfn = get_gfn_type_access(p2m, gfn_x(gfn), p2mt, &p2ma,
> +                               P2M_ALLOC | P2M_UNSHARE, NULL);
>      if ( p2m_is_paging(*p2mt) )
>      {
>          ASSERT(!p2m_is_nestedp2m(p2m));
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/hap/guest_walk.c
> --- a/xen/arch/x86/mm/hap/guest_walk.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/hap/guest_walk.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -60,7 +60,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
>
>      /* Get the top-level table's MFN */
>      top_gfn = cr3 >> PAGE_SHIFT;
> -    top_mfn = get_gfn_type_access(p2m, top_gfn, &p2mt, &p2ma, p2m_unshare, NULL);
> +    top_mfn = get_gfn_type_access(p2m, top_gfn, &p2mt, &p2ma,
> +                                  P2M_ALLOC | P2M_UNSHARE, NULL);
>      if ( p2m_is_paging(p2mt) )
>      {
>          ASSERT(!p2m_is_nestedp2m(p2m));
> @@ -96,7 +97,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
>      if ( missing == 0 )
>      {
>          gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
> -        (void)get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
> +        (void)get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma,
> +                                  P2M_ALLOC | P2M_UNSHARE, NULL);
>          if ( p2m_is_paging(p2mt) )
>          {
>              ASSERT(!p2m_is_nestedp2m(p2m));
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/hap/nested_hap.c
> --- a/xen/arch/x86/mm/hap/nested_hap.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/hap/nested_hap.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -150,7 +150,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain
>
>      /* walk L0 P2M table */
>      mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma,
> -                              p2m_query, page_order);
> +                              0, page_order);
>
>      rc = NESTEDHVM_PAGEFAULT_MMIO;
>      if ( p2m_is_mmio(p2mt) )
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/mem_sharing.c
> --- a/xen/arch/x86/mm/mem_sharing.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/mem_sharing.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -734,7 +734,7 @@ int mem_sharing_share_pages(struct domai
>
>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>                   cd, cgfn, &cmfn_type, NULL, &cmfn,
> -                 p2m_query, &tg);
> +                 0, &tg);
>
>      /* This tricky business is to avoid two callers deadlocking if
>       * grabbing pages in opposite client/source order */
> @@ -849,7 +849,7 @@ int mem_sharing_add_to_physmap(struct do
>
>      get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
>                   cd, cgfn, &cmfn_type, &a, &cmfn,
> -                 p2m_query, &tg);
> +                 0, &tg);
>
>      /* Get the source shared page, check and lock */
>      ret = XENMEM_SHARING_OP_S_HANDLE_INVALID;
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/p2m-ept.c
> --- a/xen/arch/x86/mm/p2m-ept.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/p2m-ept.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -514,7 +514,7 @@ static mfn_t ept_get_entry(struct p2m_do
>              goto out;
>          else if ( ret == GUEST_TABLE_POD_PAGE )
>          {
> -            if ( q == p2m_query )
> +            if ( !(q & P2M_ALLOC) )
>              {
>                  *t = p2m_populate_on_demand;
>                  goto out;
> @@ -541,7 +541,7 @@ static mfn_t ept_get_entry(struct p2m_do
>
>      if ( ept_entry->sa_p2mt == p2m_populate_on_demand )
>      {
> -        if ( q == p2m_query )
> +        if ( !(q & P2M_ALLOC) )
>          {
>              *t = p2m_populate_on_demand;
>              goto out;
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/p2m-pod.c
> --- a/xen/arch/x86/mm/p2m-pod.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/p2m-pod.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -529,7 +529,7 @@ p2m_pod_decrease_reservation(struct doma
>          p2m_access_t a;
>          p2m_type_t t;
>
> -        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, p2m_query, NULL);
> +        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
>
>          if ( t == p2m_populate_on_demand )
>              pod++;
> @@ -570,7 +570,7 @@ p2m_pod_decrease_reservation(struct doma
>          p2m_type_t t;
>          p2m_access_t a;
>
> -        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, p2m_query, NULL);
> +        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
>          if ( t == p2m_populate_on_demand )
>          {
>              set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
> @@ -656,7 +656,7 @@ p2m_pod_zero_check_superpage(struct p2m_
>      for ( i=0; i<SUPERPAGE_PAGES; i++ )
>      {
>          p2m_access_t a;
> -        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, p2m_query, NULL);
> +        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL);
>
>          if ( i == 0 )
>          {
> @@ -786,7 +786,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
>      for ( i=0; i<count; i++ )
>      {
>          p2m_access_t a;
> -        mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, p2m_query, NULL);
> +        mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL);
>          /* If this is ram, and not a pagetable or from the xen heap, and
>             probably not mapped elsewhere, map it; otherwise, skip. */
>          if ( p2m_is_ram(types[i])
> @@ -932,7 +932,7 @@ p2m_pod_emergency_sweep(struct p2m_domai
>      for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
>      {
>          p2m_access_t a;
> -        (void)p2m->get_entry(p2m, i, &t, &a, p2m_query, NULL);
> +        (void)p2m->get_entry(p2m, i, &t, &a, 0, NULL);
>          if ( p2m_is_ram(t) )
>          {
>              gfns[j] = i;
> @@ -1130,7 +1130,7 @@ guest_physmap_mark_populate_on_demand(st
>      for ( i = 0; i < (1UL << order); i++ )
>      {
>          p2m_access_t a;
> -        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
> +        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
>          if ( p2m_is_ram(ot) )
>          {
>              printk("%s: gfn_to_mfn returned type %d!\n",
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/p2m-pt.c
> --- a/xen/arch/x86/mm/p2m-pt.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/p2m-pt.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -513,7 +513,7 @@ pod_retry_l3:
>              (p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand) )
>          {
>              /* The read has succeeded, so we know that mapping exists */
> -            if ( q != p2m_query )
> +            if ( q & P2M_ALLOC )
>              {
>                  if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
>                      goto pod_retry_l3;
> @@ -565,7 +565,7 @@ pod_retry_l2:
>          {
>              /* The read has succeeded, so we know that the mapping
>               * exits at this point. */
> -            if ( q != p2m_query )
> +            if ( q & P2M_ALLOC )
>              {
>                  if ( !p2m_pod_demand_populate(p2m, gfn,
>                                                PAGE_ORDER_2M, q) )
> @@ -623,7 +623,7 @@ pod_retry_l1:
>          {
>              /* The read has succeeded, so we know that the mapping
>               * exits at this point. */
> -            if ( q != p2m_query )
> +            if ( q & P2M_ALLOC )
>              {
>                  if ( !p2m_pod_demand_populate(p2m, gfn,
>                                                PAGE_ORDER_4K, q) )
> @@ -714,7 +714,7 @@ pod_retry_l3:
>      {
>          if ( p2m_flags_to_type(l3e_get_flags(*l3e)) == p2m_populate_on_demand )
>          {
> -            if ( q != p2m_query )
> +            if ( q & P2M_ALLOC )
>              {
>                  if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
>                      goto pod_retry_l3;
> @@ -753,7 +753,7 @@ pod_retry_l2:
>      /* PoD: Try to populate a 2-meg chunk */
>      if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
>      {
> -        if ( q != p2m_query ) {
> +        if ( q & P2M_ALLOC ) {
>              if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
>                  goto pod_retry_l2;
>          } else
> @@ -786,7 +786,7 @@ pod_retry_l1:
>      /* PoD: Try to populate */
>      if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand )
>      {
> -        if ( q != p2m_query ) {
> +        if ( q & P2M_ALLOC ) {
>              if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
>                  goto pod_retry_l1;
>          } else
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/p2m.c
> --- a/xen/arch/x86/mm/p2m.c	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/p2m.c	Thu Feb 23 16:15:29 2012 +0000
> @@ -167,7 +167,7 @@ mfn_t __get_gfn_type_access(struct p2m_d
>      mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
>
>  #ifdef __x86_64__
> -    if ( q == p2m_unshare && p2m_is_shared(*t) )
> +    if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
>      {
>          ASSERT(!p2m_is_nestedp2m(p2m));
>          mem_sharing_unshare_page(p2m->domain, gfn, 0);
> @@ -180,7 +180,7 @@ mfn_t __get_gfn_type_access(struct p2m_d
>      {
>          /* Return invalid_mfn to avoid caller's access */
>          mfn = _mfn(INVALID_MFN);
> -        if (q != p2m_query)
> +        if ( q & P2M_ALLOC )
>              domain_crash(p2m->domain);
>      }
>  #endif
> @@ -367,7 +367,7 @@ void p2m_teardown(struct p2m_domain *p2m
>          for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
>          {
>              p2m_access_t a;
> -            mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
> +            mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
>              if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
>              {
>                  ASSERT(!p2m_is_nestedp2m(p2m));
> @@ -437,7 +437,7 @@ p2m_remove_page(struct p2m_domain *p2m,
>      {
>          for ( i = 0; i < (1UL << page_order); i++ )
>          {
> -            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query, NULL);
> +            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
>              if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
>                  set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
>              ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
> @@ -499,7 +499,7 @@ guest_physmap_add_entry(struct domain *d
>      /* First, remove m->p mappings for existing p->m mappings */
>      for ( i = 0; i < (1UL << page_order); i++ )
>      {
> -        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
> +        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
>  #ifdef __x86_64__
>          if ( p2m_is_shared(ot) )
>          {
> @@ -512,7 +512,7 @@ guest_physmap_add_entry(struct domain *d
>                  p2m_unlock(p2m);
>                  return rc;
>              }
> -            omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
> +            omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
>              ASSERT(!p2m_is_shared(ot));
>          }
>  #endif /* __x86_64__ */
> @@ -561,7 +561,7 @@ guest_physmap_add_entry(struct domain *d
>               * address */
>              P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
>                        mfn + i, ogfn, gfn + i);
> -            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, p2m_query, NULL);
> +            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL);
>              if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
>              {
>                  ASSERT(mfn_valid(omfn));
> @@ -620,7 +620,7 @@ p2m_type_t p2m_change_type(struct domain
>
>      gfn_lock(p2m, gfn, 0);
>
> -    mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
>      if ( pt == ot )
>          set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
>
> @@ -648,7 +648,7 @@ void p2m_change_type_range(struct domain
>
>      for ( gfn = start; gfn < end; gfn++ )
>      {
> -        mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
> +        mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
>          if ( pt == ot )
>              set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
>      }
> @@ -674,7 +674,7 @@ set_mmio_p2m_entry(struct domain *d, uns
>          return 0;
>
>      gfn_lock(p2m, gfn, 0);
> -    omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
> +    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
>      if ( p2m_is_grant(ot) )
>      {
>          p2m_unlock(p2m);
> @@ -710,7 +710,7 @@ clear_mmio_p2m_entry(struct domain *d, u
>          return 0;
>
>      gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
>
>      /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
>      if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
> @@ -741,7 +741,7 @@ set_shared_p2m_entry(struct domain *d, u
>          return 0;
>
>      gfn_lock(p2m, gfn, 0);
> -    omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
> +    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
>      /* At the moment we only allow p2m change if gfn has already been made
>       * sharable first */
>      ASSERT(p2m_is_shared(ot));
> @@ -793,7 +793,7 @@ int p2m_mem_paging_nominate(struct domai
>
>      gfn_lock(p2m, gfn, 0);
>
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
>
>      /* Check if mfn is valid */
>      if ( !mfn_valid(mfn) )
> @@ -856,7 +856,7 @@ int p2m_mem_paging_evict(struct domain *
>      gfn_lock(p2m, gfn, 0);
>
>      /* Get mfn */
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
>      if ( unlikely(!mfn_valid(mfn)) )
>          goto out;
>
> @@ -980,7 +980,7 @@ void p2m_mem_paging_populate(struct doma
>
>      /* Fix p2m mapping */
>      gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
>      /* Allow only nominated or evicted pages to enter page-in path */
>      if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
>      {
> @@ -1042,7 +1042,7 @@ int p2m_mem_paging_prep(struct domain *d
>
>      gfn_lock(p2m, gfn, 0);
>
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
>
>      ret = -ENOENT;
>      /* Allow missing pages */
> @@ -1135,7 +1135,7 @@ void p2m_mem_paging_resume(struct domain
>          if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
>          {
>              gfn_lock(p2m, rsp.gfn, 0);
> -            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
> +            mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, 0, NULL);
>              /* Allow only pages which were prepared properly, or pages which
>               * were nominated but not evicted */
>              if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
> @@ -1168,7 +1168,7 @@ bool_t p2m_mem_access_check(unsigned lon
>
>      /* First, handle rx2rw conversion automatically */
>      gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
>
>      if ( access_w && p2ma == p2m_access_rx2rw )
>      {
> @@ -1297,7 +1297,7 @@ int p2m_set_mem_access(struct domain *d,
>      p2m_lock(p2m);
>      for ( pfn = start_pfn; pfn < start_pfn + nr; pfn++ )
>      {
> -        mfn = p2m->get_entry(p2m, pfn, &t, &_a, p2m_query, NULL);
> +        mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
>          if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
>          {
>              rc = -ENOMEM;
> @@ -1338,7 +1338,7 @@ int p2m_get_mem_access(struct domain *d,
>      }
>
>      gfn_lock(p2m, gfn, 0);
> -    mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query, NULL);
> +    mfn = p2m->get_entry(p2m, pfn, &t, &a, 0, NULL);
>      gfn_unlock(p2m, gfn, 0);
>
>      if ( mfn_x(mfn) == INVALID_MFN )
> @@ -1579,7 +1579,7 @@ void audit_p2m(struct domain *d,
>              continue;
>          }
>
> -        p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, p2m_query, NULL);
> +        p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, 0, NULL);
>          if ( mfn_x(p2mfn) != mfn )
>          {
>              mpbad++;
> diff -r ec94098841bf -r 00f61d0186a6 xen/arch/x86/mm/shadow/types.h
> --- a/xen/arch/x86/mm/shadow/types.h	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/arch/x86/mm/shadow/types.h	Thu Feb 23 16:15:29 2012 +0000
> @@ -193,7 +193,7 @@ static inline shadow_l4e_t shadow_l4e_fr
>
>  /* Override get_gfn to work with gfn_t */
>  #undef get_gfn_query
> -#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_query)
> +#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), 0)
>
>  /* The shadow types needed for the various levels. */
>
> diff -r ec94098841bf -r 00f61d0186a6 xen/include/asm-x86/guest_pt.h
> --- a/xen/include/asm-x86/guest_pt.h	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/include/asm-x86/guest_pt.h	Thu Feb 23 16:15:29 2012 +0000
> @@ -53,7 +53,7 @@ gfn_to_paddr(gfn_t gfn)
>
>  /* Override get_gfn to work with gfn_t */
>  #undef get_gfn
> -#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_alloc)
> +#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)
>
>
>  /* Types of the guest's page tables and access functions for them */
> diff -r ec94098841bf -r 00f61d0186a6 xen/include/asm-x86/p2m.h
> --- a/xen/include/asm-x86/p2m.h	Thu Feb 23 16:15:29 2012 +0000
> +++ b/xen/include/asm-x86/p2m.h	Thu Feb 23 16:15:29 2012 +0000
> @@ -115,11 +115,9 @@ typedef enum {
>  } p2m_access_t;
>
>  /* Modifiers to the query */
> -typedef enum {
> -    p2m_query,              /* Do not populate a PoD entries      */
> -    p2m_alloc,              /* Automatically populate PoD entries */
> -    p2m_unshare,            /* Break c-o-w sharing; implies alloc */
> -} p2m_query_t;
> +typedef unsigned int p2m_query_t;
> +#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
> +#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
>
>  /* We use bitmaps and maks to handle groups of types */
>  #define p2m_to_mask(_t) (1UL << (_t))
> @@ -330,9 +328,10 @@ static inline mfn_t get_gfn_type(struct
>   * N.B. get_gfn_query() is the _only_ one guaranteed not to take the
>   * p2m lock; none of the others can be called with the p2m or paging
>   * lock held. */
> -#define get_gfn(d, g, t)         get_gfn_type((d), (g), (t), p2m_alloc)
> -#define get_gfn_query(d, g, t)   get_gfn_type((d), (g), (t), p2m_query)
> -#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), p2m_unshare)
> +#define get_gfn(d, g, t)         get_gfn_type((d), (g), (t), P2M_ALLOC)
> +#define get_gfn_query(d, g, t)   get_gfn_type((d), (g), (t), 0)
> +#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
> +                                              P2M_ALLOC | P2M_UNSHARE)
>
>  /* Compatibility function exporting the old untyped interface */
>  static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
> @@ -364,8 +363,7 @@ static inline mfn_t get_gfn_query_unlock
>                                           p2m_type_t *t)
>  {
>      p2m_access_t a;
> -    return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a,
> -                                 p2m_query, NULL, 0);
> +    return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
>  }
>
>  /* General conversion function from mfn to gfn */
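To make the end result concrete, here is what callers of the reworked
interface look like (an illustrative sketch built only from the signatures
quoted above, not code taken from the patch):

    p2m_type_t t;
    p2m_access_t a;
    mfn_t mfn;

    /* Write fault: populate PoD/paged-out entries and break CoW sharing. */
    mfn = get_gfn_type_access(p2m, gfn, &t, &a, P2M_ALLOC | P2M_UNSHARE, NULL);
    /* ... use mfn ... */
    __put_gfn(p2m, gfn);

    /* Pure lookup: no flags, so a PoD entry is reported, not populated. */
    mfn = get_gfn_type_access(p2m, gfn, &t, &a, 0, NULL);
    __put_gfn(p2m, gfn);

The orthogonality the changeset description asks for falls out directly:
a future modifier is one more bit, and no existing '==' or '!=' comparison
has to be audited.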
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel