[Xen-changelog] [xen-unstable] x86/mm: make 'query type' argument to get_gfn into a set of flags
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1331812014 0
# Node ID 09ce2e4bcce5e4ffafb412e6fa4413560e89af94
# Parent 7a439281b8ee5aef0b495e191fab7c8d0e46e4be
x86/mm: make 'query type' argument to get_gfn into a set of flags
Having an enum for this won't work if we want to add any orthogonal
options to it -- the existing code is only correct (after the removal of
p2m_guest in the previous patch) because there are no tests anywhere for
'== p2m_alloc', only for '!= p2m_query' and '== p2m_unshare'.
Replace it with a set of flags.
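To illustrate the difference (a minimal sketch, not code from the tree;
it only mirrors the definitions this patch introduces in asm-x86/p2m.h):

    typedef unsigned int p2m_query_t;
    #define P2M_ALLOC   (1u<<0)   /* populate PoD and paged-out entries */
    #define P2M_UNSHARE (1u<<1)   /* break CoW sharing */

    /* Flags compose; a single enum value cannot express "both at once". */
    p2m_query_t q = P2M_ALLOC | P2M_UNSHARE;

    if ( q & P2M_ALLOC )          /* each option is tested independently */
        { /* ... populate ... */ }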
Signed-off-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/hvm/emulate.c Thu Mar 15 11:46:54 2012 +0000
@@ -716,7 +716,7 @@
get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL,
current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL,
- p2m_alloc, &tg);
+ P2M_ALLOC, &tg);
if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) )
{
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/hvm/hvm.c Thu Mar 15 11:46:54 2012 +0000
@@ -1270,7 +1270,7 @@
p2m = p2m_get_hostp2m(v->domain);
mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
- access_w ? p2m_unshare : p2m_alloc, NULL);
+ P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
/* Check access permissions first, then handle faults */
if ( mfn_x(mfn) != INVALID_MFN )
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Mar 15 11:46:54 2012 +0000
@@ -1265,7 +1265,7 @@
p2m = p2m_get_p2m(v);
_d.gpa = gpa;
_d.qualification = 0;
- mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, p2m_query, NULL);
+ mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL);
__put_gfn(p2m, gfn);
_d.mfn = mfn_x(mfn);
@@ -1287,7 +1287,7 @@
if ( p2m == NULL )
p2m = p2m_get_p2m(v);
/* Everything else is an error. */
- mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
+ mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
__put_gfn(p2m, gfn);
gdprintk(XENLOG_ERR,
"SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/guest_walk.c Thu Mar 15 11:46:54 2012 +0000
@@ -98,7 +98,8 @@
void *map;
/* Translate the gfn, unsharing if shared */
- *mfn = get_gfn_type_access(p2m, gfn_x(gfn), p2mt, &p2ma, p2m_unshare, NULL);
+ *mfn = get_gfn_type_access(p2m, gfn_x(gfn), p2mt, &p2ma,
+ P2M_ALLOC | P2M_UNSHARE, NULL);
if ( p2m_is_paging(*p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/hap/guest_walk.c Thu Mar 15 11:46:54 2012 +0000
@@ -60,7 +60,8 @@
/* Get the top-level table's MFN */
top_gfn = cr3 >> PAGE_SHIFT;
- top_mfn = get_gfn_type_access(p2m, top_gfn, &p2mt, &p2ma, p2m_unshare, NULL);
+ top_mfn = get_gfn_type_access(p2m, top_gfn, &p2mt, &p2ma,
+ P2M_ALLOC | P2M_UNSHARE, NULL);
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
@@ -96,7 +97,8 @@
if ( missing == 0 )
{
gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
- (void)get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
+ (void)get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma,
+ P2M_ALLOC | P2M_UNSHARE, NULL);
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/hap/nested_hap.c Thu Mar 15 11:46:54 2012 +0000
@@ -150,7 +150,7 @@
/* walk L0 P2M table */
mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma,
- p2m_query, page_order);
+ 0, page_order);
rc = NESTEDHVM_PAGEFAULT_MMIO;
if ( p2m_is_mmio(p2mt) )
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/mem_sharing.c Thu Mar 15 11:46:54 2012 +0000
@@ -729,7 +729,7 @@
get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
cd, cgfn, &cmfn_type, NULL, &cmfn,
- p2m_query, &tg);
+ 0, &tg);
/* This tricky business is to avoid two callers deadlocking if
* grabbing pages in opposite client/source order */
@@ -844,7 +844,7 @@
get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
cd, cgfn, &cmfn_type, &a, &cmfn,
- p2m_query, &tg);
+ 0, &tg);
/* Get the source shared page, check and lock */
ret = XENMEM_SHARING_OP_S_HANDLE_INVALID;
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/p2m-ept.c Thu Mar 15 11:46:54 2012 +0000
@@ -514,7 +514,7 @@
goto out;
else if ( ret == GUEST_TABLE_POD_PAGE )
{
- if ( q == p2m_query )
+ if ( !(q & P2M_ALLOC) )
{
*t = p2m_populate_on_demand;
goto out;
@@ -541,7 +541,7 @@
if ( ept_entry->sa_p2mt == p2m_populate_on_demand )
{
- if ( q == p2m_query )
+ if ( !(q & P2M_ALLOC) )
{
*t = p2m_populate_on_demand;
goto out;
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/p2m-pod.c Thu Mar 15 11:46:54 2012 +0000
@@ -529,7 +529,7 @@
p2m_access_t a;
p2m_type_t t;
- (void)p2m->get_entry(p2m, gpfn + i, &t, &a, p2m_query, NULL);
+ (void)p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
if ( t == p2m_populate_on_demand )
pod++;
@@ -570,7 +570,7 @@
p2m_type_t t;
p2m_access_t a;
- mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
if ( t == p2m_populate_on_demand )
{
set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
p2m->default_access);
@@ -656,7 +656,7 @@
for ( i=0; i<SUPERPAGE_PAGES; i++ )
{
p2m_access_t a;
- mfn = p2m->get_entry(p2m, gfn + i, &type, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, NULL);
if ( i == 0 )
{
@@ -786,7 +786,7 @@
for ( i=0; i<count; i++ )
{
p2m_access_t a;
- mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, p2m_query, NULL);
+ mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL);
/* If this is ram, and not a pagetable or from the xen heap, and
probably not mapped elsewhere, map it; otherwise, skip. */
if ( p2m_is_ram(types[i])
@@ -932,7 +932,7 @@
for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
{
p2m_access_t a;
- (void)p2m->get_entry(p2m, i, &t, &a, p2m_query, NULL);
+ (void)p2m->get_entry(p2m, i, &t, &a, 0, NULL);
if ( p2m_is_ram(t) )
{
gfns[j] = i;
@@ -1130,7 +1130,7 @@
for ( i = 0; i < (1UL << order); i++ )
{
p2m_access_t a;
- omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
+ omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
if ( p2m_is_ram(ot) )
{
printk("%s: gfn_to_mfn returned type %d!\n",
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/p2m-pt.c Thu Mar 15 11:46:54 2012 +0000
@@ -513,7 +513,7 @@
(p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand)
)
{
/* The read has succeeded, so we know that mapping exists */
- if ( q != p2m_query )
+ if ( q & P2M_ALLOC )
{
if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
@@ -565,7 +565,7 @@
{
/* The read has succeeded, so we know that the mapping
* exists at this point. */
- if ( q != p2m_query )
+ if ( q & P2M_ALLOC )
{
if ( !p2m_pod_demand_populate(p2m, gfn,
PAGE_ORDER_2M, q) )
@@ -623,7 +623,7 @@
{
/* The read has succeeded, so we know that the mapping
* exists at this point. */
- if ( q != p2m_query )
+ if ( q & P2M_ALLOC )
{
if ( !p2m_pod_demand_populate(p2m, gfn,
PAGE_ORDER_4K, q) )
@@ -714,7 +714,7 @@
{
if ( p2m_flags_to_type(l3e_get_flags(*l3e)) ==
p2m_populate_on_demand )
{
- if ( q != p2m_query )
+ if ( q & P2M_ALLOC )
{
if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
goto pod_retry_l3;
@@ -753,7 +753,7 @@
/* PoD: Try to populate a 2-meg chunk */
if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
{
- if ( q != p2m_query ) {
+ if ( q & P2M_ALLOC ) {
if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
goto pod_retry_l2;
} else
@@ -786,7 +786,7 @@
/* PoD: Try to populate */
if ( p2m_flags_to_type(l1e_get_flags(*l1e)) == p2m_populate_on_demand )
{
- if ( q != p2m_query ) {
+ if ( q & P2M_ALLOC ) {
if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
goto pod_retry_l1;
} else
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/p2m.c Thu Mar 15 11:46:54 2012 +0000
@@ -151,6 +151,10 @@
{
mfn_t mfn;
+ /* Unshare makes no sense without populate. */
+ if ( q & P2M_UNSHARE )
+ q |= P2M_ALLOC;
+
if ( !p2m || !paging_mode_translate(p2m->domain) )
{
/* Not necessarily true, but for non-translated guests, we claim
@@ -167,7 +171,7 @@
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
#ifdef __x86_64__
- if ( q == p2m_unshare && p2m_is_shared(*t) )
+ if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
/* Try to unshare. If we fail, communicate ENOMEM without
@@ -183,7 +187,7 @@
{
/* Return invalid_mfn to avoid caller's access */
mfn = _mfn(INVALID_MFN);
- if (q != p2m_query)
+ if ( q & P2M_ALLOC )
domain_crash(p2m->domain);
}
#endif
@@ -370,7 +374,7 @@
for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
{
p2m_access_t a;
- mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
@@ -441,7 +445,7 @@
{
for ( i = 0; i < (1UL << page_order); i++ )
{
- mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, p2m_query, NULL);
+ mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
@@ -503,7 +507,7 @@
/* First, remove m->p mappings for existing p->m mappings */
for ( i = 0; i < (1UL << page_order); i++ )
{
- omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
+ omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
#ifdef __x86_64__
if ( p2m_is_shared(ot) )
{
@@ -528,7 +532,7 @@
(void)mem_sharing_notify_enomem(p2m->domain, gfn + i, 0);
return rc;
}
- omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
+ omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
ASSERT(!p2m_is_shared(ot));
}
#endif /* __x86_64__ */
@@ -577,7 +581,7 @@
* address */
P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
mfn + i, ogfn, gfn + i);
- omfn = p2m->get_entry(p2m, ogfn, &ot, &a, p2m_query, NULL);
+ omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL);
if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
{
ASSERT(mfn_valid(omfn));
@@ -636,7 +640,7 @@
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
if ( pt == ot )
set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
@@ -664,7 +668,7 @@
for ( gfn = start; gfn < end; gfn++ )
{
- mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
if ( pt == ot )
set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
p2m->default_access);
}
@@ -690,7 +694,7 @@
return 0;
gfn_lock(p2m, gfn, 0);
- omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
+ omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
if ( p2m_is_grant(ot) )
{
p2m_unlock(p2m);
@@ -726,7 +730,7 @@
return 0;
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
/* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
@@ -757,7 +761,7 @@
return 0;
gfn_lock(p2m, gfn, 0);
- omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
+ omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
/* At the moment we only allow p2m change if gfn has already been made
* sharable first */
ASSERT(p2m_is_shared(ot));
@@ -809,7 +813,7 @@
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
/* Check if mfn is valid */
if ( !mfn_valid(mfn) )
@@ -872,7 +876,7 @@
gfn_lock(p2m, gfn, 0);
/* Get mfn */
- mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
if ( unlikely(!mfn_valid(mfn)) )
goto out;
@@ -999,7 +1003,7 @@
/* Fix p2m mapping */
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
/* Allow only nominated or evicted pages to enter page-in path */
if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
{
@@ -1061,7 +1065,7 @@
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
ret = -ENOENT;
/* Allow missing pages */
@@ -1154,7 +1158,7 @@
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
{
gfn_lock(p2m, rsp.gfn, 0);
- mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, rsp.gfn, &p2mt, &a, 0, NULL);
/* Allow only pages which were prepared properly, or pages which
* were nominated but not evicted */
if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
@@ -1187,7 +1191,7 @@
/* First, handle rx2rw conversion automatically */
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
if ( access_w && p2ma == p2m_access_rx2rw )
{
@@ -1316,7 +1320,7 @@
p2m_lock(p2m);
for ( pfn = start_pfn; pfn < start_pfn + nr; pfn++ )
{
- mfn = p2m->get_entry(p2m, pfn, &t, &_a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
{
rc = -ENOMEM;
@@ -1357,7 +1361,7 @@
}
gfn_lock(p2m, gfn, 0);
- mfn = p2m->get_entry(p2m, pfn, &t, &a, p2m_query, NULL);
+ mfn = p2m->get_entry(p2m, pfn, &t, &a, 0, NULL);
gfn_unlock(p2m, gfn, 0);
if ( mfn_x(mfn) == INVALID_MFN )
@@ -1598,7 +1602,7 @@
continue;
}
- p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, p2m_query, NULL);
+ p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, 0, NULL);
if ( mfn_x(p2mfn) != mfn )
{
mpbad++;
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/arch/x86/mm/shadow/types.h Thu Mar 15 11:46:54 2012 +0000
@@ -193,7 +193,7 @@
/* Override get_gfn to work with gfn_t */
#undef get_gfn_query
-#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_query)
+#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), 0)
/* The shadow types needed for the various levels. */
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/include/asm-x86/guest_pt.h Thu Mar 15 11:46:54 2012 +0000
@@ -53,7 +53,7 @@
/* Override get_gfn to work with gfn_t */
#undef get_gfn
-#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_alloc)
+#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)
/* Types of the guest's page tables and access functions for them */
diff -r 7a439281b8ee -r 09ce2e4bcce5 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Mar 15 11:41:11 2012 +0000
+++ b/xen/include/asm-x86/p2m.h Thu Mar 15 11:46:54 2012 +0000
@@ -117,11 +117,9 @@
} p2m_access_t;
/* Modifiers to the query */
-typedef enum {
- p2m_query, /* Do not populate PoD entries */
- p2m_alloc, /* Automatically populate PoD entries */
- p2m_unshare, /* Break c-o-w sharing; implies alloc */
-} p2m_query_t;
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC (1u<<0) /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE (1u<<1) /* Break CoW sharing */
/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))
@@ -332,9 +330,10 @@
* N.B. get_gfn_query() is the _only_ one guaranteed not to take the
* p2m lock; none of the others can be called with the p2m or paging
* lock held. */
-#define get_gfn(d, g, t) get_gfn_type((d), (g), (t), p2m_alloc)
-#define get_gfn_query(d, g, t) get_gfn_type((d), (g), (t), p2m_query)
-#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), p2m_unshare)
+#define get_gfn(d, g, t) get_gfn_type((d), (g), (t), P2M_ALLOC)
+#define get_gfn_query(d, g, t) get_gfn_type((d), (g), (t), 0)
+#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
+ P2M_ALLOC | P2M_UNSHARE)
/* Compatibility function exporting the old untyped interface */
static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
@@ -366,8 +365,7 @@
p2m_type_t *t)
{
p2m_access_t a;
- return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a,
- p2m_query, NULL, 0);
+ return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
}
/* General conversion function from mfn to gfn */
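A usage sketch of the reworked wrappers (illustrative only; "d", "gfn"
and "t" are placeholder variables, assuming the usual get_gfn/put_gfn
pairing in this interface):

    p2m_type_t t;
    /* Writable access to a possibly-shared page: populate and unshare. */
    mfn_t mfn = get_gfn_unshare(d, gfn, &t); /* P2M_ALLOC | P2M_UNSHARE */
    if ( mfn_valid(mfn) )
        { /* ... access the page ... */ }
    put_gfn(d, gfn);   /* callers must release the gfn when done */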
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog