Re: [Xen-devel] [PATCH v3 08/15] xen/x86: p2m: Use typesafe gfn for the P2M callbacks get_entry and set_entry
On 10/02/2017 01:59 PM, Julien Grall wrote:
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
> Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
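
For anyone reading along, the "typesafe gfn" being threaded through here is Xen's usual TYPE_SAFE wrapper: in debug builds gfn_t is a single-member struct, so the compiler rejects code that mixes raw unsigned longs, gfns and mfns, while in release builds it degrades to a plain integer with no overhead. A minimal sketch of the idea (simplified from xen/include/xen/typesafe.h, not the exact macros):

    /* Sketch of the TYPE_SAFE wrapper; see xen/include/xen/typesafe.h. */
    #ifndef NDEBUG
    /* Debug build: a distinct struct type, so gfn/mfn mix-ups fail to compile. */
    #define TYPE_SAFE(_type, _name)                                              \
        typedef struct { _type _name; } _name##_t;                               \
        static inline _name##_t _##_name(_type n) { return (_name##_t){ n }; }   \
        static inline _type _name##_x(_name##_t n) { return n._name; }
    #else
    /* Release build: plain integer, zero cost. */
    #define TYPE_SAFE(_type, _name)                                              \
        typedef _type _name##_t;                                                 \
        static inline _name##_t _##_name(_type n) { return n; }                  \
        static inline _type _name##_x(_name##_t n) { return n; }
    #endif

    TYPE_SAFE(unsigned long, gfn);   /* gfn_t, _gfn(), gfn_x() */
    TYPE_SAFE(unsigned long, mfn);   /* mfn_t, _mfn(), mfn_x() */

That is why the change below is mechanical: _gfn()/gfn_x() conversions at the call boundaries, with the callback signatures in p2m.h switching from unsigned long to gfn_t.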
>
> ---
>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> Cc: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
>
> Changes in v3:
> - Add Razvan's acked-by
> - Add Wei's reviewed-by
>
> Changes in v2:
> - Add Andrew's acked-by
> - Add Kevin's reviewed-by (EPT part)
> ---
> xen/arch/x86/hvm/hvm.c | 2 +-
> xen/arch/x86/mm/mem_access.c | 19 +++++------
> xen/arch/x86/mm/mem_sharing.c | 4 +--
> xen/arch/x86/mm/p2m-ept.c | 6 ++--
> xen/arch/x86/mm/p2m-pod.c | 15 +++++----
> xen/arch/x86/mm/p2m-pt.c | 6 ++--
> xen/arch/x86/mm/p2m.c | 77 +++++++++++++++++++++++--------------------
> xen/include/asm-x86/p2m.h | 4 +--
> 8 files changed, 73 insertions(+), 60 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 887f0e556c..6b8c4cbdda 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1787,7 +1787,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
> {
> bool_t sve;
>
> - p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, &sve);
> + p2m->get_entry(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL, &sve);
>
> if ( !sve && altp2m_vcpu_emulate_ve(curr) )
> {
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index 33a77c9f1d..c0cd0174cf 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -66,7 +66,7 @@ static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
> }
>
> gfn_lock(p2m, gfn, 0);
> - mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
> gfn_unlock(p2m, gfn, 0);
>
> if ( mfn_eq(mfn, INVALID_MFN) )
> @@ -142,7 +142,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> vm_event_request_t **req_ptr)
> {
> struct vcpu *v = current;
> - unsigned long gfn = gpa >> PAGE_SHIFT;
> + gfn_t gfn = gaddr_to_gfn(gpa);
> struct domain *d = v->domain;
> struct p2m_domain *p2m = NULL;
> mfn_t mfn;
> @@ -215,7 +215,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
> *req_ptr = req;
>
> req->reason = VM_EVENT_REASON_MEM_ACCESS;
> - req->u.mem_access.gfn = gfn;
> + req->u.mem_access.gfn = gfn_x(gfn);
> req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
> if ( npfec.gla_valid )
> {
> @@ -247,7 +247,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
> unsigned long gfn_l = gfn_x(gfn);
> int rc;
>
> - mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
> + mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);
>
> /* Check host p2m if no valid entry in alternate */
> if ( !mfn_valid(mfn) )
> @@ -264,16 +264,16 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
> if ( page_order != PAGE_ORDER_4K )
> {
> unsigned long mask = ~((1UL << page_order) - 1);
> - unsigned long gfn2_l = gfn_l & mask;
> + gfn_t gfn2 = _gfn(gfn_l & mask);
> mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
>
> - rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
> + rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
> if ( rc )
> return rc;
> }
> }
>
> - return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
> + return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a,
> current->domain != d);
> }
>
> @@ -295,10 +295,9 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
> mfn_t mfn;
> p2m_access_t _a;
> p2m_type_t t;
> - unsigned long gfn_l = gfn_x(gfn);
>
> - mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
> - rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
> + mfn = p2m->get_entry(p2m, gfn, &t, &_a, 0, NULL, NULL);
> + rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
> }
>
> return rc;
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 3ab119cef2..62a3899089 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -1234,7 +1234,7 @@ int relinquish_shared_pages(struct domain *d)
>
> if ( atomic_read(&d->shr_pages) == 0 )
> break;
> - mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);
> if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
> {
> /* Does not fail with ENOMEM given the DESTROY flag */
> @@ -1243,7 +1243,7 @@ int relinquish_shared_pages(struct domain *d)
> /* Clear out the p2m entry so no one else may try to
> * unshare. Must succeed: we just read the old entry and
> * we hold the p2m lock. */
> - set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
> + set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K,
> p2m_invalid, p2m_access_rwx, -1);
> ASSERT(set_rc == 0);
> count += 0x10;
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index 23c0518733..f14d1686b7 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -674,11 +674,12 @@ bool_t ept_handle_misconfig(uint64_t gpa)
> * Returns: 0 for success, -errno for failure
> */
> static int
> -ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> +ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma,
> int sve)
> {
> ept_entry_t *table, *ept_entry = NULL;
> + unsigned long gfn = gfn_x(gfn_);
> unsigned long gfn_remainder = gfn;
> unsigned int i, target = order / EPT_TABLE_ORDER;
> unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) : gfn;
> @@ -910,11 +911,12 @@ out:
>
> /* Read ept p2m entries */
> static mfn_t ept_get_entry(struct p2m_domain *p2m,
> - unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
> + gfn_t gfn_, p2m_type_t *t, p2m_access_t* a,
> p2m_query_t q, unsigned int *page_order,
> bool_t *sve)
> {
> ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
> + unsigned long gfn = gfn_x(gfn_);
> unsigned long gfn_remainder = gfn;
> ept_entry_t *ept_entry;
> u32 index;
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index eb74e5c01f..c8c8cff014 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -543,7 +543,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
> p2m_type_t t;
> unsigned int cur_order;
>
> - p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
> + p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, &cur_order, NULL);
> n = 1UL << min(order, cur_order);
> if ( t == p2m_populate_on_demand )
> pod += n;
> @@ -603,7 +603,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
> p2m_access_t a;
> unsigned int cur_order;
>
> - mfn = p2m->get_entry(p2m, gfn_x(gfn) + i, &t, &a, 0, &cur_order, NULL);
> + mfn = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, &cur_order, NULL);
> if ( order < cur_order )
> cur_order = order;
> n = 1UL << cur_order;
> @@ -717,7 +717,8 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
> unsigned long k;
> const struct page_info *page;
>
> - mfn = p2m->get_entry(p2m, gfn + i, &type, &a, 0, &cur_order, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn + i), &type, &a, 0,
> + &cur_order, NULL);
>
> /*
> * Conditions that must be met for superpage-superpage:
> @@ -859,7 +860,9 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
> for ( i = 0; i < count; i++ )
> {
> p2m_access_t a;
> - mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, 0, NULL, NULL);
> +
> + mfns[i] = p2m->get_entry(p2m, _gfn(gfns[i]), types + i, &a,
> + 0, NULL, NULL);
> /*
> * If this is ram, and not a pagetable or from the xen heap, and
> * probably not mapped elsewhere, map it; otherwise, skip.
> @@ -988,7 +991,7 @@ p2m_pod_emergency_sweep(struct p2m_domain *p2m)
> for ( i = p2m->pod.reclaim_single; i > 0 ; i-- )
> {
> p2m_access_t a;
> - (void)p2m->get_entry(p2m, i, &t, &a, 0, NULL, NULL);
> + (void)p2m->get_entry(p2m, _gfn(i), &t, &a, 0, NULL, NULL);
> if ( p2m_is_ram(t) )
> {
> gfns[j] = i;
> @@ -1237,7 +1240,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
> p2m_access_t a;
> unsigned int cur_order;
>
> - p2m->get_entry(p2m, gfn + i, &ot, &a, 0, &cur_order, NULL);
> + p2m->get_entry(p2m, _gfn(gfn + i), &ot, &a, 0, &cur_order, NULL);
> n = 1UL << min(order, cur_order);
> if ( p2m_is_ram(ot) )
> {
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index 0e63d6ed11..4bfec4f5f0 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -479,12 +479,13 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
>
> /* Returns: 0 for success, -errno for failure */
> static int
> -p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> +p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
> unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
> int sve)
> {
> /* XXX -- this might be able to be faster iff current->domain == d */
> void *table;
> + unsigned long gfn = gfn_x(gfn_);
> unsigned long i, gfn_remainder = gfn;
> l1_pgentry_t *p2m_entry, entry_content;
> /* Intermediate table to free if we're replacing it with a superpage. */
> @@ -731,11 +732,12 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> }
>
> static mfn_t
> -p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
> +p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
> p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
> unsigned int *page_order, bool_t *sve)
> {
> mfn_t mfn;
> + unsigned long gfn = gfn_x(gfn_);
> paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
> l2_pgentry_t *l2e;
> l1_pgentry_t *l1e;
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 0b479105b9..35d4a15391 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -415,11 +415,12 @@ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
> mm_write_unlock(&p2m->lock);
> }
>
> -mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
> +mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
> p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
> unsigned int *page_order, bool_t locked)
> {
> mfn_t mfn;
> + gfn_t gfn = _gfn(gfn_l);
>
> /* Unshare makes no sense withuot populate. */
> if ( q & P2M_UNSHARE )
> @@ -430,7 +431,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
> /* Not necessarily true, but for non-translated guests, we claim
> * it's the most generic kind of memory */
> *t = p2m_ram_rw;
> - return _mfn(gfn);
> + return _mfn(gfn_l);
> }
>
> if ( locked )
> @@ -444,8 +445,8 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
> ASSERT(p2m_is_hostp2m(p2m));
> /* Try to unshare. If we fail, communicate ENOMEM without
> * sleeping. */
> - if ( mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0 )
> - (void)mem_sharing_notify_enomem(p2m->domain, gfn, 0);
> + if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
> + (void)mem_sharing_notify_enomem(p2m->domain, gfn_l, 0);
> mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
> }
>
> @@ -556,7 +557,7 @@ int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> else
> order = 0;
>
> - set_rc = p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma, -1);
> + set_rc = p2m->set_entry(p2m, _gfn(gfn), mfn, order, p2mt, p2ma, -1);
> if ( set_rc )
> rc = set_rc;
>
> @@ -735,7 +736,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
> {
> for ( i = 0; i < (1UL << page_order); i++ )
> {
> - mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL, NULL);
> + mfn_return = p2m->get_entry(p2m, _gfn(gfn + i), &t, &a, 0,
> + NULL, NULL);
> if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
> set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
> ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
> @@ -762,7 +764,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
> unsigned int page_order, p2m_type_t t)
> {
> struct p2m_domain *p2m = p2m_get_hostp2m(d);
> - unsigned long i, ogfn;
> + unsigned long i;
> + gfn_t ogfn;
> p2m_type_t ot;
> p2m_access_t a;
> mfn_t omfn;
> @@ -803,7 +806,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
> /* First, remove m->p mappings for existing p->m mappings */
> for ( i = 0; i < (1UL << page_order); i++ )
> {
> - omfn = p2m->get_entry(p2m, gfn_x(gfn_add(gfn, i)), &ot,
> + omfn = p2m->get_entry(p2m, gfn_add(gfn, i), &ot,
> &a, 0, NULL, NULL);
> if ( p2m_is_shared(ot) )
> {
> @@ -831,7 +834,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
> 0);
> return rc;
> }
> - omfn = p2m->get_entry(p2m, gfn_x(gfn_add(gfn, i)),
> + omfn = p2m->get_entry(p2m, gfn_add(gfn, i),
> &ot, &a, 0, NULL, NULL);
> ASSERT(!p2m_is_shared(ot));
> }
> @@ -873,21 +876,24 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
> }
> if ( page_get_owner(mfn_to_page(mfn_add(mfn, i))) != d )
> continue;
> - ogfn = mfn_to_gfn(d, mfn_add(mfn, i));
> - if ( (ogfn != INVALID_M2P_ENTRY) && (ogfn != gfn_x(gfn_add(gfn, i))) )
> + ogfn = _gfn(mfn_to_gfn(d, mfn_add(mfn, i)));
> + if ( !gfn_eq(ogfn, _gfn(INVALID_M2P_ENTRY)) &&
> + !gfn_eq(ogfn, gfn_add(gfn, i)) )
> {
> /* This machine frame is already mapped at another physical
> * address */
> P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
> - mfn_x(mfn_add(mfn, i)), ogfn, gfn_x(gfn_add(gfn, i)));
> + mfn_x(mfn_add(mfn, i)), gfn_x(ogfn),
> + gfn_x(gfn_add(gfn, i)));
> omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL, NULL);
> if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
> {
> ASSERT(mfn_valid(omfn));
> P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
> - ogfn , mfn_x(omfn));
> + gfn_x(ogfn) , mfn_x(omfn));
> if ( mfn_eq(omfn, mfn_add(mfn, i)) )
> - p2m_remove_page(p2m, ogfn, mfn_x(mfn_add(mfn, i)), 0);
> + p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(mfn_add(mfn, i)),
> + 0);
> }
> }
> }
> @@ -948,7 +954,7 @@ int p2m_change_type_one(struct domain *d, unsigned long gfn,
>
> gfn_lock(p2m, gfn, 0);
>
> - mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &pt, &a, 0, NULL, NULL);
> rc = likely(pt == ot)
> ? p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt,
> p2m->default_access)
> @@ -1065,14 +1071,15 @@ int p2m_finish_type_change(struct domain *d,
> * 1 + new order for caller to retry with smaller order (guaranteed
> * to be smaller than order passed in)
> */
> -static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> - unsigned int order, p2m_type_t gfn_p2mt,
> - p2m_access_t access)
> +static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
> + mfn_t mfn, unsigned int order,
> + p2m_type_t gfn_p2mt, p2m_access_t access)
> {
> int rc = 0;
> p2m_access_t a;
> p2m_type_t ot;
> mfn_t omfn;
> + gfn_t gfn = _gfn(gfn_l);
> unsigned int cur_order = 0;
> struct p2m_domain *p2m = p2m_get_hostp2m(d);
>
> @@ -1103,11 +1110,11 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> }
> }
>
> - P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn, mfn_x(mfn));
> - rc = p2m_set_entry(p2m, gfn, mfn, order, gfn_p2mt, access);
> + P2M_DEBUG("set %d %lx %lx\n", gfn_p2mt, gfn_l, mfn_x(mfn));
> + rc = p2m_set_entry(p2m, gfn_l, mfn, order, gfn_p2mt, access);
> if ( rc )
> gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
> - gfn, order, rc, mfn_x(mfn));
> + gfn_l, order, rc, mfn_x(mfn));
> else if ( p2m_is_pod(ot) )
> {
> pod_lock(p2m);
> @@ -1157,7 +1164,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
>
> gfn_lock(p2m, gfn, 0);
>
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
>
> if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
> ret = p2m_set_entry(p2m, gfn, _mfn(gfn), PAGE_ORDER_4K,
> @@ -1201,7 +1208,7 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
> return -EIO;
>
> gfn_lock(p2m, gfn, order);
> - actual_mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, &cur_order, NULL);
> + actual_mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, &cur_order, NULL);
> if ( cur_order < order )
> {
> rc = cur_order + 1;
> @@ -1245,7 +1252,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
>
> gfn_lock(p2m, gfn, 0);
>
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
> if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn )
> {
> ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
> @@ -1278,7 +1285,7 @@ int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
> return -EIO;
>
> gfn_lock(p2m, gfn, 0);
> - omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL, NULL);
> + omfn = p2m->get_entry(p2m, _gfn(gfn), &ot, &a, 0, NULL, NULL);
> /* At the moment we only allow p2m change if gfn has already been made
> * sharable first */
> ASSERT(p2m_is_shared(ot));
> @@ -1330,7 +1337,7 @@ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
>
> gfn_lock(p2m, gfn, 0);
>
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
>
> /* Check if mfn is valid */
> if ( !mfn_valid(mfn) )
> @@ -1392,7 +1399,7 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
> gfn_lock(p2m, gfn, 0);
>
> /* Get mfn */
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
> if ( unlikely(!mfn_valid(mfn)) )
> goto out;
>
> @@ -1524,7 +1531,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
>
> /* Fix p2m mapping */
> gfn_lock(p2m, gfn, 0);
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
> /* Allow only nominated or evicted pages to enter page-in path */
> if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
> {
> @@ -1586,7 +1593,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
>
> gfn_lock(p2m, gfn, 0);
>
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
>
> ret = -ENOENT;
> /* Allow missing pages */
> @@ -1674,7 +1681,7 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
> unsigned long gfn = rsp->u.mem_access.gfn;
>
> gfn_lock(p2m, gfn, 0);
> - mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> + mfn = p2m->get_entry(p2m, _gfn(gfn), &p2mt, &a, 0, NULL, NULL);
> /*
> * Allow only pages which were prepared properly, or pages which
> * were nominated but not evicted.
> @@ -2263,7 +2270,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
> p2m_lock(hp2m);
> p2m_lock(ap2m);
>
> - mfn = ap2m->get_entry(ap2m, gfn_x(old_gfn), &t, &a, 0, NULL, NULL);
> + mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
>
> if ( gfn_eq(new_gfn, INVALID_GFN) )
> {
> @@ -2292,21 +2299,21 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
> gfn = _gfn(gfn_x(old_gfn) & mask);
> mfn = _mfn(mfn_x(mfn) & mask);
>
> - if ( ap2m->set_entry(ap2m, gfn_x(gfn), mfn, page_order, t, a, 1) )
> + if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a, 1) )
> goto out;
> }
> }
>
> - mfn = ap2m->get_entry(ap2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
> + mfn = ap2m->get_entry(ap2m, new_gfn, &t, &a, 0, NULL, NULL);
>
> if ( !mfn_valid(mfn) )
> - mfn = hp2m->get_entry(hp2m, gfn_x(new_gfn), &t, &a, 0, NULL, NULL);
> + mfn = hp2m->get_entry(hp2m, new_gfn, &t, &a, 0, NULL, NULL);
>
> /* Note: currently it is not safe to remap to a shared entry */
> if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
> goto out;
>
> - if ( !ap2m->set_entry(ap2m, gfn_x(old_gfn), mfn, PAGE_ORDER_4K, t, a,
> + if ( !ap2m->set_entry(ap2m, old_gfn, mfn, PAGE_ORDER_4K, t, a,
> (current->domain != d)) )
> {
> rc = 0;
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 8f3409b400..1c9a51e9ad 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -234,13 +234,13 @@ struct p2m_domain {
> struct page_list_head pages;
>
> int (*set_entry)(struct p2m_domain *p2m,
> - unsigned long gfn,
> + gfn_t gfn,
> mfn_t mfn, unsigned int page_order,
> p2m_type_t p2mt,
> p2m_access_t p2ma,
> int sve);
> mfn_t (*get_entry)(struct p2m_domain *p2m,
> - unsigned long gfn,
> + gfn_t gfn,
> p2m_type_t *p2mt,
> p2m_access_t *p2ma,
> p2m_query_t q,
>
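
A footnote on the helpers the patch leans on, for readers unfamiliar with them: arithmetic and comparisons on gfn_t go through small inline wrappers rather than raw integer operators. The sketches below are simplified from xen/include/xen/mm.h and build on the TYPE_SAFE sketch earlier in the thread; the gaddr_to_gfn() body is my assumption of the obvious page shift rather than a quote from the tree:

    /* gfn arithmetic/comparison helpers (simplified sketch). */
    static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
    {
        /* Unwrap, add the frame offset, wrap again. */
        return _gfn(gfn_x(gfn) + i);
    }

    static inline bool_t gfn_eq(gfn_t x, gfn_t y)
    {
        return gfn_x(x) == gfn_x(y);
    }

    /* Assumed definition: guest physical address to guest frame number. */
    static inline gfn_t gaddr_to_gfn(paddr_t gaddr)
    {
        return _gfn(gaddr >> PAGE_SHIFT);
    }

So, for example, "p2m->get_entry(p2m, gfn_x(gfn) + i, ...)" becomes "p2m->get_entry(p2m, gfn_add(gfn, i), ...)" as in the PoD hunks above.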
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel