
[Xen-devel] [PATCH v4 1/2] xen: Switch parameter in get_page_from_gfn to use typesafe gfn



No functional change intended.

Only reasonable clean-ups are done in this patch. The remaining callers
simply wrap the frame number with _gfn(...) for the time being.

The code in get_page_from_gfn is slightly reworked to also address a
latent misbehaviour: it is not safe to call mfn_to_page(...) before
mfn_valid(...) has succeeded.
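
To make the rework concrete, here is a simplified before/after sketch of
the non-translated x86 path (paraphrasing the asm-x86/p2m.h hunk below,
not a verbatim copy of it):

    /* Before: mfn_to_page() was called even when the MFN was invalid. */
    page = mfn_to_page(_mfn(gfn));
    return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;

    /* After: validate the MFN first, only then translate it to a page. */
    mfn = _mfn(gfn_x(gfn));
    if ( !mfn_valid(mfn) )
        return NULL;

    page = mfn_to_page(mfn);

    return get_page(page, d) ? page : NULL;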

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>

---
    Changes in v4:
        - Drop all the reviewed-by/acked-by as the last version was
        sent nearly 9 months ago.
        - Rework the change in x86 version get_page_from_gfn
        - s/%#PRI_/%PRI_/

    Changes in v3:
        - Add Jan's acked-by

    Changes in v2:
        - Remove >> PAGE_SHIFT in svm code
        - Fix typo in the e-mail address
        - Small NITs
---
 xen/arch/arm/guestcopy.c             |  2 +-
 xen/arch/arm/mm.c                    |  2 +-
 xen/arch/x86/cpu/vpmu.c              |  2 +-
 xen/arch/x86/domctl.c                |  6 +++---
 xen/arch/x86/hvm/dm.c                |  2 +-
 xen/arch/x86/hvm/domain.c            |  6 ++++--
 xen/arch/x86/hvm/hvm.c               |  9 +++++----
 xen/arch/x86/hvm/svm/svm.c           |  8 ++++----
 xen/arch/x86/hvm/viridian/viridian.c | 16 ++++++++--------
 xen/arch/x86/hvm/vmx/vmx.c           |  4 ++--
 xen/arch/x86/hvm/vmx/vvmx.c          | 12 ++++++------
 xen/arch/x86/mm.c                    | 24 ++++++++++++++----------
 xen/arch/x86/mm/p2m.c                |  2 +-
 xen/arch/x86/mm/shadow/hvm.c         |  6 +++---
 xen/arch/x86/physdev.c               |  3 ++-
 xen/arch/x86/pv/descriptor-tables.c  |  4 ++--
 xen/arch/x86/pv/emul-priv-op.c       |  6 +++---
 xen/arch/x86/pv/mm.c                 |  2 +-
 xen/arch/x86/traps.c                 | 11 ++++++-----
 xen/common/domain.c                  |  2 +-
 xen/common/event_fifo.c              | 12 ++++++------
 xen/common/memory.c                  |  4 ++--
 xen/include/asm-arm/p2m.h            |  6 +++---
 xen/include/asm-x86/p2m.h            | 16 ++++++++++++----
 24 files changed, 92 insertions(+), 75 deletions(-)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 7a0f3e9d5f..55892062bb 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -37,7 +37,7 @@ static struct page_info *translate_get_page(copy_info_t info, uint64_t addr,
         return get_page_from_gva(info.gva.v, addr,
                                  write ? GV2M_WRITE : GV2M_READ);
 
-    page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
+    page = get_page_from_gfn(info.gpa.d, gaddr_to_gfn(addr), &p2mt, P2M_ALLOC);
 
     if ( !page )
         return NULL;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index e1cdeaaf2f..e9afd53e69 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1405,7 +1405,7 @@ int xenmem_add_to_physmap_one(
 
         /* Take reference to the foreign domain page.
          * Reference will be released in XENMEM_remove_from_physmap */
-        page = get_page_from_gfn(od, idx, &p2mt, P2M_ALLOC);
+        page = get_page_from_gfn(od, _gfn(idx), &p2mt, P2M_ALLOC);
         if ( !page )
         {
             put_pg_owner(od);
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 375599aca5..c5a4c9a603 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -590,7 +590,7 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
     struct vcpu *v;
     struct vpmu_struct *vpmu;
     struct page_info *page;
-    uint64_t gfn = params->val;
+    gfn_t gfn = _gfn(params->val);
 
     if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
         return -EINVAL;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 2d45e5b8a8..3ce2dd1463 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -453,7 +453,7 @@ long arch_do_domctl(
                 break;
             }
 
-            page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
+            page = get_page_from_gfn(d, _gfn(gfn), &t, P2M_ALLOC);
 
             if ( unlikely(!page) ||
                  unlikely(is_xen_heap_page(page)) )
@@ -503,11 +503,11 @@ long arch_do_domctl(
 
     case XEN_DOMCTL_hypercall_init:
     {
-        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
+        gfn_t gfn = _gfn(domctl->u.hypercall_init.gmfn);
         struct page_info *page;
         void *hypercall_page;
 
-        page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+        page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
 
         if ( !page || !get_page_type(page, PGT_writable_page) )
         {
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d6d0e8be89..3b3ad27938 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -186,7 +186,7 @@ static int modified_memory(struct domain *d,
         {
             struct page_info *page;
 
-            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+            page = get_page_from_gfn(d, _gfn(pfn), NULL, P2M_UNSHARE);
             if ( page )
             {
                 paging_mark_pfn_dirty(d, _pfn(pfn));
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 5d5a746a25..3c29ff86be 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
-        struct page_info *page = get_page_from_gfn(v->domain,
-                                 v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
+        struct page_info *page;
+
+        page = get_page_from_gfn(v->domain,
+                                 gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
                                  NULL, P2M_ALLOC);
         if ( !page )
         {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 029eea3b85..236bd6ed38 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2168,7 +2168,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
+    unsigned long old_value = v->arch.hvm.guest_cr[0];
     struct page_info *page;
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -2223,7 +2223,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
         if ( !paging_mode_hap(d) )
         {
             /* The guest CR3 must be pointing to the guest physical. */
-            gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
+            gfn_t gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]);
+
             page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
             if ( !page )
             {
@@ -2315,7 +2316,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+        page = get_page_from_gfn(v->domain, gaddr_to_gfn(value),
                                  NULL, P2M_ALLOC);
         if ( !page )
             goto bad_cr3;
@@ -3143,7 +3144,7 @@ enum hvm_translation_result hvm_translate_get_page(
          && hvm_mmio_internal(gfn_to_gaddr(gfn)) )
         return HVMTRANS_bad_gfn_to_mfn;
 
-    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+    page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
 
     if ( !page )
         return HVMTRANS_bad_gfn_to_mfn;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index cf83ce9a19..620eb951b6 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -317,7 +317,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     {
         if ( c->cr0 & X86_CR0_PG )
         {
-            page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+            page = get_page_from_gfn(v->domain, gaddr_to_gfn(c->cr3),
                                      NULL, P2M_ALLOC);
             if ( !page )
             {
@@ -2276,9 +2276,9 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
         return NULL;
 
     /* Need to translate L1-GPA to MPA */
-    page = get_page_from_gfn(v->domain, 
-                            nv->nv_vvmcxaddr >> PAGE_SHIFT, 
-                            &p2mt, P2M_ALLOC | P2M_UNSHARE);
+    page = get_page_from_gfn(v->domain,
+                             gaddr_to_gfn(nv->nv_vvmcxaddr),
+                             &p2mt, P2M_ALLOC | P2M_UNSHARE);
     if ( !page )
         return NULL;
 
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 4b06b78a27..b393a1457b 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -256,16 +256,16 @@ static void dump_hypercall(const struct domain *d)
 
 static void enable_hypercall_page(struct domain *d)
 {
-    unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.pfn;
-    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+    gfn_t gfn = _gfn(d->arch.hvm.viridian->hypercall_gpa.pfn);
+    struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
     uint8_t *p;
 
     if ( !page || !get_page_type(page, PGT_writable_page) )
     {
         if ( page )
             put_page(page);
-        gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
-                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+        gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+                 gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
         return;
     }
 
@@ -733,13 +733,13 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name,
 
 void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
 {
-    unsigned long gmfn = vp->msr.pfn;
+    gfn_t gfn = _gfn(vp->msr.pfn);
     struct page_info *page;
 
     if ( vp->ptr )
         return;
 
-    page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
     if ( !page )
         goto fail;
 
@@ -760,8 +760,8 @@ void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
     return;
 
  fail:
-    gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
-             gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+    gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+             gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
 }
 
 void viridian_unmap_guest_page(struct viridian_page *vp)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0060310d74..38bdef0862 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -674,7 +674,7 @@ static int vmx_restore_cr0_cr3(
     {
         if ( cr0 & X86_CR0_PG )
         {
-            page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+            page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3),
                                      NULL, P2M_ALLOC);
             if ( !page )
             {
@@ -1314,7 +1314,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
     if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
         goto crash;
 
-    page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+    page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), &p2mt, P2M_UNSHARE);
     if ( !page )
     {
         /* Ideally you don't want to crash but rather go into a wait 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index fdf449bfd1..c93ae59921 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -646,11 +646,11 @@ static void nvmx_update_apic_access_address(struct vcpu *v)
     if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
     {
         p2m_type_t p2mt;
-        unsigned long apic_gpfn;
+        gfn_t apic_gfn;
         struct page_info *apic_pg;
 
-        apic_gpfn = get_vvmcs(v, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
-        apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+        apic_gfn = gaddr_to_gfn(get_vvmcs(v, APIC_ACCESS_ADDR));
+        apic_pg = get_page_from_gfn(v->domain, apic_gfn, &p2mt, P2M_ALLOC);
         ASSERT(apic_pg && !p2m_is_paging(p2mt));
         __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
         put_page(apic_pg);
@@ -667,11 +667,11 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v)
     if ( ctrl & CPU_BASED_TPR_SHADOW )
     {
         p2m_type_t p2mt;
-        unsigned long vapic_gpfn;
+        gfn_t vapic_gfn;
         struct page_info *vapic_pg;
 
-        vapic_gpfn = get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
-        vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+        vapic_gfn = gaddr_to_gfn(get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR));
+        vapic_pg = get_page_from_gfn(v->domain, vapic_gfn, &p2mt, P2M_ALLOC);
         ASSERT(vapic_pg && !p2m_is_paging(p2mt));
         __vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
         put_page(vapic_pg);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 7531406543..f8e2b6f921 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2083,7 +2083,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
             p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW ?
                             P2M_ALLOC | P2M_UNSHARE : P2M_ALLOC;
 
-            page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), &p2mt, q);
+            page = get_page_from_gfn(pg_dom, _gfn(l1e_get_pfn(nl1e)), &p2mt, q);
 
             if ( p2m_is_paged(p2mt) )
             {
@@ -3307,7 +3307,8 @@ long do_mmuext_op(
             if ( paging_mode_refcounts(pg_owner) )
                 break;
 
-            page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+            page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+                                     P2M_ALLOC);
             if ( unlikely(!page) )
             {
                 rc = -EINVAL;
@@ -3372,7 +3373,8 @@ long do_mmuext_op(
             if ( paging_mode_refcounts(pg_owner) )
                 break;
 
-            page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+            page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+                                     P2M_ALLOC);
             if ( unlikely(!page) )
             {
                 gdprintk(XENLOG_WARNING,
@@ -3588,7 +3590,8 @@ long do_mmuext_op(
         }
 
         case MMUEXT_CLEAR_PAGE:
-            page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, P2M_ALLOC);
+            page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
+                                     P2M_ALLOC);
             if ( unlikely(p2mt != p2m_ram_rw) && page )
             {
                 put_page(page);
@@ -3616,7 +3619,7 @@ long do_mmuext_op(
         {
             struct page_info *src_page, *dst_page;
 
-            src_page = get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt,
+            src_page = get_page_from_gfn(pg_owner, _gfn(op.arg2.src_mfn), &p2mt,
                                          P2M_ALLOC);
             if ( unlikely(p2mt != p2m_ram_rw) && src_page )
             {
@@ -3632,7 +3635,7 @@ long do_mmuext_op(
                 break;
             }
 
-            dst_page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt,
+            dst_page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
                                          P2M_ALLOC);
             if ( unlikely(p2mt != p2m_ram_rw) && dst_page )
             {
@@ -3720,7 +3723,8 @@ long do_mmu_update(
 {
     struct mmu_update req;
     void *va = NULL;
-    unsigned long gpfn, gmfn;
+    unsigned long gpfn;
+    gfn_t gfn;
     struct page_info *page;
     unsigned int cmd, i = 0, done = 0, pt_dom;
     struct vcpu *curr = current, *v = curr;
@@ -3833,8 +3837,8 @@ long do_mmu_update(
             rc = -EINVAL;
 
             req.ptr -= cmd;
-            gmfn = req.ptr >> PAGE_SHIFT;
-            page = get_page_from_gfn(pt_owner, gmfn, &p2mt, P2M_ALLOC);
+            gfn = gaddr_to_gfn(req.ptr);
+            page = get_page_from_gfn(pt_owner, gfn, &p2mt, P2M_ALLOC);
 
             if ( unlikely(!page) || p2mt != p2m_ram_rw )
             {
@@ -3842,7 +3846,7 @@ long do_mmu_update(
                     put_page(page);
                 if ( p2m_is_paged(p2mt) )
                 {
-                    p2m_mem_paging_populate(pt_owner, gmfn);
+                    p2m_mem_paging_populate(pt_owner, gfn_x(gfn));
                     rc = -ENOENT;
                 }
                 else
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4b1e38b131..4ca35b56d6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2956,7 +2956,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
      * Take a refcnt on the mfn. NB: following supported for foreign mapping:
      *     ram_rw | ram_logdirty | ram_ro | paging_out.
      */
-    page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+    page = get_page_from_gfn(fdom, _gfn(fgfn), &p2mt, P2M_ALLOC);
     if ( !page ||
          !p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || p2m_is_hole(p2mt) )
     {
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index 0aa560b7f5..1315597878 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -388,15 +388,15 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
 static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
                                 struct sh_emulate_ctxt *sh_ctxt)
 {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     mfn_t mfn;
     p2m_type_t p2mt;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
 
     /* Translate the VA to a GFN. */
-    gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec);
-    if ( gfn == gfn_x(INVALID_GFN) )
+    gfn = _gfn(paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec));
+    if ( gfn_eq(gfn, INVALID_GFN) )
     {
         x86_emul_pagefault(pfec, vaddr, &sh_ctxt->ctxt);
 
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 3a3c15890b..4f3f438614 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -229,7 +229,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             break;
 
         ret = -EINVAL;
-        page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC);
+        page = get_page_from_gfn(current->domain, _gfn(info.gmfn),
+                                 NULL, P2M_ALLOC);
         if ( !page )
             break;
         if ( !get_page_type(page, PGT_writable_page) )
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 940804b18a..7b3fb2806a 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -112,7 +112,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries)
     {
         struct page_info *page;
 
-        page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+        page = get_page_from_gfn(d, _gfn(frames[i]), NULL, P2M_ALLOC);
         if ( !page )
             goto fail;
         if ( !get_page_type(page, PGT_seg_desc_page) )
@@ -219,7 +219,7 @@ long do_update_descriptor(uint64_t gaddr, seg_desc_t d)
     if ( !IS_ALIGNED(gaddr, sizeof(d)) || !check_descriptor(currd, &d) )
         return -EINVAL;
 
-    page = get_page_from_gfn(currd, gfn_x(gfn), NULL, P2M_ALLOC);
+    page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
     if ( !page )
         return -EINVAL;
 
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 8a4909bf4c..77ffa7fdcf 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -756,12 +756,12 @@ static int write_cr(unsigned int reg, unsigned long val,
     case 3: /* Write CR3 */
     {
         struct domain *currd = curr->domain;
-        unsigned long gfn;
+        gfn_t gfn;
         struct page_info *page;
         int rc;
 
-        gfn = !is_pv_32bit_domain(currd)
-              ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val);
+        gfn = _gfn(!is_pv_32bit_domain(currd)
+                   ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val));
         page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
         if ( !page )
             break;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index f5ea00ca4e..c9ad1152b4 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -106,7 +106,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
     if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
         return false;
 
-    page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
+    page = get_page_from_gfn(currd, _gfn(l1e_get_pfn(gl1e)), NULL, P2M_ALLOC);
     if ( unlikely(!page) )
         return false;
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 23069e25ec..d8f8070ac9 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -803,7 +803,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
     case 0: /* Write hypercall page */
     {
         void *hypercall_page;
-        unsigned long gmfn = val >> PAGE_SHIFT;
+        gfn_t gfn = gaddr_to_gfn(val);
         unsigned int page_index = val & (PAGE_SIZE - 1);
         struct page_info *page;
         p2m_type_t t;
@@ -816,7 +816,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
             return X86EMUL_EXCEPTION;
         }
 
-        page = get_page_from_gfn(d, gmfn, &t, P2M_ALLOC);
+        page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
 
         if ( !page || !get_page_type(page, PGT_writable_page) )
         {
@@ -825,13 +825,14 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
 
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, gmfn);
+                p2m_mem_paging_populate(d, gfn_x(gfn));
                 return X86EMUL_RETRY;
             }
 
             gdprintk(XENLOG_WARNING,
-                     "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
-                     gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
+                     "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn") to MSR %08x\n",
+                     gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN),
+                     base);
             return X86EMUL_EXCEPTION;
         }
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 744b572195..e8f6dfbdf1 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1299,7 +1299,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
     if ( (v != current) && !(v->pause_flags & VPF_down) )
         return -EINVAL;
 
-    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+    page = get_page_from_gfn(d, _gfn(gfn), NULL, P2M_ALLOC);
     if ( !page )
         return -EINVAL;
 
diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c
index 230f440f14..073981ab43 100644
--- a/xen/common/event_fifo.c
+++ b/xen/common/event_fifo.c
@@ -361,7 +361,7 @@ static const struct evtchn_port_ops evtchn_port_ops_fifo =
     .print_state   = evtchn_fifo_print_state,
 };
 
-static int map_guest_page(struct domain *d, uint64_t gfn, void **virt)
+static int map_guest_page(struct domain *d, gfn_t gfn, void **virt)
 {
     struct page_info *p;
 
@@ -422,7 +422,7 @@ static int setup_control_block(struct vcpu *v)
     return 0;
 }
 
-static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
+static int map_control_block(struct vcpu *v, gfn_t gfn, uint32_t offset)
 {
     void *virt;
     unsigned int i;
@@ -508,7 +508,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
 {
     struct domain *d = current->domain;
     uint32_t vcpu_id;
-    uint64_t gfn;
+    gfn_t gfn;
     uint32_t offset;
     struct vcpu *v;
     int rc;
@@ -516,7 +516,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
     init_control->link_bits = EVTCHN_FIFO_LINK_BITS;
 
     vcpu_id = init_control->vcpu;
-    gfn     = init_control->control_gfn;
+    gfn     = _gfn(init_control->control_gfn);
     offset  = init_control->offset;
 
     if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
@@ -578,7 +578,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
     return rc;
 }
 
-static int add_page_to_event_array(struct domain *d, unsigned long gfn)
+static int add_page_to_event_array(struct domain *d, gfn_t gfn)
 {
     void *virt;
     unsigned int slot;
@@ -628,7 +628,7 @@ int evtchn_fifo_expand_array(const struct evtchn_expand_array *expand_array)
         return -EOPNOTSUPP;
 
     spin_lock(&d->event_lock);
-    rc = add_page_to_event_array(d, expand_array->array_gfn);
+    rc = add_page_to_event_array(d, _gfn(expand_array->array_gfn));
     spin_unlock(&d->event_lock);
 
     return rc;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index d9b35a608c..2634a8b762 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1393,7 +1393,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             return rc;
         }
 
-        page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+        page = get_page_from_gfn(d, _gfn(xrfp.gpfn), NULL, P2M_ALLOC);
         if ( page )
         {
             rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
@@ -1664,7 +1664,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
     p2m_type_t p2mt;
     struct page_info *page;
 
-    page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
+    page = get_page_from_gfn(d, gfn, &p2mt, q);
 
 #ifdef CONFIG_HAS_MEM_PAGING
     if ( p2m_is_paging(p2mt) )
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 03f2ee75c1..5e50596fcb 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -298,7 +298,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
                                         p2m_type_t *t);
 
 static inline struct page_info *get_page_from_gfn(
-    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+    struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
 {
     mfn_t mfn;
     p2m_type_t _t;
@@ -309,7 +309,7 @@ static inline struct page_info *get_page_from_gfn(
      * not auto-translated.
      */
     if ( likely(d != dom_xen) )
-        return p2m_get_page_from_gfn(d, _gfn(gfn), t);
+        return p2m_get_page_from_gfn(d, gfn, t);
 
     if ( !t )
         t = &_t;
@@ -320,7 +320,7 @@ static inline struct page_info *get_page_from_gfn(
      * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the
      * page.
      */
-    mfn = _mfn(gfn);
+    mfn = _mfn(gfn_x(gfn));
     page = mfn_to_page(mfn);
 
     if ( !mfn_valid(mfn) || !get_page(page, d) )
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..ff4528baf9 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -491,18 +491,26 @@ struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
                                         p2m_query_t q);
 
 static inline struct page_info *get_page_from_gfn(
-    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+    struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
 {
     struct page_info *page;
+    mfn_t mfn;
 
     if ( paging_mode_translate(d) )
-        return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q);
+        return p2m_get_page_from_gfn(p2m_get_hostp2m(d), gfn, t, NULL, q);
 
     /* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
     if ( t )
         *t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
-    page = mfn_to_page(_mfn(gfn));
-    return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
+
+    mfn = _mfn(gfn_x(gfn));
+
+    if ( !mfn_valid(mfn) )
+        return NULL;
+
+    page = mfn_to_page(mfn);
+
+    return get_page(page, d) ? page : NULL;
 }
 
 /* General conversion function from mfn to gfn */
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

