[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH] x86/vmap: handle superpages in vmap_to_mfn()



From: Hongyan Xia <hongyxia@xxxxxxxxxx>

There is simply no guarantee that vmap won't return superpages to the
caller. It can happen if the list of MFNs is contiguous, or we simply
have a large granularity. Although rare, if such things do happen, we
will simply hit BUG_ON() and crash. Properly handle such cases in a new
implementation.

Note that vmap_to_mfn() is now too large to be a macro, so implement it
as a normal function and move the declaration to mm.h (page.h cannot
handle mfn_t).

Signed-off-by: Hongyan Xia <hongyxia@xxxxxxxxxx>
---
 xen/arch/x86/domain_page.c |  2 +-
 xen/arch/x86/mm.c          | 43 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/mm.h   |  2 ++
 xen/include/asm-x86/page.h |  2 --
 4 files changed, 46 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index eac5e3304fb8..4ba75d397a17 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -338,7 +338,7 @@ mfn_t domain_page_map_to_mfn(const void *ptr)
         return _mfn(virt_to_mfn(ptr));
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
-        return vmap_to_mfn(va);
+        return vmap_to_mfn(ptr);
 
     ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5a50339284c7..c22385e90d8a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5194,6 +5194,49 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
         }                                          \
     } while ( false )
 
+mfn_t vmap_to_mfn(const void *v)
+{
+    /*
+     * Translate a VMAP virtual address into its MFN by walking the Xen
+     * page tables by hand, so that 1G (L3) and 2M (L2) superpage mappings
+     * are resolved instead of assuming an L1 entry always exists.
+     */
+    bool locking = system_state > SYS_STATE_boot; /* consumed by L3T_LOCK() */
+    unsigned int l2_offset = l2_table_offset((unsigned long)v);
+    unsigned int l1_offset = l1_table_offset((unsigned long)v);
+    l3_pgentry_t *pl3e = virt_to_xen_l3e((unsigned long)v);
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+    struct page_info *l3page;
+    mfn_t ret;
+
+    ASSERT(pl3e);
+    l3page = virt_to_page(pl3e);
+    L3T_LOCK(l3page);
+
+    /* A vmap address must be mapped, whatever the mapping granularity. */
+    ASSERT(l3e_get_flags(*pl3e) & _PAGE_PRESENT);
+    if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
+    {
+        /* 1G superpage: the offset within it is the L2 plus L1 component. */
+        ret = mfn_add(l3e_get_mfn(*pl3e),
+                      (l2_offset << PAGETABLE_ORDER) + l1_offset);
+        L3T_UNLOCK(l3page);
+        return ret;
+    }
+
+    pl2e = map_l2t_from_l3e(*pl3e) + l2_offset;
+    ASSERT(l2e_get_flags(*pl2e) & _PAGE_PRESENT);
+    if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
+    {
+        /* 2M superpage: the offset within it is the L1 component. */
+        ret = mfn_add(l2e_get_mfn(*pl2e), l1_offset);
+        /* Don't leak the transient L2 table mapping on this early return. */
+        UNMAP_DOMAIN_PAGE(pl2e);
+        L3T_UNLOCK(l3page);
+        return ret;
+    }
+
+    pl1e = map_l1t_from_l2e(*pl2e) + l1_offset;
+    UNMAP_DOMAIN_PAGE(pl2e);
+    ASSERT(l1e_get_flags(*pl1e) & _PAGE_PRESENT);
+    ret = l1e_get_mfn(*pl1e);
+    L3T_UNLOCK(l3page);
+    UNMAP_DOMAIN_PAGE(pl1e);
+
+    return ret;
+}
+
 int map_pages_to_xen(
     unsigned long virt,
     mfn_t mfn,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index deeba75a1cbb..6354d165f48b 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -578,6 +578,8 @@ mfn_t alloc_xen_pagetable_new(void);
 void free_xen_pagetable_new(mfn_t mfn);
 
 l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
+mfn_t vmap_to_mfn(const void *v);
+#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
 
 int __sync_local_execstate(void);
 
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 7a771baf7cb3..b2bcc95fd2de 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -291,8 +291,6 @@ void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va)     l1e_get_mfn(*virt_to_xen_l1e((unsigned long)(va)))
-#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 #endif /* !defined(__ASSEMBLY__) */
 
-- 
2.17.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.