[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v6 1/9] x86/mm: add pv prefix to {alloc, free}_page_type



The two functions are only used by PV code paths because:

1. To allocate a PGT_l*_page_table type page, a DomU must explicitly
   request such types via a PV MMU hypercall.
2. PV Dom0 builder explicitly asks for PGT_l*_page_table type pages,
   but it is obviously PV only.
3. p2m_alloc_ptp explicitly sets PGT_l1_page_table, but the allocation
   and deallocation of such pages don't go through the two functions
   touched in this patch.
4. shadow_enable explicitly sets PGT_l2_page_table, but the allocation
   and deallocation of such pages don't go through the two functions
   touched in this patch.

Also move the declarations to pv/mm.h. The code itself will be moved in a
later patch. Take the chance to change the type of the preemptible
parameter from int to bool.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/domain.c       |  2 +-
 xen/arch/x86/mm.c           | 14 +++++++-------
 xen/include/asm-x86/mm.h    |  3 ---
 xen/include/asm-x86/pv/mm.h | 11 +++++++++++
 4 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f93327b0a2..bc80e4f90e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1905,7 +1905,7 @@ static int relinquish_memory(
             if ( likely(y == x) )
             {
                 /* No need for atomic update of type_info here: noone else 
updates it. */
-                switch ( ret = free_page_type(page, x, 1) )
+                switch ( ret = pv_free_page_type(page, x, true) )
                 {
                 case 0:
                     break;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 35f204369b..97ec467002 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2301,8 +2301,8 @@ static void get_page_light(struct page_info *page)
     while ( unlikely(y != x) );
 }
 
-static int alloc_page_type(struct page_info *page, unsigned long type,
-                           int preemptible)
+int pv_alloc_page_type(struct page_info *page, unsigned long type,
+                       bool preemptible)
 {
     struct domain *owner = page_get_owner(page);
     int rc;
@@ -2331,7 +2331,7 @@ static int alloc_page_type(struct page_info *page, 
unsigned long type,
         rc = alloc_segdesc_page(page);
         break;
     default:
-        printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%lx\n",
+        printk("Bad type in %s %lx t=%" PRtype_info " c=%lx\n", __func__,
                type, page->u.inuse.type_info,
                page->count_info);
         rc = -EINVAL;
@@ -2375,8 +2375,8 @@ static int alloc_page_type(struct page_info *page, 
unsigned long type,
 }
 
 
-int free_page_type(struct page_info *page, unsigned long type,
-                   int preemptible)
+int pv_free_page_type(struct page_info *page, unsigned long type,
+                      bool preemptible)
 {
     struct domain *owner = page_get_owner(page);
     unsigned long gmfn;
@@ -2433,7 +2433,7 @@ int free_page_type(struct page_info *page, unsigned long 
type,
 static int _put_final_page_type(struct page_info *page, unsigned long type,
                                 bool preemptible, struct page_info *ptpg)
 {
-    int rc = free_page_type(page, type, preemptible);
+    int rc = pv_free_page_type(page, type, preemptible);
 
     /* No need for atomic update of type_info here: noone else updates it. */
     if ( rc == 0 )
@@ -2695,7 +2695,7 @@ static int _get_page_type(struct page_info *page, 
unsigned long type,
             page->partial_pte = 0;
         }
         page->linear_pt_count = 0;
-        rc = alloc_page_type(page, type, preemptible);
+        rc = pv_alloc_page_type(page, type, preemptible);
     }
 
     if ( (x & PGT_partial) && !(nx & PGT_partial) )
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 3013c266fe..741c98575e 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -338,9 +338,6 @@ static inline void *__page_to_virt(const struct page_info 
*pg)
                     (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
 }
 
-int free_page_type(struct page_info *page, unsigned long type,
-                   int preemptible);
-
 void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
 void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
                        const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
index 246b99014c..abf798b541 100644
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -31,6 +31,10 @@ void pv_destroy_gdt(struct vcpu *v);
 bool pv_map_ldt_shadow_page(unsigned int off);
 bool pv_destroy_ldt(struct vcpu *v);
 
+int pv_alloc_page_type(struct page_info *page, unsigned long type,
+                       bool preemptible);
+int pv_free_page_type(struct page_info *page, unsigned long type,
+                      bool preemptible);
 #else
 
 #include <xen/errno.h>
@@ -52,6 +56,13 @@ static inline bool pv_map_ldt_shadow_page(unsigned int off) 
{ return false; }
 static inline bool pv_destroy_ldt(struct vcpu *v)
 { ASSERT_UNREACHABLE(); return false; }
 
+static inline int pv_alloc_page_type(struct page_info *page, unsigned long 
type,
+                                     bool preemptible)
+{ ASSERT_UNREACHABLE(); return -EINVAL; }
+static inline int pv_free_page_type(struct page_info *page, unsigned long type,
+                                    bool preemptible)
+{ ASSERT_UNREACHABLE(); return -EINVAL; }
+
 #endif
 
 #endif /* __X86_PV_MM_H__ */
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.