
[Xen-changelog] [xen-4.0-testing] x86: Only build memory-event features on 64-bit Xen



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1277918576 -3600
# Node ID d86c4f5470e211ff1eee6b44e174c4c6c1e629a9
# Parent  4004f12e5135eec1e77c1f1e67bf70753e729863
x86: Only build memory-event features on 64-bit Xen

32-bit Xen doesn't have enough p2m types to support them.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
xen-unstable changeset:   21693:6b5a5bfaf357
xen-unstable date:        Tue Jun 29 18:16:41 2010 +0100
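
Background: the mem_event, mem_paging and mem_sharing features track their
state in the high p2m types (p2m_ram_paging_out through p2m_ram_shared,
values 9-13 in the enum below), and the type is packed into the scarce
software-available bits of the p2m entry, of which 32-bit PAE builds appear
to have too few; hence the compile-time gate. Throughout, the patch uses a
stub idiom so that common code needs almost no #ifdefs. A minimal sketch of
that idiom, using hypothetical mem_foo_* names modelled on the real
mem_sharing_init() and p2m_mem_paging_populate() stubs added below:

    #ifdef __x86_64__
    /* Real declarations: compiled and linked only on 64-bit Xen. */
    void mem_foo_init(void);
    void mem_foo_populate(struct domain *d, unsigned long gfn);
    #else
    /* 32-bit builds see no-op stubs, so callers need no #ifdef of their own. */
    #define mem_foo_init() do { } while (0)
    static inline void mem_foo_populate(struct domain *d, unsigned long gfn) { }
    #endif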
---
 xen/arch/x86/domctl.c             |    2 ++
 xen/arch/x86/hvm/hvm.c            |    2 ++
 xen/arch/x86/mm.c                 |    9 +++++++--
 xen/arch/x86/mm/Makefile          |    6 +++---
 xen/arch/x86/mm/p2m.c             |   14 +++++++++++---
 xen/include/asm-x86/mem_sharing.h |    8 ++++++++
 xen/include/asm-x86/p2m.h         |   26 +++++++++++++++++---------
 7 files changed, 50 insertions(+), 17 deletions(-)

diff -r 4004f12e5135 -r d86c4f5470e2 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/arch/x86/domctl.c     Wed Jun 30 18:22:56 2010 +0100
@@ -1418,6 +1418,7 @@ long arch_do_domctl(
     break;
 #endif /* XEN_GDBSX_CONFIG */
 
+#ifdef __x86_64__
     case XEN_DOMCTL_mem_event_op:
     {
         struct domain *d;
@@ -1448,6 +1449,7 @@ long arch_do_domctl(
         } 
     }
     break;
+#endif /* __x86_64__ */
 
     default:
         ret = -ENOSYS;
diff -r 4004f12e5135 -r d86c4f5470e2 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Jun 30 18:22:56 2010 +0100
@@ -947,6 +947,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
         return 1;
     }
 
+#ifdef __x86_64__
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         p2m_mem_paging_populate(current->domain, gfn);
@@ -957,6 +958,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
         mem_sharing_unshare_page(current->domain, gfn, 0);
         return 1;
     }
+#endif
  
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
diff -r 4004f12e5135 -r d86c4f5470e2 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/arch/x86/mm.c Wed Jun 30 18:22:56 2010 +0100
@@ -3160,20 +3160,23 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
+#ifdef __x86_64__
                     /* XXX: Ugly: pull all the checks into a separate function.
                      * Don't want to do it now, not to interfere with mem_paging
                      * patches */
                     else if ( p2m_ram_shared == l1e_p2mt )
                     {
                         /* Unshare the page for RW foreign mappings */
-                        if(l1e_get_flags(l1e) & _PAGE_RW)
+                        if ( l1e_get_flags(l1e) & _PAGE_RW )
                         {
                             rc = mem_sharing_unshare_page(pg_owner, 
                                                           l1e_get_pfn(l1e), 
                                                           0);
-                            if(rc) break; 
+                            if ( rc )
+                                break; 
                         }
                     } 
+#endif
 
                     okay = mod_l1_entry(va, l1e, mfn,
                                         cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
@@ -4518,8 +4521,10 @@ long arch_memory_op(int op, XEN_GUEST_HA
         return rc;
     }
 
+#ifdef __x86_64__
     case XENMEM_get_sharing_freed_pages:
         return mem_sharing_get_nr_saved_mfns();
+#endif
 
     default:
         return subarch_memory_op(op, arg);
diff -r 4004f12e5135 -r d86c4f5470e2 xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile  Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/arch/x86/mm/Makefile  Wed Jun 30 18:22:56 2010 +0100
@@ -6,9 +6,9 @@ obj-y += guest_walk_2.o
 obj-y += guest_walk_2.o
 obj-y += guest_walk_3.o
 obj-$(x86_64) += guest_walk_4.o
-obj-y += mem_event.o
-obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(x86_64) += mem_event.o
+obj-$(x86_64) += mem_paging.o
+obj-$(x86_64) += mem_sharing.o
 
 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r 4004f12e5135 -r d86c4f5470e2 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c     Wed Jun 30 18:22:56 2010 +0100
@@ -1709,17 +1709,24 @@ void p2m_teardown(struct domain *d)
 {
     struct page_info *pg;
     struct p2m_domain *p2m = d->arch.p2m;
+#ifdef __x86_64__
     unsigned long gfn;
     p2m_type_t t;
     mfn_t mfn;
+#endif
 
     p2m_lock(p2m);
-    for(gfn=0; gfn < p2m->max_mapped_pfn; gfn++)
+
+
+#ifdef __x86_64__
+    for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
         mfn = p2m->get_entry(d, gfn, &t, p2m_query);
-        if(mfn_valid(mfn) && (t == p2m_ram_shared))
+        if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
     }
+#endif
+
     d->arch.phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
@@ -2415,6 +2422,7 @@ clear_mmio_p2m_entry(struct domain *d, u
     return rc;
 }
 
+#ifdef __x86_64__
 int
 set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
@@ -2597,7 +2605,7 @@ void p2m_mem_paging_resume(struct domain
     /* Unpause any domains that were paused because the ring was full */
     mem_event_unpause_vcpus(d);
 }
-
+#endif /* __x86_64__ */
 
 /*
  * Local variables:
diff -r 4004f12e5135 -r d86c4f5470e2 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/include/asm-x86/mem_sharing.h Wed Jun 30 18:22:56 2010 +0100
@@ -22,6 +22,8 @@
 #ifndef __MEM_SHARING_H__
 #define __MEM_SHARING_H__
 
+#ifdef __x86_64__
+
 #define sharing_supported(_d) \
     (is_hvm_domain(_d) && (_d)->arch.hvm_domain.hap_enabled) 
 
@@ -43,4 +45,10 @@ int mem_sharing_domctl(struct domain *d,
                        xen_domctl_mem_sharing_op_t *mec);
 void mem_sharing_init(void);
 
+#else 
+
+#define mem_sharing_init()  do { } while (0)
+
+#endif /* __x86_64__ */
+
 #endif /* __MEM_SHARING_H__ */
diff -r 4004f12e5135 -r d86c4f5470e2 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Wed Jun 30 18:20:11 2010 +0100
+++ b/xen/include/asm-x86/p2m.h Wed Jun 30 18:22:56 2010 +0100
@@ -77,11 +77,12 @@ typedef enum {
     p2m_grant_map_rw = 7,       /* Read/write grant mapping */
     p2m_grant_map_ro = 8,       /* Read-only grant mapping */
 
+    /* Likewise, although these are defined in all builds, they can only
+     * be used in 64-bit builds */
     p2m_ram_paging_out = 9,       /* Memory that is being paged out */
     p2m_ram_paged = 10,           /* Memory that has been paged out */
     p2m_ram_paging_in = 11,       /* Memory that is being paged in */
     p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
-
     p2m_ram_shared = 13,          /* Shared or sharable memory */
 } p2m_type_t;
 
@@ -153,6 +154,7 @@ typedef enum {
 #define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
+
 
 /* Populate-on-demand */
 #define POPULATE_ON_DEMAND_MFN  (1<<9)
@@ -314,20 +316,21 @@ static inline mfn_t gfn_to_mfn_unshare(s
                                        int must_succeed)
 {
     mfn_t mfn;
-    int ret;
 
     mfn = gfn_to_mfn(d, gfn, p2mt);
-    if(p2m_is_shared(*p2mt))
+#ifdef __x86_64__
+    if ( p2m_is_shared(*p2mt) )
     {
-        ret = mem_sharing_unshare_page(d, gfn,
-                must_succeed ? MEM_SHARING_MUST_SUCCEED : 0);
-        if(ret < 0)
+        if ( mem_sharing_unshare_page(d, gfn,
+                                      must_succeed 
+                                      ? MEM_SHARING_MUST_SUCCEED : 0) )
         {
             BUG_ON(must_succeed);
             return mfn;
         }
         mfn = gfn_to_mfn(d, gfn, p2mt);
     }
+#endif
 
     return mfn;
 }
@@ -429,10 +432,11 @@ p2m_type_t p2m_change_type(struct domain
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+
+
+#ifdef __x86_64__
 /* Modify p2m table for shared gfn */
-int
-set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
-
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 /* Check if a nominated gfn is valid to be paged out */
 int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
 /* Evict a frame */
@@ -443,6 +447,10 @@ int p2m_mem_paging_prep(struct domain *d
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d);
+#else
+static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+{ }
+#endif
 
 #endif /* _XEN_P2M_H */
 

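With those stubs in place, most common code can stay unconditional; only the
paths that use the richer API directly (e.g. the hvm.c fault handler above,
which also tests p2m_ram_paging_out and calls mem_sharing_unshare_page())
keep an explicit #ifdef __x86_64__. An illustrative caller, assuming only the
p2m_is_paged() predicate and the p2m_mem_paging_populate() hook from the
patch:

    /* On 64-bit this asks the paging machinery to bring the frame back in;
     * on 32-bit the inline stub from p2m.h turns it into a no-op. */
    if ( p2m_is_paged(p2mt) )
        p2m_mem_paging_populate(d, gfn);
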
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog