diff -r 7b00193bd033 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/arch/x86/domctl.c	Tue Jun 29 15:11:14 2010 +0100
@@ -1420,6 +1420,7 @@
     break;
 #endif /* XEN_GDBSX_CONFIG */
 
+#ifdef __x86_64__
     case XEN_DOMCTL_mem_event_op:
     {
         struct domain *d;
@@ -1450,6 +1451,7 @@
         }
     }
     break;
+#endif /* __x86_64__ */
 
     default:
         ret = -ENOSYS;
diff -r 7b00193bd033 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Tue Jun 29 15:11:14 2010 +0100
@@ -982,6 +982,7 @@
         return 1;
     }
 
+#ifdef __x86_64__
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         p2m_mem_paging_populate(current->domain, gfn);
@@ -992,6 +993,7 @@
         mem_sharing_unshare_page(current->domain, gfn, 0);
         return 1;
     }
+#endif
 
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
diff -r 7b00193bd033 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/arch/x86/mm.c	Tue Jun 29 15:11:14 2010 +0100
@@ -3448,20 +3448,23 @@
                     rc = -ENOENT;
                     break;
                 }
+#ifdef __x86_64__
                 /* XXX: Ugly: pull all the checks into a separate function.
                  * Don't want to do it now, not to interfere with mem_paging
                  * patches */
                 else if ( p2m_ram_shared == l1e_p2mt )
                 {
                     /* Unshare the page for RW foreign mappings */
-                    if(l1e_get_flags(l1e) & _PAGE_RW)
+                    if ( l1e_get_flags(l1e) & _PAGE_RW )
                     {
                         rc = mem_sharing_unshare_page(pg_owner,
                                                       l1e_get_pfn(l1e),
                                                       0);
-                        if(rc) break;
+                        if ( rc )
+                            break;
                     }
                 }
+#endif
 
                 okay = mod_l1_entry(va, l1e, mfn,
                                     cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
@@ -4806,8 +4809,10 @@
         return rc;
     }
 
+#ifdef __x86_64__
     case XENMEM_get_sharing_freed_pages:
         return mem_sharing_get_nr_saved_mfns();
+#endif
 
     default:
         return subarch_memory_op(op, arg);
diff -r 7b00193bd033 xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/arch/x86/mm/Makefile	Tue Jun 29 15:11:14 2010 +0100
@@ -6,9 +6,9 @@
 obj-y += guest_walk_2.o
 obj-y += guest_walk_3.o
 obj-$(x86_64) += guest_walk_4.o
-obj-y += mem_event.o
-obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(x86_64) += mem_event.o
+obj-$(x86_64) += mem_paging.o
+obj-$(x86_64) += mem_sharing.o
 
 guest_walk_%.o: guest_walk.c Makefile
 	$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r 7b00193bd033 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c	Tue Jun 29 15:11:14 2010 +0100
@@ -1868,17 +1868,23 @@
 {
     struct page_info *pg;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+#ifdef __x86_64__
     unsigned long gfn;
    p2m_type_t t;
    mfn_t mfn;
+#endif
 
     p2m_lock(p2m);
-    for(gfn=0; gfn < p2m->max_mapped_pfn; gfn++)
+
+#ifdef __x86_64__
+    for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
         mfn = p2m->get_entry(d, gfn, &t, p2m_query);
-        if(mfn_valid(mfn) && (t == p2m_ram_shared))
+        if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
     }
+#endif
+
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
@@ -2616,6 +2622,7 @@
     return rc;
 }
 
+#ifdef __x86_64__
 int
 set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
@@ -2798,7 +2805,7 @@
     /* Unpause any domains that were paused because the ring was full */
     mem_event_unpause_vcpus(d);
 }
-
+#endif /* __x86_64__ */
 
 /*
  * Local variables:
diff -r 7b00193bd033 xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/include/asm-x86/mem_sharing.h	Tue Jun 29 15:11:14 2010 +0100
@@ -22,6 +22,8 @@
 #ifndef __MEM_SHARING_H__
 #define __MEM_SHARING_H__
 
+#ifdef __x86_64__
+
 #define sharing_supported(_d) \
     (is_hvm_domain(_d) && paging_mode_hap(_d))
 
@@ -43,4 +45,10 @@
                        xen_domctl_mem_sharing_op_t *mec);
 void mem_sharing_init(void);
 
+#else
+
+#define mem_sharing_init()  do { } while (0)
+
+#endif /* __x86_64__ */
+
 #endif /* __MEM_SHARING_H__ */
diff -r 7b00193bd033 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h	Mon Jun 28 17:40:16 2010 +0100
+++ b/xen/include/asm-x86/p2m.h	Tue Jun 29 15:11:14 2010 +0100
@@ -78,11 +78,12 @@
     p2m_grant_map_rw = 7,         /* Read/write grant mapping */
     p2m_grant_map_ro = 8,         /* Read-only grant mapping */
 
+    /* Likewise, although these are defined in all builds, they can only
+     * be used in 64-bit builds */
     p2m_ram_paging_out = 9,       /* Memory that is being paged out */
     p2m_ram_paged = 10,           /* Memory that has been paged out */
     p2m_ram_paging_in = 11,       /* Memory that is being paged in */
     p2m_ram_paging_in_start = 12, /* Memory that is being paged in */
-
     p2m_ram_shared = 13,          /* Shared or sharable memory */
 } p2m_type_t;
 
@@ -154,6 +155,7 @@
 #define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
+
 
 /* Populate-on-demand */
 #define POPULATE_ON_DEMAND_MFN (1<<9)
@@ -323,20 +325,21 @@
                                        int must_succeed)
 {
     mfn_t mfn;
-    int ret;
 
     mfn = gfn_to_mfn(d, gfn, p2mt);
-    if(p2m_is_shared(*p2mt))
+#ifdef __x86_64__
+    if ( p2m_is_shared(*p2mt) )
     {
-        ret = mem_sharing_unshare_page(d, gfn,
-                must_succeed ? MEM_SHARING_MUST_SUCCEED : 0);
-        if(ret < 0)
+        if ( mem_sharing_unshare_page(d, gfn,
+                                      must_succeed
+                                      ? MEM_SHARING_MUST_SUCCEED : 0) )
         {
             BUG_ON(must_succeed);
            return mfn;
        }
        mfn = gfn_to_mfn(d, gfn, p2mt);
    }
+#endif
 
     return mfn;
 }
@@ -438,10 +441,11 @@
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
 
+
+
+#ifdef __x86_64__
 /* Modify p2m table for shared gfn */
-int
-set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
-
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
 /* Check if a nominated gfn is valid to be paged out */
 int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
@@ -452,6 +456,10 @@
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d);
+#else
+static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
+{ }
+#endif
 
 struct page_info *p2m_alloc_ptp(struct domain *d, unsigned long type);