diff -r d1b0a5adaeab xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Nov 29 23:40:40 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Nov 30 11:13:37 2006 +0100
@@ -628,6 +628,13 @@ static int vmx_pae_enabled(struct vcpu *
 {
     unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
     return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
+}
+
+/* Works only for vcpu == current */
+static void vmx_update_host_cr3(struct vcpu *v)
+{
+    ASSERT(v == current);
+    __vmwrite(HOST_CR3, v->arch.cr3);
 }
 
 /* Setup HVM interfaces */
diff -r d1b0a5adaeab xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Wed Nov 29 23:40:40 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Thu Nov 30 11:13:37 2006 +0100
@@ -581,7 +581,7 @@ guest_index(void *ptr)
     return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
 }
 
-static inline u32
+static u32
 shadow_l1_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
@@ -593,7 +593,7 @@ shadow_l1_index(mfn_t *smfn, u32 guest_i
 #endif
 }
 
-static inline u32
+static u32
 shadow_l2_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
@@ -613,13 +613,13 @@ shadow_l2_index(mfn_t *smfn, u32 guest_i
 
 #if GUEST_PAGING_LEVELS >= 4
 
-static inline u32
+static u32
 shadow_l3_index(mfn_t *smfn, u32 guest_index)
 {
     return guest_index;
 }
 
-static inline u32
+static u32
 shadow_l4_index(mfn_t *smfn, u32 guest_index)
 {
     return guest_index;
diff -r d1b0a5adaeab xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Nov 29 23:40:40 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Nov 30 11:13:37 2006 +0100
@@ -272,13 +272,6 @@ static inline int vmx_lme_is_set(struct
     return efer & EFER_LME;
 }
 
-/* Works only for vcpu == current */
-static inline void vmx_update_host_cr3(struct vcpu *v)
-{
-    ASSERT(v == current);
-    __vmwrite(HOST_CR3, v->arch.cr3);
-}
-
 static inline int vmx_pgbit_test(struct vcpu *v)
 {
     unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;