[Xen-changelog] [xen-unstable] [VMX] Get rid of special vm_launch schedule tail.
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 1db00df48218a4f58ca75910de9273fb4672af19
# Parent  b2668cc03914ebd65d54b6c5a7d0ee6bf102357c
[VMX] Get rid of special vm_launch schedule tail.

This required various hacking, including getting rid of the implicit
vcpu==current assumption in __vmwrite() and a couple of tweaks to the
shadow code.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        |  112 +++++++++++++++----------------------
 xen/arch/x86/hvm/vmx/vmx.c         |   76 +++++++++++++------------
 xen/arch/x86/mm/shadow/common.c    |   10 ---
 xen/arch/x86/mm/shadow/multi.c     |    2 
 xen/include/asm-x86/hvm/vmx/vmcs.h |    1 
 xen/include/asm-x86/hvm/vmx/vmx.h  |   70 -----------------------
 6 files changed, 91 insertions(+), 180 deletions(-)
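
The key mechanism: guest CR0/CR4 values are now mirrored in software in
struct arch_vmx_struct (cpu_cr0, cpu_shadow_cr0, cpu_shadow_cr4), so readers
no longer need the vcpu's VMCS to be loaded on the current CPU. A minimal
self-contained sketch of the write-through-cache pattern (toy names and a
simulated VMCS field, not the real Xen interfaces):

    #include <stdio.h>

    #define X86_CR0_TS 0x00000008UL

    /* Toy stand-ins: one "VMCS field" plus its software mirror. */
    struct vmx_state {
        unsigned long vmcs_guest_cr0;   /* would live in the VMCS */
        unsigned long cpu_cr0;          /* software copy, always valid */
    };

    /* Writes go to the mirror first, then to the (simulated) VMCS. */
    static void set_guest_cr0(struct vmx_state *s, unsigned long val)
    {
        s->cpu_cr0 = val;
        s->vmcs_guest_cr0 = s->cpu_cr0; /* real code: __vmwrite(GUEST_CR0, ...) */
    }

    /* Reads never touch the VMCS, so any vcpu's state can be inspected
     * without that vcpu being 'current'. */
    static unsigned long get_guest_cr0(const struct vmx_state *s)
    {
        return s->cpu_cr0;
    }

    int main(void)
    {
        struct vmx_state s = { 0, 0 };
        set_guest_cr0(&s, X86_CR0_TS);
        printf("cr0=%#lx (TS %s)\n", get_guest_cr0(&s),
               (get_guest_cr0(&s) & X86_CR0_TS) ? "set" : "clear");
        return 0;
    }
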
diff -r b2668cc03914 -r 1db00df48218 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 06 13:13:04 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 06 15:40:30 2006 +0000
@@ -285,12 +285,9 @@ static void vmx_set_host_env(struct vcpu
     error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
 }
 
-/* Update CR3, CR0, CR4, GDT, LDT, TR */
+#if 0
 static void vmx_do_launch(struct vcpu *v)
 {
-    unsigned int error = 0;
-    unsigned long cr0, cr4;
-
     if ( v->vcpu_id != 0 )
     {
         /* Sync AP's TSC with BSP's */
@@ -298,62 +295,13 @@ static void vmx_do_launch(struct vcpu *v
             v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
         hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
     }
-
-    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
-
-    error |= __vmwrite(GUEST_CR0, cr0);
-    cr0 &= ~X86_CR0_PG;
-    error |= __vmwrite(CR0_READ_SHADOW, cr0);
-    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
-    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
-
-    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );
-
-    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
-    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
-
-    error |= __vmwrite(CR4_READ_SHADOW, cr4);
-
-    hvm_stts(v);
-
-    if ( vlapic_init(v) == 0 )
-    {
-#ifdef __x86_64__
-        u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
-        u64 vapic_page_addr =
-            page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page);
-
-        *cpu_exec_control |= CPU_BASED_TPR_SHADOW;
-        *cpu_exec_control &= ~CPU_BASED_CR8_STORE_EXITING;
-        *cpu_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING;
-        error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
-        error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR, vapic_page_addr);
-        error |= __vmwrite(TPR_THRESHOLD, 0);
-#endif
-    }
-
-    vmx_set_host_env(v);
-    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-
-    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
-    error |= __vmwrite(GUEST_LDTR_BASE, 0);
-    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
-
-    error |= __vmwrite(GUEST_TR_BASE, 0);
-    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
-
-    shadow_update_paging_modes(v);
-
-    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
-    __vmwrite(HOST_CR3, v->arch.cr3);
-
-    v->arch.schedule_tail = arch_vmx_do_resume;
-}
+}
+#endif
 
 static int construct_vmcs(struct vcpu *v)
 {
     int error = 0;
-    unsigned long tmp;
+    unsigned long tmp, cr0, cr4;
     union vmcs_arbytes arbytes;
 
     vmx_vmcs_enter(v);
@@ -362,6 +310,8 @@ static int construct_vmcs(struct vcpu *v
     error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
     error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
     error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
+    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
+    v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
 
     /* Host data selectors. */
     error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
@@ -465,6 +415,48 @@ static int construct_vmcs(struct vcpu *v
     error |= __vmwrite(EXCEPTION_BITMAP,
                        MONITOR_DEFAULT_EXCEPTION_BITMAP);
 
+    /* Guest CR0. */
+    cr0 = read_cr0();
+    v->arch.hvm_vmx.cpu_cr0 = cr0;
+    error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+
+    /* Guest CR4. */
+    cr4 = read_cr4();
+    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
+    v->arch.hvm_vmx.cpu_shadow_cr4 =
+        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
+    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+
+    /* XXX Move this out. */
+    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
+    if ( vlapic_init(v) != 0 )
+        return -1;
+
+#ifdef __x86_64__
+    /* VLAPIC TPR optimisation. */
+    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
+    v->arch.hvm_vcpu.u.vmx.exec_control &=
+        ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
+    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
+                       v->arch.hvm_vcpu.u.vmx.exec_control);
+    error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
+                       page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page));
+    error |= __vmwrite(TPR_THRESHOLD, 0);
+#endif
+
+    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
+    error |= __vmwrite(GUEST_LDTR_BASE, 0);
+    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
+
+    error |= __vmwrite(GUEST_TR_BASE, 0);
+    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
+
+    shadow_update_paging_modes(v);
+    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+    __vmwrite(HOST_CR3, v->arch.cr3);
+
     vmx_vmcs_exit(v);
 
     return error;
@@ -533,14 +525,6 @@ void arch_vmx_do_resume(struct vcpu *v)
     hvm_do_resume(v);
     reset_stack_and_jump(vmx_asm_do_vmentry);
 }
-
-void arch_vmx_do_launch(struct vcpu *v)
-{
-    vmx_load_vmcs(v);
-    vmx_do_launch(v);
-    reset_stack_and_jump(vmx_asm_do_vmentry);
-}
-
 
 /* Dump a section of VMCS */
 static void print_section(char *header, uint32_t start,
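
With all of the one-time guest-state setup folded into construct_vmcs(), the
first entry into the guest no longer needs its own tail: a vcpu's
schedule_tail points at arch_vmx_do_resume for its whole lifetime. A
self-contained model of the before/after control flow (hypothetical toy
functions, not the Xen scheduler):

    #include <stdio.h>

    struct vcpu { void (*schedule_tail)(struct vcpu *v); };

    static void do_resume(struct vcpu *v) { (void)v; puts("enter guest"); }

    /* Old scheme: a self-replacing, one-shot launch tail. */
    static void do_launch(struct vcpu *v)
    {
        puts("late guest-state setup"); /* now done once, in construct_vmcs() */
        v->schedule_tail = do_resume;   /* flip to the steady-state tail */
        do_resume(v);
    }

    int main(void)
    {
        struct vcpu before = { do_launch }; /* old: launch first, then resume */
        before.schedule_tail(&before);
        before.schedule_tail(&before);

        struct vcpu after = { do_resume }; /* new: resume from the start */
        after.schedule_tail(&after);
        return 0;
    }
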
diff -r b2668cc03914 -r 1db00df48218 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 06 13:13:04 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 06 15:40:30 2006 +0000
@@ -59,7 +59,7 @@ static int vmx_vcpu_initialise(struct vc
 
     spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
 
-    v->arch.schedule_tail    = arch_vmx_do_launch;
+    v->arch.schedule_tail    = arch_vmx_do_resume;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
 
@@ -474,10 +474,10 @@ static void vmx_store_cpu_guest_regs(
 
     if ( crs != NULL )
     {
-        __vmread(CR0_READ_SHADOW, &crs[0]);
+        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
         crs[2] = v->arch.hvm_vmx.cpu_cr2;
         __vmread(GUEST_CR3, &crs[3]);
-        __vmread(CR4_READ_SHADOW, &crs[4]);
+        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
     }
 
     vmx_vmcs_exit(v);
@@ -570,8 +570,6 @@ static unsigned long vmx_get_ctrl_reg(st
 /* Make sure that xen intercepts any FP accesses from current */
 static void vmx_stts(struct vcpu *v)
 {
-    unsigned long cr0;
-
     /* VMX depends on operating on the current vcpu */
     ASSERT(v == current);
 
@@ -581,11 +579,10 @@ static void vmx_stts(struct vcpu *v)
      * then this is not necessary: no FPU activity can occur until the guest
      * clears CR0.TS, and we will initialise the FPU when that happens.
      */
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
-    if ( !(cr0 & X86_CR0_TS) )
-    {
-        __vmread_vcpu(v, GUEST_CR0, &cr0);
-        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
+    {
+        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
         __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
     }
 }
@@ -662,6 +659,12 @@ static int vmx_guest_x86_mode(struct vcp
             X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
 }
 
+static int vmx_pae_enabled(struct vcpu *v)
+{
+    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
+}
+
 /* Setup HVM interfaces */
 static void vmx_setup_hvm_funcs(void)
 {
@@ -811,19 +814,16 @@ static int vmx_do_page_fault(unsigned lo
 
 static void vmx_do_no_device_fault(void)
 {
-    unsigned long cr0;
     struct vcpu *v = current;
 
     setup_fpu(current);
     __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
 
     /* Disable TS in guest CR0 unless the guest wants the exception too. */
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
-    if ( !(cr0 & X86_CR0_TS) )
-    {
-        __vmread_vcpu(v, GUEST_CR0, &cr0);
-        cr0 &= ~X86_CR0_TS;
-        __vmwrite(GUEST_CR0, cr0);
+    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
+    {
+        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
     }
 }
 
@@ -1158,9 +1158,9 @@ static int vmx_world_save(struct vcpu *v
     error |= __vmread(GUEST_RSP, &c->esp);
    error |= __vmread(GUEST_RFLAGS, &c->eflags);
 
-    error |= __vmread(CR0_READ_SHADOW, &c->cr0);
+    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     c->cr3 = v->arch.hvm_vmx.cpu_cr3;
-    error |= __vmread(CR4_READ_SHADOW, &c->cr4);
+    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
 
     error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
     error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
@@ -1220,7 +1220,8 @@ static int vmx_world_restore(struct vcpu
     error |= __vmwrite(GUEST_RSP, c->esp);
     error |= __vmwrite(GUEST_RFLAGS, c->eflags);
 
-    error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
+    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
+    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
 
     if (!vmx_paging_enabled(v))
         goto skip_cr3;
@@ -1270,7 +1271,8 @@ static int vmx_world_restore(struct vcpu
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
 
     error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
-    error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
+    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
+    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
     error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -1408,7 +1410,7 @@ static int vmx_set_cr0(unsigned long val
     /*
      * CR0: We don't want to lose PE and PG.
      */
-    __vmread_vcpu(v, CR0_READ_SHADOW, &old_cr0);
+    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     paging_enabled = (old_cr0 & X86_CR0_PE) && (old_cr0 & X86_CR0_PG);
 
     /* TS cleared? Then initialise FPU now. */
@@ -1418,8 +1420,11 @@ static int vmx_set_cr0(unsigned long val
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
     }
 
-    __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
-    __vmwrite(CR0_READ_SHADOW, value);
+    v->arch.hvm_vmx.cpu_cr0 = value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE;
+    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+
+    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
 
@@ -1655,9 +1660,9 @@ static int mov_to_cr(int gp, int cr, str
     }
     case 4: /* CR4 */
     {
-        __vmread(CR4_READ_SHADOW, &old_cr);
-
-        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
+        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
+
+        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
         {
             if ( vmx_pgbit_test(v) )
             {
@@ -1706,7 +1711,8 @@ static int mov_to_cr(int gp, int cr, str
         }
 
         __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
-        __vmwrite(CR4_READ_SHADOW, value);
+        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
+        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
         /*
          * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
@@ -1804,16 +1810,14 @@ static int vmx_cr_access(unsigned long e
         setup_fpu(v);
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
-        __vmread_vcpu(v, GUEST_CR0, &value);
-        value &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(GUEST_CR0, value);
-
-        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
-        value &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(CR0_READ_SHADOW, value);
+        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+
+        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
         break;
     case TYPE_LMSW:
-        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
+        value = v->arch.hvm_vmx.cpu_shadow_cr0;
         value = (value & ~0xF) |
             (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
         TRACE_VMEXIT(1, TYPE_LMSW);
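
The vmx.c changes all follow the same discipline: read guest control-register
state from the software copy, and on writes update the copy first and then
VMWRITE the copy, so the two can never diverge. The lazy-FPU paths (vmx_stts,
vmx_do_no_device_fault, the CLTS/LMSW handlers) are the clearest examples. A
runnable toy of that invariant (simulated VMCS field, not the Xen API):

    #include <assert.h>
    #include <stdio.h>

    #define X86_CR0_TS 0x00000008UL

    static unsigned long vmcs_guest_cr0; /* simulated VMCS field */
    static unsigned long cpu_cr0;        /* software copy */

    static void vmwrite_guest_cr0(void) { vmcs_guest_cr0 = cpu_cr0; }

    static void stts(void) /* trap guest FPU use: set CR0.TS */
    {
        cpu_cr0 |= X86_CR0_TS; /* 1. update the copy   */
        vmwrite_guest_cr0();   /* 2. push the copy out */
    }

    static void clts(void) /* guest owns the FPU again: clear CR0.TS */
    {
        cpu_cr0 &= ~X86_CR0_TS;
        vmwrite_guest_cr0();
    }

    int main(void)
    {
        stts();
        assert(cpu_cr0 == vmcs_guest_cr0); /* copy and VMCS in lockstep */
        clts();
        assert(cpu_cr0 == vmcs_guest_cr0);
        puts("cache and VMCS stayed consistent");
        return 0;
    }
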
diff -r b2668cc03914 -r 1db00df48218 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c	Mon Nov 06 13:13:04 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c	Mon Nov 06 15:40:30 2006 +0000
@@ -2273,15 +2273,6 @@ void sh_update_paging_modes(struct vcpu 
     //  - changes in CR0.PG, CR4.PAE, CR4.PSE, or CR4.PGE
     //
 
-    // Avoid determining the current shadow mode for uninitialized CPUs, as
-    // we can not yet determine whether it is an HVM or PV domain.
-    //
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-    {
-        SHADOW_PRINTK("%s: postponing determination of shadow mode\n", __func__);
-        return;
-    }
-
     // First, tear down any old shadow tables held by this vcpu.
     //
     shadow_detach_old_tables(v);
 
@@ -2316,7 +2307,6 @@ void sh_update_paging_modes(struct vcpu 
         v->arch.shadow.translate_enabled = !!hvm_paging_enabled(v);
         if ( !v->arch.shadow.translate_enabled )
         {
-            
             /* Set v->arch.guest_table to use the p2m map, and choose
              * the appropriate shadow mode */
             old_guest_table = pagetable_get_mfn(v->arch.guest_table);
diff -r b2668cc03914 -r 1db00df48218 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c	Mon Nov 06 13:13:04 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Mon Nov 06 15:40:30 2006 +0000
@@ -3357,7 +3357,7 @@ sh_update_cr3(struct vcpu *v)
 
     sh_detach_old_tables(v);
 
-    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+    if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
    {
         ASSERT(v->arch.cr3 == 0);
         return;
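
The shadow tweaks exist because construct_vmcs() now calls
shadow_update_paging_modes() before the vcpu is marked initialised, so the
early-outs keyed on _VCPUF_initialised had to stop firing for HVM vcpus. The
sh_update_cr3() guard change as a truth table (toy predicates mirroring the
patch, not the Xen code):

    #include <stdbool.h>
    #include <stdio.h>

    /* old guard: bail whenever the vcpu is not yet initialised */
    static bool bail_old(bool is_hvm, bool initialised)
    {
        (void)is_hvm;
        return !initialised;
    }

    /* new guard: an uninitialised HVM vcpu proceeds anyway, because
     * VMCS construction needs its shadow state built early */
    static bool bail_new(bool is_hvm, bool initialised)
    {
        return !is_hvm && !initialised;
    }

    int main(void)
    {
        printf("uninitialised HVM vcpu: old bails=%d, new bails=%d\n",
               bail_old(true, false), bail_new(true, false));
        printf("uninitialised PV  vcpu: old bails=%d, new bails=%d\n",
               bail_old(false, false), bail_new(false, false));
        return 0;
    }
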
diff -r b2668cc03914 -r 1db00df48218 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Nov 06 13:13:04 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Nov 06 15:40:30 2006 +0000
@@ -76,7 +76,6 @@ struct arch_vmx_struct {
     unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
     unsigned long        cpu_cr2; /* save CR2 */
     unsigned long        cpu_cr3;
-    unsigned long        cpu_based_exec_control;
     struct vmx_msr_state msr_content;
     unsigned long        vmxassist_enabled:1;
 };
diff -r b2668cc03914 -r 1db00df48218 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Nov 06 13:13:04 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Nov 06 15:40:30 2006 +0000
@@ -30,7 +30,6 @@ extern void vmx_asm_do_vmentry(void);
 extern void vmx_asm_do_vmentry(void);
 extern void vmx_intr_assist(void);
 extern void vmx_migrate_timers(struct vcpu *v);
-extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
 extern void set_guest_time(struct vcpu *v, u64 gtime);
 
@@ -220,54 +219,8 @@ static always_inline int ___vmread(
     return rc;
 }
 
-
-static always_inline void __vmwrite_vcpu(
-    struct vcpu *v, unsigned long field, unsigned long value)
-{
-    switch ( field ) {
-    case CR0_READ_SHADOW:
-        v->arch.hvm_vmx.cpu_shadow_cr0 = value;
-        break;
-    case GUEST_CR0:
-        v->arch.hvm_vmx.cpu_cr0 = value;
-        break;
-    case CR4_READ_SHADOW:
-        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
-        break;
-    case CPU_BASED_VM_EXEC_CONTROL:
-        v->arch.hvm_vmx.cpu_based_exec_control = value;
-        break;
-    default:
-        printk("__vmwrite_cpu: invalid field %lx\n", field);
-        break;
-    }
-}
-
-static always_inline void __vmread_vcpu(
-    struct vcpu *v, unsigned long field, unsigned long *value)
-{
-    switch ( field ) {
-    case CR0_READ_SHADOW:
-        *value = v->arch.hvm_vmx.cpu_shadow_cr0;
-        break;
-    case GUEST_CR0:
-        *value = v->arch.hvm_vmx.cpu_cr0;
-        break;
-    case CR4_READ_SHADOW:
-        *value = v->arch.hvm_vmx.cpu_shadow_cr4;
-        break;
-    case CPU_BASED_VM_EXEC_CONTROL:
-        *value = v->arch.hvm_vmx.cpu_based_exec_control;
-        break;
-    default:
-        printk("__vmread_vcpu: invalid field %lx\n", field);
-        break;
-    }
-}
-
 static inline int __vmwrite(unsigned long field, unsigned long value)
 {
-    struct vcpu *v = current;
     int rc;
 
     __asm__ __volatile__ ( VMWRITE_OPCODE
@@ -278,15 +231,6 @@ static inline int __vmwrite(unsigned lon
                            : "0" (0), "a" (field) , "c" (value)
                            : "memory");
 
-    switch ( field ) {
-    case CR0_READ_SHADOW:
-    case GUEST_CR0:
-    case CR4_READ_SHADOW:
-    case CPU_BASED_VM_EXEC_CONTROL:
-        __vmwrite_vcpu(v, field, value);
-        break;
-    }
-
     return rc;
 }
 
@@ -337,16 +281,8 @@ static inline int __vmxon (u64 addr)
 
 static inline int vmx_paging_enabled(struct vcpu *v)
 {
-    unsigned long cr0;
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
-}
-
-static inline int vmx_pae_enabled(struct vcpu *v)
-{
-    unsigned long cr4;
-    __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
-    return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
 }
 
 static inline int vmx_long_mode_enabled(struct vcpu *v)
@@ -370,9 +306,7 @@ static inline void vmx_update_host_cr3(s
 
 static inline int vmx_pgbit_test(struct vcpu *v)
 {
-    unsigned long cr0;
-
-    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
     return (cr0 & X86_CR0_PG);
 }
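
Why the header accessors could go entirely: the old __vmwrite() secretly
write-through-cached certain fields into current, which made it unsafe to
call on behalf of any vcpu other than the one whose VMCS is loaded. After
the patch __vmwrite() is a plain VMWRITE wrapper and each caller updates the
cache of the vcpu it actually means. A deliberately simplified model of the
bug class this removes (toy types, not the Xen code):

    #include <stdio.h>

    struct vcpu { unsigned long cpu_shadow_cr0; };

    static struct vcpu *current_vcpu; /* stand-in for Xen's 'current' */

    /* Old style: the low-level write hides a cache update into 'current',
     * so the 'target' argument is silently ignored for caching purposes. */
    static void vmwrite_old(struct vcpu *target, unsigned long val)
    {
        (void)target;
        current_vcpu->cpu_shadow_cr0 = val; /* wrong vcpu if target != current */
    }

    /* New style: the caller owns the cache update for its own vcpu. */
    static void vmwrite_new(struct vcpu *v, unsigned long val)
    {
        v->cpu_shadow_cr0 = val;
    }

    int main(void)
    {
        struct vcpu a = { 0 }, b = { 0 };
        current_vcpu = &a;
        vmwrite_old(&b, 0x1); /* intended for b, lands in a */
        vmwrite_new(&b, 0x1); /* lands in b */
        printf("a=%#lx (polluted), b=%#lx (correct)\n",
               a.cpu_shadow_cr0, b.cpu_shadow_cr0);
        return 0;
    }
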
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog