[Xen-changelog] [xen-unstable] Merge
# HG changeset patch
# User Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
# Date 1292431765 0
# Node ID f46f46bbb69a788037f866025c88743d55dde416
# Parent  c3c1fd0a940c434aa6b69463b090f365c42d463b
# Parent  8d9ca1012ffa46fe063cb634a6a335d56bb6fcf3
Merge
---
 xen/arch/x86/hvm/svm/asid.c        |    9 -
 xen/arch/x86/hvm/svm/emulate.c     |    4
 xen/arch/x86/hvm/svm/intr.c        |   16 +--
 xen/arch/x86/hvm/svm/svm.c         |  186 +++++++++++++++++++++++++------
 xen/arch/x86/hvm/svm/vmcb.c        |  103 ++++++++++----------
 xen/include/asm-x86/hvm/svm/svm.h  |    2
 xen/include/asm-x86/hvm/svm/vmcb.h |  153 +++++++++++++++++++++++-------
 7 files changed, 320 insertions(+), 153 deletions(-)

diff -r c3c1fd0a940c -r f46f46bbb69a xen/arch/x86/hvm/svm/asid.c
--- a/xen/arch/x86/hvm/svm/asid.c       Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/arch/x86/hvm/svm/asid.c       Wed Dec 15 16:49:25 2010 +0000
@@ -41,18 +41,19 @@ asmlinkage void svm_asid_handle_vmrun(vo
 asmlinkage void svm_asid_handle_vmrun(void)
 {
     struct vcpu *curr = current;
+    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
     bool_t need_flush = hvm_asid_handle_vmenter();

     /* ASID 0 indicates that ASIDs are disabled. */
     if ( curr->arch.hvm_vcpu.asid == 0 )
     {
-        curr->arch.hvm_svm.vmcb->guest_asid = 1;
-        curr->arch.hvm_svm.vmcb->tlb_control = 1;
+        vmcb_set_guest_asid(vmcb, 1);
+        vmcb->tlb_control = 1;
         return;
     }

-    curr->arch.hvm_svm.vmcb->guest_asid = curr->arch.hvm_vcpu.asid;
-    curr->arch.hvm_svm.vmcb->tlb_control = need_flush;
+    vmcb_set_guest_asid(vmcb, curr->arch.hvm_vcpu.asid);
+    vmcb->tlb_control = need_flush;
 }

 /*
diff -r c3c1fd0a940c -r f46f46bbb69a xen/arch/x86/hvm/svm/emulate.c
--- a/xen/arch/x86/hvm/svm/emulate.c    Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/arch/x86/hvm/svm/emulate.c    Wed Dec 15 16:49:25 2010 +0000
@@ -120,7 +120,9 @@ static const u8 *opc_bytes[INSTR_MAX_COU
 static int fetch(struct vcpu *v, u8 *buf, unsigned long addr, int len)
 {
-    uint32_t pfec = (v->arch.hvm_svm.vmcb->cpl == 3) ? PFEC_user_mode : 0;
+    uint32_t pfec;
+
+    pfec = (vmcb_get_cpl(v->arch.hvm_svm.vmcb) == 3) ? PFEC_user_mode : 0;

     switch ( hvm_fetch_from_guest_virt(buf, addr, len, pfec) )
     {
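The hunks above set the pattern for the whole changeset: open-coded accesses such as "vmcb->guest_asid = ..." become vmcb_get_*()/vmcb_set_*() calls, so that every write also clears the VMCB clean bit covering that field (the accessors themselves are introduced in vmcb.h at the end of this patch). A minimal sketch of the idiom, assuming a vmcb pointer in scope (illustrative, not a hunk from the changeset):

    /* Read-modify-write through the accessors: fetch the current value,
     * adjust a local copy, then store it back exactly once.  The setter
     * clears the "intercepts" clean bit, forcing hardware to re-read the
     * whole group on the next VMRUN; a direct
     * "vmcb->_general1_intercepts |= ..." would leave the bit set and
     * risk VMRUN using a stale cached copy of the field. */
    u32 intercepts = vmcb_get_general1_intercepts(vmcb);

    intercepts |= GENERAL1_INTERCEPT_CPUID;
    vmcb_set_general1_intercepts(vmcb, intercepts);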
diff -r c3c1fd0a940c -r f46f46bbb69a xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c       Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/arch/x86/hvm/svm/intr.c       Wed Dec 15 16:49:25 2010 +0000
@@ -42,6 +42,7 @@ static void svm_inject_nmi(struct vcpu *
 static void svm_inject_nmi(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     eventinj_t event;

     event.bytes = 0;
@@ -56,9 +57,10 @@ static void svm_inject_nmi(struct vcpu *
      * SVM does not virtualise the NMI mask, so we emulate it by intercepting
      * the next IRET and blocking NMI injection until the intercept triggers.
      */
-    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
+    vmcb_set_general1_intercepts(
+        vmcb, general1_intercepts | GENERAL1_INTERCEPT_IRET);
 }
-    
+
 static void svm_inject_extint(struct vcpu *v, int vector)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -76,6 +78,7 @@ static void enable_intr_window(struct vc
 static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     vintr_t intr;

     ASSERT(intack.source != hvm_intsrc_none);
@@ -100,16 +103,17 @@ static void enable_intr_window(struct vc
      * we inject a VINTR, ...).
      */
     if ( (intack.source == hvm_intsrc_nmi) &&
-         (vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET) )
+         (general1_intercepts & GENERAL1_INTERCEPT_IRET) )
         return;

-    intr = vmcb->vintr;
+    intr = vmcb_get_vintr(vmcb);
     intr.fields.irq = 1;
     intr.fields.vector = 0;
     intr.fields.prio = intack.vector >> 4;
     intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
-    vmcb->vintr = intr;
-    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+    vmcb_set_vintr(vmcb, intr);
+    vmcb_set_general1_intercepts(
+        vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
 }

 asmlinkage void svm_intr_assist(void)
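A note on the arithmetic in enable_intr_window() above: an x86 vector's interrupt priority class is its upper nibble, so "intack.vector >> 4" implements the APIC rule priority = vector / 16. For example (values are illustrative only):

    /* Vector 0x51 belongs to priority class 5; the virtual interrupt is
     * held pending until the vTPR drops below that class, unless ign_tpr
     * is set (as it is above for non-LAPIC interrupt sources). */
    intr.fields.prio = 0x51 >> 4;    /* == 5 */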
diff -r c3c1fd0a940c -r f46f46bbb69a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Dec 15 16:49:25 2010 +0000
@@ -115,14 +115,14 @@ static void svm_save_dr(struct vcpu *v)

     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
-    v->arch.hvm_svm.vmcb->dr_intercepts = ~0u;
+    vmcb_set_dr_intercepts(vmcb, ~0u);

     v->arch.guest_context.debugreg[0] = read_debugreg(0);
     v->arch.guest_context.debugreg[1] = read_debugreg(1);
     v->arch.guest_context.debugreg[2] = read_debugreg(2);
     v->arch.guest_context.debugreg[3] = read_debugreg(3);
-    v->arch.guest_context.debugreg[6] = vmcb->dr6;
-    v->arch.guest_context.debugreg[7] = vmcb->dr7;
+    v->arch.guest_context.debugreg[6] = vmcb_get_dr6(vmcb);
+    v->arch.guest_context.debugreg[7] = vmcb_get_dr7(vmcb);
 }

 static void __restore_debug_registers(struct vcpu *v)
@@ -133,14 +133,14 @@ static void __restore_debug_registers(st
         return;

     v->arch.hvm_vcpu.flag_dr_dirty = 1;
-    vmcb->dr_intercepts = 0;
+    vmcb_set_dr_intercepts(vmcb, 0);

     write_debugreg(0, v->arch.guest_context.debugreg[0]);
     write_debugreg(1, v->arch.guest_context.debugreg[1]);
     write_debugreg(2, v->arch.guest_context.debugreg[2]);
     write_debugreg(3, v->arch.guest_context.debugreg[3]);
-    vmcb->dr6 = v->arch.guest_context.debugreg[6];
-    vmcb->dr7 = v->arch.guest_context.debugreg[7];
+    vmcb_set_dr6(vmcb, v->arch.guest_context.debugreg[6]);
+    vmcb_set_dr7(vmcb, v->arch.guest_context.debugreg[7]);
 }

 /*
@@ -230,9 +230,9 @@ static int svm_vmcb_restore(struct vcpu

     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->np_enable = 1;
-        vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
-        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m));
+        vmcb_set_np_enable(vmcb, 1);
+        vmcb_set_g_pat(vmcb, MSR_IA32_CR_PAT_RESET /* guest PAT */);
+        vmcb_set_h_cr3(vmcb, pagetable_get_paddr(p2m_get_pagetable(p2m)));
     }

     if ( c->pending_valid )
@@ -247,6 +247,7 @@ static int svm_vmcb_restore(struct vcpu
         }
     }

+    vmcb->cleanbits.bytes = 0;
     paging_update_paging_modes(v);

     return 0;
@@ -307,7 +308,8 @@ static void svm_fpu_enter(struct vcpu *v
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

     setup_fpu(v);
-    vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
+    vmcb_set_exception_intercepts(
+        vmcb, vmcb_get_exception_intercepts(vmcb) & ~(1U << TRAP_no_device));
 }

 static void svm_fpu_leave(struct vcpu *v)
@@ -325,8 +327,10 @@ static void svm_fpu_leave(struct vcpu *v
      */
     if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
     {
-        v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
-        vmcb->cr0 |= X86_CR0_TS;
+        vmcb_set_exception_intercepts(
+            vmcb,
+            vmcb_get_exception_intercepts(vmcb) | (1U << TRAP_no_device));
+        vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) | X86_CR0_TS);
     }
 }

@@ -338,7 +342,7 @@ static unsigned int svm_get_interrupt_sh
     if ( vmcb->interrupt_shadow )
         intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;

-    if ( vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET )
+    if ( vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET )
         intr_shadow |= HVM_INTR_SHADOW_NMI;

     return intr_shadow;
@@ -347,13 +351,15 @@ static void svm_set_interrupt_shadow(str
 static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);

     vmcb->interrupt_shadow =
         !!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));

-    vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+    general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
     if ( intr_shadow & HVM_INTR_SHADOW_NMI )
-        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
+        general1_intercepts |= GENERAL1_INTERCEPT_IRET;
+    vmcb_set_general1_intercepts(vmcb, general1_intercepts);
 }

 static int svm_guest_x86_mode(struct vcpu *v)
@@ -377,6 +383,7 @@ static void svm_update_guest_cr(struct v
 static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    uint64_t value;

     switch ( cr )
     {
@@ -387,27 +394,29 @@ static void svm_update_guest_cr(struct v
         {
             if ( v != current )
                 hw_cr0_mask |= X86_CR0_TS;
-            else if ( vmcb->cr0 & X86_CR0_TS )
+            else if ( vmcb_get_cr0(vmcb) & X86_CR0_TS )
                 svm_fpu_enter(v);
         }

-        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
+        value = v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
         if ( !paging_mode_hap(v->domain) )
-            vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
+            value |= X86_CR0_PG | X86_CR0_WP;
+        vmcb_set_cr0(vmcb, value);
         break;
     }
     case 2:
-        vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+        vmcb_set_cr2(vmcb, v->arch.hvm_vcpu.guest_cr[2]);
         break;
     case 3:
-        vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+        vmcb_set_cr3(vmcb, v->arch.hvm_vcpu.hw_cr[3]);
         hvm_asid_flush_vcpu(v);
         break;
     case 4:
-        vmcb->cr4 = HVM_CR4_HOST_MASK;
+        value = HVM_CR4_HOST_MASK;
         if ( paging_mode_hap(v->domain) )
-            vmcb->cr4 &= ~X86_CR4_PAE;
-        vmcb->cr4 |= v->arch.hvm_vcpu.guest_cr[4];
+            value &= ~X86_CR4_PAE;
+        value |= v->arch.hvm_vcpu.guest_cr[4];
+        vmcb_set_cr4(vmcb, value);
         break;
     default:
         BUG();
@@ -418,10 +427,12 @@ static void svm_update_guest_efer(struct
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     bool_t lma = !!(v->arch.hvm_vcpu.guest_efer & EFER_LMA);
-
-    vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
+    uint64_t new_efer;
+
+    new_efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
     if ( lma )
-        vmcb->efer |= EFER_LME;
+        new_efer |= EFER_LME;
+    vmcb_set_efer(vmcb, new_efer);

     /*
      * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with
@@ -481,7 +492,7 @@ static void svm_get_segment_register(str
         break;
     case x86_seg_ss:
         memcpy(reg, &vmcb->ss, sizeof(*reg));
-        reg->attr.fields.dpl = vmcb->cpl;
+        reg->attr.fields.dpl = vmcb->_cpl;
         if ( reg->attr.fields.type == 0 )
             reg->attr.fields.db = 0;
         break;
@@ -515,6 +526,16 @@ static void svm_set_segment_register(str

     switch ( seg )
     {
+    case x86_seg_cs:
+    case x86_seg_ds:
+    case x86_seg_es:
+    case x86_seg_ss: /* cpl */
+        vmcb->cleanbits.fields.seg = 0;
+        break;
+    case x86_seg_gdtr:
+    case x86_seg_idtr:
+        vmcb->cleanbits.fields.dt = 0;
+        break;
     case x86_seg_fs:
     case x86_seg_gs:
     case x86_seg_tr:
@@ -547,7 +568,7 @@ static void svm_set_segment_register(str
         break;
     case x86_seg_ss:
         memcpy(&vmcb->ss, reg, sizeof(*reg));
-        vmcb->cpl = vmcb->ss.attr.fields.dpl;
+        vmcb->_cpl = vmcb->ss.attr.fields.dpl;
         break;
     case x86_seg_tr:
         memcpy(&vmcb->tr, reg, sizeof(*reg));
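Segment state deliberately gets no accessor wrappers, so svm_set_segment_register() above clears the relevant clean bit by hand: bit "seg" covers cs/ds/es/ss and the CPL, bit "dt" covers gdtr/idtr. A sketch of the same convention for a hypothetical direct descriptor-table update (new_base and new_limit are made-up values):

    /* Whenever a field is written directly, the clean bit of its group
     * must be cleared so hardware reloads that group on the next VMRUN. */
    vmcb->cleanbits.fields.dt = 0;
    vmcb->gdtr.base = new_base;
    vmcb->gdtr.limit = new_limit;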
@@ -573,15 +594,20 @@ static void svm_set_segment_register(str

 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
-    v->arch.hvm_svm.vmcb->tsc_offset = offset;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    vmcb_set_tsc_offset(vmcb, offset);
 }

 static void svm_set_rdtsc_exiting(struct vcpu *v, bool_t enable)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_RDTSC;
+    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+
+    general1_intercepts &= ~GENERAL1_INTERCEPT_RDTSC;
     if ( enable )
-        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
+        general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
+
+    vmcb_set_general1_intercepts(vmcb, general1_intercepts);
 }

 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
@@ -626,6 +652,7 @@ static void svm_ctxt_switch_from(struct

 static void svm_ctxt_switch_to(struct vcpu *v)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int cpu = smp_processor_id();

 #ifdef __x86_64__
@@ -651,7 +678,8 @@ static void svm_ctxt_switch_to(struct vc
     svm_restore_dr(v);

     svm_vmsave(per_cpu(root_vmcb, cpu));
-    svm_vmload(v->arch.hvm_svm.vmcb);
+    svm_vmload(vmcb);
+    vmcb->cleanbits.bytes = 0;
     vpmu_load(v);

     if ( cpu_has_rdtscp )
@@ -660,16 +688,17 @@ static void svm_ctxt_switch_to(struct vc

 static void svm_do_resume(struct vcpu *v)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     bool_t debug_state = v->domain->debugger_attached;
+    vintr_t intr;

     if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
     {
+        uint32_t intercepts = vmcb_get_exception_intercepts(vmcb);
         uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);
         v->arch.hvm_vcpu.debug_state_latch = debug_state;
-        if ( debug_state )
-            v->arch.hvm_svm.vmcb->exception_intercepts |= mask;
-        else
-            v->arch.hvm_svm.vmcb->exception_intercepts &= ~mask;
+        vmcb_set_exception_intercepts(
+            vmcb, debug_state ? (intercepts | mask) : (intercepts & ~mask));
     }

     if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
@@ -682,8 +711,10 @@ static void svm_do_resume(struct vcpu *v
     }

     /* Reflect the vlapic's TPR in the hardware vtpr */
-    v->arch.hvm_svm.vmcb->vintr.fields.tpr =
+    intr = vmcb_get_vintr(vmcb);
+    intr.fields.tpr =
         (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+    vmcb_set_vintr(vmcb, intr);

     hvm_do_resume(v);
     reset_stack_and_jump(svm_asm_do_resume);
@@ -740,7 +771,7 @@ static void svm_inject_exception(
         if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(curr);
-            vmcb->dr6 |= 0x4000;
+            vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
         }
     case TRAP_int3:
         if ( curr->domain->debugger_attached )
@@ -770,7 +801,8 @@ static void svm_inject_exception(

     if ( trapnr == TRAP_page_fault )
     {
-        vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
+        curr->arch.hvm_vcpu.guest_cr[2] = cr2;
+        vmcb_set_cr2(vmcb, cr2);
         HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
     }
     else
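For reference, the TPR mirroring in svm_do_resume() above (and its inverse in svm_vmexit_handler() below) is a 4-bit mapping: the VMCB's vtpr holds only the priority class, i.e. the upper nibble of the 8-bit APIC TPR. A worked example:

    uint8_t tpr  = 0x8F;               /* example APIC_TASKPRI value */
    uint8_t vtpr = (tpr & 0xFF) >> 4;  /* 0x08, written to the vtpr  */
    /* After #VMEXIT the handler rebuilds the TPR, preserving the low
     * bits:  ((vtpr & 0x0F) << 4) | (tpr & 0x0F)  ==  0x8F again.   */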
@@ -885,6 +917,8 @@ static int svm_cpu_up(void)

 struct hvm_function_table * __init start_svm(void)
 {
+    bool_t printed = 0;
+
     if ( !test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability) )
         return NULL;

@@ -898,6 +932,19 @@ struct hvm_function_table * __init start
     svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
                          cpuid_edx(0x8000000A) : 0);
+
+    printk("SVM: Supported advanced features:\n");
+
+#define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; }
+    P(cpu_has_svm_npt, "Nested Page Tables (NPT)");
+    P(cpu_has_svm_lbrv, "Last Branch Record (LBR) Virtualisation");
+    P(cpu_has_svm_nrips, "Next-RIP Saved on #VMEXIT");
+    P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
+    P(cpu_has_pause_filter, "Pause-Intercept Filter");
+#undef P
+
+    if ( !printed )
+        printk(" - none\n");

     svm_function_table.hap_supported = cpu_has_svm_npt;
     svm_function_table.hap_capabilities = HVM_HAP_SUPERPAGE_2MB |
@@ -950,7 +997,7 @@ static void svm_fpu_dirty_intercept(void
     svm_fpu_enter(curr);

     if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
-        vmcb->cr0 &= ~X86_CR0_TS;
+        vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS);
 }

 #define bitmaskof(idx)  (1U << ((idx) & 31))
@@ -1040,23 +1087,23 @@ static int svm_msr_read_intercept(unsign
         goto gpf;

     case MSR_IA32_DEBUGCTLMSR:
-        *msr_content = vmcb->debugctlmsr;
+        *msr_content = vmcb_get_debugctlmsr(vmcb);
         break;

     case MSR_IA32_LASTBRANCHFROMIP:
-        *msr_content = vmcb->lastbranchfromip;
+        *msr_content = vmcb_get_lastbranchfromip(vmcb);
         break;

     case MSR_IA32_LASTBRANCHTOIP:
-        *msr_content = vmcb->lastbranchtoip;
+        *msr_content = vmcb_get_lastbranchtoip(vmcb);
         break;

     case MSR_IA32_LASTINTFROMIP:
-        *msr_content = vmcb->lastintfromip;
+        *msr_content = vmcb_get_lastintfromip(vmcb);
         break;

     case MSR_IA32_LASTINTTOIP:
-        *msr_content = vmcb->lastinttoip;
+        *msr_content = vmcb_get_lastinttoip(vmcb);
         break;

     case MSR_K7_PERFCTR0:
@@ -1112,7 +1159,7 @@ static int svm_msr_write_intercept(unsig
         break;

     case MSR_IA32_DEBUGCTLMSR:
-        vmcb->debugctlmsr = msr_content;
+        vmcb_set_debugctlmsr(vmcb, msr_content);
         if ( !msr_content || !cpu_has_svm_lbrv )
             break;
         vmcb->lbr_control.fields.enable = 1;
@@ -1124,19 +1171,19 @@ static int svm_msr_write_intercept(unsig
         break;

     case MSR_IA32_LASTBRANCHFROMIP:
-        vmcb->lastbranchfromip = msr_content;
+        vmcb_set_lastbranchfromip(vmcb, msr_content);
         break;

     case MSR_IA32_LASTBRANCHTOIP:
-        vmcb->lastbranchtoip = msr_content;
+        vmcb_set_lastbranchtoip(vmcb, msr_content);
         break;

     case MSR_IA32_LASTINTFROMIP:
-        vmcb->lastintfromip = msr_content;
+        vmcb_set_lastintfromip(vmcb, msr_content);
         break;

     case MSR_IA32_LASTINTTOIP:
-        vmcb->lastinttoip = msr_content;
+        vmcb_set_lastinttoip(vmcb, msr_content);
         break;

     case MSR_K7_PERFCTR0:
@@ -1379,9 +1426,11 @@ asmlinkage void svm_vmexit_handler(struc
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     eventinj_t eventinj;
     int inst_len, rc;
+    vintr_t intr;

     if ( paging_mode_hap(v->domain) )
-        v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] = vmcb->cr3;
+        v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
+            vmcb_get_cr3(vmcb);

     /*
      * Before doing anything else, we need to sync up the VLAPIC's TPR with
@@ -1390,8 +1439,9 @@ asmlinkage void svm_vmexit_handler(struc
      * NB. We need to preserve the low bits of the TPR to make checked builds
      * of Windows work, even though they don't actually do anything.
      */
+    intr = vmcb_get_vintr(vmcb);
     vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
-                   ((vmcb->vintr.fields.tpr & 0x0F) << 4) |
+                   ((intr.fields.tpr & 0x0F) << 4) |
                    (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0x0F));

     exit_reason = vmcb->exitcode;
@@ -1414,6 +1464,8 @@ asmlinkage void svm_vmexit_handler(struc
     perfc_incra(svmexits, exit_reason);

     hvm_maybe_deassert_evtchn_irq();
+
+    vmcb->cleanbits.bytes = cpu_has_svm_cleanbits ? ~0u : 0u;

     /* Event delivery caused this intercept? Queue for redelivery. */
     eventinj = vmcb->exitintinfo;
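The "cleanbits.bytes = cpu_has_svm_cleanbits ? ~0u : 0u" line just added is one end of the clean-bits protocol this changeset wires up. In summary (a paraphrase of the patch as a whole, not code from any single function):

    /*
     * On #VMEXIT:       mark every field provisionally clean (~0u), or
     *                   keep everything dirty (0u) on hardware without
     *                   the feature.
     * On a field write: each vmcb_set_*() clears the group bit covering
     *                   the field, so VMRUN reloads that group.
     * On a VMCB switch: clear all bits -- construct_vmcb(),
     *                   svm_vmcb_restore() and svm_ctxt_switch_to() all
     *                   set cleanbits.bytes = 0, since the physical CPU
     *                   may hold no valid cache of this VMCB at all.
     */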
@@ -1495,10 +1547,17 @@ asmlinkage void svm_vmexit_handler(struc
         svm_vmexit_mce_intercept(v, regs);
         break;

-    case VMEXIT_VINTR:
-        vmcb->vintr.fields.irq = 0;
-        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
-        break;
+    case VMEXIT_VINTR: {
+        u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+        intr = vmcb_get_vintr(vmcb);
+
+        intr.fields.irq = 0;
+        general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
+
+        vmcb_set_vintr(vmcb, intr);
+        vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+        break;
+    }

     case VMEXIT_INVD:
     case VMEXIT_WBINVD:
@@ -1613,7 +1672,9 @@ asmlinkage void svm_vmexit_handler(struc
         svm_do_nested_pgfault(vmcb->exitinfo2);
         break;

-    case VMEXIT_IRET:
+    case VMEXIT_IRET: {
+        u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+
         /*
          * IRET clears the NMI mask. However because we clear the mask
          * /before/ executing IRET, we set the interrupt shadow to prevent
@@ -1622,9 +1683,12 @@ asmlinkage void svm_vmexit_handler(struc
          * may inject an NMI before the NMI handler's IRET instruction is
          * retired.
          */
-        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+        general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
         vmcb->interrupt_shadow = 1;
-        break;
+
+        vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+        break;
+    }

     case VMEXIT_PAUSE:
         svm_vmexit_do_pause(regs);
@@ -1641,8 +1705,10 @@ asmlinkage void svm_vmexit_handler(struc
     }

     /* The exit may have updated the TPR: reflect this in the hardware vtpr */
-    vmcb->vintr.fields.tpr =
+    intr = vmcb_get_vintr(vmcb);
+    intr.fields.tpr =
         (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+    vmcb_set_vintr(vmcb, intr);
 }

 asmlinkage void svm_trace_vmentry(void)
diff -r c3c1fd0a940c -r f46f46bbb69a xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed Dec 15 16:49:25 2010 +0000
@@ -109,12 +109,13 @@ void svm_intercept_msr(struct vcpu *v, u
     }
 }

+/* This function can directly access fields which are covered by clean bits. */
 static int construct_vmcb(struct vcpu *v)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     struct vmcb_struct *vmcb = arch_svm->vmcb;

-    vmcb->general1_intercepts =
+    vmcb->_general1_intercepts =
         GENERAL1_INTERCEPT_INTR        | GENERAL1_INTERCEPT_NMI         |
         GENERAL1_INTERCEPT_SMI         | GENERAL1_INTERCEPT_INIT        |
         GENERAL1_INTERCEPT_CPUID       | GENERAL1_INTERCEPT_INVD        |
@@ -122,7 +123,7 @@ static int construct_vmcb(struct vcpu *v
         GENERAL1_INTERCEPT_INVLPGA     | GENERAL1_INTERCEPT_IOIO_PROT   |
         GENERAL1_INTERCEPT_MSR_PROT    | GENERAL1_INTERCEPT_SHUTDOWN_EVT|
         GENERAL1_INTERCEPT_TASK_SWITCH;
-    vmcb->general2_intercepts =
+    vmcb->_general2_intercepts =
         GENERAL2_INTERCEPT_VMRUN  | GENERAL2_INTERCEPT_VMMCALL |
         GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE  |
         GENERAL2_INTERCEPT_STGI   | GENERAL2_INTERCEPT_CLGI    |
@@ -131,13 +132,13 @@ static int construct_vmcb(struct vcpu *v
         GENERAL2_INTERCEPT_XSETBV;

     /* Intercept all debug-register writes. */
-    vmcb->dr_intercepts = ~0u;
+    vmcb->_dr_intercepts = ~0u;

     /* Intercept all control-register accesses except for CR2 and CR8. */
-    vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
-                            CR_INTERCEPT_CR2_WRITE |
-                            CR_INTERCEPT_CR8_READ |
-                            CR_INTERCEPT_CR8_WRITE);
+    vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
+                             CR_INTERCEPT_CR2_WRITE |
+                             CR_INTERCEPT_CR8_READ |
+                             CR_INTERCEPT_CR8_WRITE);

     /* I/O and MSR permission bitmaps. */
     arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
@@ -153,21 +154,21 @@ static int construct_vmcb(struct vcpu *v
     svm_disable_intercept_for_msr(v, MSR_STAR);
     svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);

-    vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
-    vmcb->iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
+    vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
+    vmcb->_iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);

     /* Virtualise EFLAGS.IF and LAPIC TPR (CR8). */
-    vmcb->vintr.fields.intr_masking = 1;
+    vmcb->_vintr.fields.intr_masking = 1;

     /* Initialise event injection to no-op. */
     vmcb->eventinj.bytes = 0;

     /* TSC. */
-    vmcb->tsc_offset = 0;
+    vmcb->_tsc_offset = 0;
     if ( v->domain->arch.vtsc )
     {
-        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
-        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP;
+        vmcb->_general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
+        vmcb->_general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP;
     }

     /* Guest EFER. */
@@ -225,38 +226,42 @@ static int construct_vmcb(struct vcpu *v

     paging_update_paging_modes(v);

-    vmcb->exception_intercepts =
+    vmcb->_exception_intercepts =
         HVM_TRAP_MASK
         | (1U << TRAP_no_device);

     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->np_enable = 1; /* enable nested paging */
-        vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
-        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
+        vmcb->_np_enable = 1; /* enable nested paging */
+        vmcb->_g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
+        vmcb->_h_cr3 = pagetable_get_paddr(
+            p2m_get_pagetable(p2m_get_hostp2m(v->domain)));

         /* No point in intercepting CR3 reads/writes. */
-        vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE);
+        vmcb->_cr_intercepts &=
+            ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE);

         /*
          * No point in intercepting INVLPG if we don't have shadow pagetables
          * that need to be fixed up.
          */
-        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_INVLPG;
+        vmcb->_general1_intercepts &= ~GENERAL1_INTERCEPT_INVLPG;

         /* PAT is under complete control of SVM when using nested paging. */
         svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
     }
     else
     {
-        vmcb->exception_intercepts |= (1U << TRAP_page_fault);
+        vmcb->_exception_intercepts |= (1U << TRAP_page_fault);
     }

     if ( cpu_has_pause_filter )
     {
-        vmcb->pause_filter_count = 3000;
-        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_PAUSE;
-    }
+        vmcb->_pause_filter_count = 3000;
+        vmcb->_general1_intercepts |= GENERAL1_INTERCEPT_PAUSE;
+    }
+
+    vmcb->cleanbits.bytes = 0;

     return 0;
 }
@@ -309,6 +314,7 @@ static void svm_dump_sel(char *name, svm
            (unsigned long long)s->base);
 }

+/* This function can directly access fields which are covered by clean bits. */
 void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb)
 {
     printk("Dumping guest's current state at %s...\n", from);
@@ -317,47 +323,48 @@ void svm_dump_vmcb(const char *from, str
     printk("cr_intercepts = 0x%08x dr_intercepts = 0x%08x "
            "exception_intercepts = 0x%08x\n",
-           vmcb->cr_intercepts, vmcb->dr_intercepts,
-           vmcb->exception_intercepts);
+           vmcb->_cr_intercepts, vmcb->_dr_intercepts,
+           vmcb->_exception_intercepts);
     printk("general1_intercepts = 0x%08x general2_intercepts = 0x%08x\n",
-           vmcb->general1_intercepts, vmcb->general2_intercepts);
+           vmcb->_general1_intercepts, vmcb->_general2_intercepts);
     printk("iopm_base_pa = 0x%016llx msrpm_base_pa = 0x%016llx tsc_offset = "
            "0x%016llx\n",
-           (unsigned long long) vmcb->iopm_base_pa,
-           (unsigned long long) vmcb->msrpm_base_pa,
-           (unsigned long long) vmcb->tsc_offset);
+           (unsigned long long)vmcb->_iopm_base_pa,
+           (unsigned long long)vmcb->_msrpm_base_pa,
+           (unsigned long long)vmcb->_tsc_offset);
     printk("tlb_control = 0x%08x vintr = 0x%016llx interrupt_shadow = "
            "0x%016llx\n", vmcb->tlb_control,
-           (unsigned long long) vmcb->vintr.bytes,
-           (unsigned long long) vmcb->interrupt_shadow);
+           (unsigned long long)vmcb->_vintr.bytes,
+           (unsigned long long)vmcb->interrupt_shadow);
     printk("exitcode = 0x%016llx exitintinfo = 0x%016llx\n",
-           (unsigned long long) vmcb->exitcode,
-           (unsigned long long) vmcb->exitintinfo.bytes);
+           (unsigned long long)vmcb->exitcode,
+           (unsigned long long)vmcb->exitintinfo.bytes);
     printk("exitinfo1 = 0x%016llx exitinfo2 = 0x%016llx \n",
-           (unsigned long long) vmcb->exitinfo1,
-           (unsigned long long) vmcb->exitinfo2);
+           (unsigned long long)vmcb->exitinfo1,
+           (unsigned long long)vmcb->exitinfo2);
     printk("np_enable = 0x%016llx guest_asid = 0x%03x\n",
-           (unsigned long long) vmcb->np_enable, vmcb->guest_asid);
+           (unsigned long long)vmcb->_np_enable, vmcb->_guest_asid);
     printk("cpl = %d efer = 0x%016llx star = 0x%016llx lstar = 0x%016llx\n",
-           vmcb->cpl, (unsigned long long) vmcb->efer,
-           (unsigned long long) vmcb->star, (unsigned long long) vmcb->lstar);
+           vmcb->_cpl, (unsigned long long)vmcb->_efer,
+           (unsigned long long)vmcb->star, (unsigned long long)vmcb->lstar);
     printk("CR0 = 0x%016llx CR2 = 0x%016llx\n",
-           (unsigned long long) vmcb->cr0, (unsigned long long) vmcb->cr2);
+           (unsigned long long)vmcb->_cr0, (unsigned long long)vmcb->_cr2);
     printk("CR3 = 0x%016llx CR4 = 0x%016llx\n",
-           (unsigned long long) vmcb->cr3, (unsigned long long) vmcb->cr4);
+           (unsigned long long)vmcb->_cr3, (unsigned long long)vmcb->_cr4);
     printk("RSP = 0x%016llx  RIP = 0x%016llx\n",
-           (unsigned long long) vmcb->rsp, (unsigned long long) vmcb->rip);
+           (unsigned long long)vmcb->rsp, (unsigned long long)vmcb->rip);
     printk("RAX = 0x%016llx  RFLAGS=0x%016llx\n",
-           (unsigned long long) vmcb->rax, (unsigned long long) vmcb->rflags);
+           (unsigned long long)vmcb->rax, (unsigned long long)vmcb->rflags);
     printk("DR6 = 0x%016llx, DR7 = 0x%016llx\n",
-           (unsigned long long) vmcb->dr6, (unsigned long long) vmcb->dr7);
+           (unsigned long long)vmcb->_dr6, (unsigned long long)vmcb->_dr7);
     printk("CSTAR = 0x%016llx SFMask = 0x%016llx\n",
-           (unsigned long long) vmcb->cstar,
-           (unsigned long long) vmcb->sfmask);
+           (unsigned long long)vmcb->cstar,
+           (unsigned long long)vmcb->sfmask);
     printk("KernGSBase = 0x%016llx PAT = 0x%016llx \n",
-           (unsigned long long) vmcb->kerngsbase,
-           (unsigned long long) vmcb->g_pat);
-    printk("H_CR3 = 0x%016llx\n", (unsigned long long)vmcb->h_cr3);
+           (unsigned long long)vmcb->kerngsbase,
+           (unsigned long long)vmcb->_g_pat);
+    printk("H_CR3 = 0x%016llx CleanBits = 0x%08x\n",
+           (unsigned long long)vmcb->_h_cr3, vmcb->cleanbits.bytes);

     /* print out all the selectors */
     svm_dump_sel("CS", &vmcb->cs);
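construct_vmcb() and svm_dump_vmcb() above are the two functions annotated as allowed to touch the underscore-prefixed fields directly: the dump only reads, and construction ends by zeroing cleanbits.bytes, invalidating every group in one go. The convention in miniature (example_vmcb_init is a hypothetical helper, not part of the patch):

    static void example_vmcb_init(struct vmcb_struct *vmcb)
    {
        /* Direct field writes are safe during construction... */
        vmcb->_dr_intercepts = ~0u;
        vmcb->_tsc_offset = 0;
        /* ...provided everything is marked dirty before the first VMRUN. */
        vmcb->cleanbits.bytes = 0;
    }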
diff -r c3c1fd0a940c -r f46f46bbb69a xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/include/asm-x86/hvm/svm/svm.h Wed Dec 15 16:49:25 2010 +0000
@@ -68,12 +68,14 @@ extern u32 svm_feature_flags;
 #define SVM_FEATURE_LBRV   1
 #define SVM_FEATURE_SVML   2
 #define SVM_FEATURE_NRIPS  3
+#define SVM_FEATURE_CLEAN  5
 #define SVM_FEATURE_PAUSEF 10

 #define cpu_has_svm_npt    test_bit(SVM_FEATURE_NPT, &svm_feature_flags)
 #define cpu_has_svm_lbrv   test_bit(SVM_FEATURE_LBRV, &svm_feature_flags)
 #define cpu_has_svm_svml   test_bit(SVM_FEATURE_SVML, &svm_feature_flags)
 #define cpu_has_svm_nrips  test_bit(SVM_FEATURE_NRIPS, &svm_feature_flags)
+#define cpu_has_svm_cleanbits test_bit(SVM_FEATURE_CLEAN, &svm_feature_flags)
 #define cpu_has_pause_filter  test_bit(SVM_FEATURE_PAUSEF, &svm_feature_flags)

 #endif /* __ASM_X86_HVM_SVM_H__ */
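SVM_FEATURE_CLEAN is bit 5 of CPUID leaf 0x8000000A EDX, which start_svm() caches in svm_feature_flags (see the svm.c hunk earlier). A standalone sketch of the same check, using the cpuid_eax()/cpuid_edx() helpers the patch itself uses (the helper name is made up):

    static inline int example_has_vmcb_cleanbits(void)
    {
        /* Leaf 0x8000000A is only valid if the max extended leaf reaches it. */
        if ( cpuid_eax(0x80000000) < 0x8000000A )
            return 0;
        return (cpuid_edx(0x8000000A) >> 5 /* SVM_FEATURE_CLEAN */) & 1;
    }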
diff -r c3c1fd0a940c -r f46f46bbb69a xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Wed Dec 15 16:49:06 2010 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Wed Dec 15 16:49:25 2010 +0000
@@ -366,12 +366,44 @@ typedef union
     } fields;
 } __attribute__ ((packed)) lbrctrl_t;

+typedef union
+{
+    uint32_t bytes;
+    struct
+    {
+        /* cr_intercepts, dr_intercepts, exception_intercepts,
+         * general{1,2}_intercepts, pause_filter_count, tsc_offset */
+        uint32_t intercepts: 1;
+        /* iopm_base_pa, msrpm_base_pa */
+        uint32_t iopm: 1;
+        /* guest_asid */
+        uint32_t asid: 1;
+        /* vintr */
+        uint32_t tpr: 1;
+        /* np_enable, h_cr3, g_pat */
+        uint32_t np: 1;
+        /* cr0, cr3, cr4, efer */
+        uint32_t cr: 1;
+        /* dr6, dr7 */
+        uint32_t dr: 1;
+        /* gdtr, idtr */
+        uint32_t dt: 1;
+        /* cs, ds, es, ss, cpl */
+        uint32_t seg: 1;
+        /* cr2 */
+        uint32_t cr2: 1;
+        /* debugctlmsr, last{branch,int}{to,from}ip */
+        uint32_t lbr: 1;
+        uint32_t resv: 21;
+    } fields;
+} __attribute__ ((packed)) vmcbcleanbits_t;
+
 struct vmcb_struct {
-    u32 cr_intercepts;          /* offset 0x00 */
-    u32 dr_intercepts;          /* offset 0x04 */
-    u32 exception_intercepts;   /* offset 0x08 */
-    u32 general1_intercepts;    /* offset 0x0C */
-    u32 general2_intercepts;    /* offset 0x10 */
+    u32 _cr_intercepts;         /* offset 0x00 - cleanbit 0 */
+    u32 _dr_intercepts;         /* offset 0x04 - cleanbit 0 */
+    u32 _exception_intercepts;  /* offset 0x08 - cleanbit 0 */
+    u32 _general1_intercepts;   /* offset 0x0C - cleanbit 0 */
+    u32 _general2_intercepts;   /* offset 0x10 - cleanbit 0 */
     u32 res01;                  /* offset 0x14 */
     u64 res02;                  /* offset 0x18 */
     u64 res03;                  /* offset 0x20 */
@@ -379,49 +411,50 @@ struct vmcb_struct {
     u64 res05;                  /* offset 0x30 */
     u32 res06;                  /* offset 0x38 */
     u16 res06a;                 /* offset 0x3C */
-    u16 pause_filter_count;     /* offset 0x3E */
-    u64 iopm_base_pa;           /* offset 0x40 */
-    u64 msrpm_base_pa;          /* offset 0x48 */
-    u64 tsc_offset;             /* offset 0x50 */
-    u32 guest_asid;             /* offset 0x58 */
+    u16 _pause_filter_count;    /* offset 0x3E - cleanbit 0 */
+    u64 _iopm_base_pa;          /* offset 0x40 - cleanbit 1 */
+    u64 _msrpm_base_pa;         /* offset 0x48 - cleanbit 1 */
+    u64 _tsc_offset;            /* offset 0x50 - cleanbit 0 */
+    u32 _guest_asid;            /* offset 0x58 - cleanbit 2 */
     u8  tlb_control;            /* offset 0x5C */
     u8  res07[3];
-    vintr_t vintr;              /* offset 0x60 */
+    vintr_t _vintr;             /* offset 0x60 - cleanbit 3 */
     u64 interrupt_shadow;       /* offset 0x68 */
     u64 exitcode;               /* offset 0x70 */
     u64 exitinfo1;              /* offset 0x78 */
     u64 exitinfo2;              /* offset 0x80 */
     eventinj_t exitintinfo;     /* offset 0x88 */
-    u64 np_enable;              /* offset 0x90 */
+    u64 _np_enable;             /* offset 0x90 - cleanbit 4 */
     u64 res08[2];
     eventinj_t eventinj;        /* offset 0xA8 */
-    u64 h_cr3;                  /* offset 0xB0 */
+    u64 _h_cr3;                 /* offset 0xB0 - cleanbit 4 */
     lbrctrl_t lbr_control;      /* offset 0xB8 */
-    u64 res09;                  /* offset 0xC0 */
+    vmcbcleanbits_t cleanbits;  /* offset 0xC0 */
+    u32 res09;                  /* offset 0xC4 */
     u64 nextrip;                /* offset 0xC8 */
     u64 res10a[102];            /* offset 0xD0 pad to save area */

-    svm_segment_register_t es;  /* offset 1024 */
-    svm_segment_register_t cs;
-    svm_segment_register_t ss;
-    svm_segment_register_t ds;
+    svm_segment_register_t es;  /* offset 1024 - cleanbit 8 */
+    svm_segment_register_t cs;  /* cleanbit 8 */
+    svm_segment_register_t ss;  /* cleanbit 8 */
+    svm_segment_register_t ds;  /* cleanbit 8 */
     svm_segment_register_t fs;
     svm_segment_register_t gs;
-    svm_segment_register_t gdtr;
+    svm_segment_register_t gdtr; /* cleanbit 7 */
     svm_segment_register_t ldtr;
-    svm_segment_register_t idtr;
+    svm_segment_register_t idtr; /* cleanbit 7 */
     svm_segment_register_t tr;
     u64 res10[5];
     u8 res11[3];
-    u8 cpl;
+    u8 _cpl;                    /* cleanbit 8 */
     u32 res12;
-    u64 efer;                   /* offset 1024 + 0xD0 */
+    u64 _efer;                  /* offset 1024 + 0xD0 - cleanbit 5 */
     u64 res13[14];
-    u64 cr4;                    /* loffset 1024 + 0x148 */
-    u64 cr3;
-    u64 cr0;
-    u64 dr7;
-    u64 dr6;
+    u64 _cr4;                   /* offset 1024 + 0x148 - cleanbit 5 */
+    u64 _cr3;                   /* cleanbit 5 */
+    u64 _cr0;                   /* cleanbit 5 */
+    u64 _dr7;                   /* cleanbit 6 */
+    u64 _dr6;                   /* cleanbit 6 */
     u64 rflags;
     u64 rip;
     u64 res14[11];
@@ -436,17 +469,17 @@ struct vmcb_struct {
     u64 sysenter_cs;
     u64 sysenter_esp;
     u64 sysenter_eip;
-    u64 cr2;
+    u64 _cr2;                   /* cleanbit 9 */
     u64 pdpe0;
     u64 pdpe1;
     u64 pdpe2;
     u64 pdpe3;
-    u64 g_pat;
-    u64 debugctlmsr;
-    u64 lastbranchfromip;
-    u64 lastbranchtoip;
-    u64 lastintfromip;
-    u64 lastinttoip;
+    u64 _g_pat;                 /* cleanbit 4 */
+    u64 _debugctlmsr;           /* cleanbit 10 */
+    u64 _lastbranchfromip;      /* cleanbit 10 */
+    u64 _lastbranchtoip;        /* cleanbit 10 */
+    u64 _lastintfromip;         /* cleanbit 10 */
+    u64 _lastinttoip;           /* cleanbit 10 */
     u64 res16[301];
 } __attribute__ ((packed));
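Note that the new cleanbits word occupies the low half of what used to be the res09 qword, so every later offset is unchanged: cleanbits at 0xC0, the residual u32 at 0xC4, nextrip still at 0xC8. If desired this can be pinned down with a build-time check along the following lines (illustrative; no such check is part of this changeset):

    static inline void example_vmcb_layout_check(void)
    {
        BUILD_BUG_ON(offsetof(struct vmcb_struct, cleanbits) != 0xC0);
        BUILD_BUG_ON(offsetof(struct vmcb_struct, nextrip) != 0xC8);
    }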
@@ -485,6 +518,58 @@ void svm_intercept_msr(struct vcpu *v, u
 void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
 #define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 0)
 #define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 1)
+
+/*
+ * VMCB accessor functions.
+ */
+
+#define VMCB_ACCESSORS(_type, _name, _cleanbit)                             \
+static inline void vmcb_set_##_name(struct vmcb_struct *vmcb, _type value)  \
+{                                                                           \
+    vmcb->_##_name = value;                                                 \
+    vmcb->cleanbits.fields._cleanbit = 0;                                   \
+}                                                                           \
+static inline _type vmcb_get_##_name(struct vmcb_struct *vmcb)              \
+{                                                                           \
+    return vmcb->_##_name;                                                  \
+}
+
+VMCB_ACCESSORS(u32, cr_intercepts, intercepts)
+VMCB_ACCESSORS(u32, dr_intercepts, intercepts)
+VMCB_ACCESSORS(u32, exception_intercepts, intercepts)
+VMCB_ACCESSORS(u32, general1_intercepts, intercepts)
+VMCB_ACCESSORS(u32, general2_intercepts, intercepts)
+VMCB_ACCESSORS(u16, pause_filter_count, intercepts)
+VMCB_ACCESSORS(u64, tsc_offset, intercepts)
+VMCB_ACCESSORS(u64, iopm_base_pa, iopm)
+VMCB_ACCESSORS(u64, msrpm_base_pa, iopm)
+VMCB_ACCESSORS(u32, guest_asid, asid)
+VMCB_ACCESSORS(vintr_t, vintr, tpr)
+VMCB_ACCESSORS(u64, np_enable, np)
+VMCB_ACCESSORS(u64, h_cr3, np)
+VMCB_ACCESSORS(u64, g_pat, np)
+VMCB_ACCESSORS(u64, cr0, cr)
+VMCB_ACCESSORS(u64, cr3, cr)
+VMCB_ACCESSORS(u64, cr4, cr)
+VMCB_ACCESSORS(u64, efer, cr)
+VMCB_ACCESSORS(u64, dr6, dr)
+VMCB_ACCESSORS(u64, dr7, dr)
+/* Updates are all via hvm_set_segment_register(). */
+/* VMCB_ACCESSORS(svm_segment_register_t, gdtr, dt) */
+/* VMCB_ACCESSORS(svm_segment_register_t, idtr, dt) */
+/* VMCB_ACCESSORS(svm_segment_register_t, cs, seg) */
+/* VMCB_ACCESSORS(svm_segment_register_t, ds, seg) */
+/* VMCB_ACCESSORS(svm_segment_register_t, es, seg) */
+/* VMCB_ACCESSORS(svm_segment_register_t, ss, seg) */
+VMCB_ACCESSORS(u8, cpl, seg)
+VMCB_ACCESSORS(u64, cr2, cr2)
+VMCB_ACCESSORS(u64, debugctlmsr, lbr)
+VMCB_ACCESSORS(u64, lastbranchfromip, lbr)
+VMCB_ACCESSORS(u64, lastbranchtoip, lbr)
+VMCB_ACCESSORS(u64, lastintfromip, lbr)
+VMCB_ACCESSORS(u64, lastinttoip, lbr)
+
+#undef VMCB_ACCESSORS

 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
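For clarity, VMCB_ACCESSORS(u64, cr0, cr) expands to exactly this pair of inline functions (reformatted):

    static inline void vmcb_set_cr0(struct vmcb_struct *vmcb, u64 value)
    {
        vmcb->_cr0 = value;             /* store the new value...          */
        vmcb->cleanbits.fields.cr = 0;  /* ...and dirty the whole CR group */
    }

    static inline u64 vmcb_get_cr0(struct vmcb_struct *vmcb)
    {
        return vmcb->_cr0;
    }

A typical call site, as used throughout svm.c above, reads, modifies and writes back in a single expression:

    vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) | X86_CR0_TS);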
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog