[Xen-changelog] [xen master] VMX: dump full guest state
commit de8d96927b7e82bd36ef14852f8f0c4f81a12dbc
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Jan 22 12:54:49 2015 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jan 22 12:54:49 2015 +0100

    VMX: dump full guest state

    Several guest state fields did not get dumped so far. Where suitable
    (to reduce the amount of output) make some of the dumping conditional
    upon guest settings (this isn't required for correctness as vmr()
    already uses __vmread_safe(), i.e. it is fine to access non-existing
    fields).

    Move CR3_TARGET_* and TSC_OFFSET processing into the control state
    section, at once making the upper bound of CR3_TARGET_VALUEn printed
    depend on CR3_TARGET_COUNT (which architecturally can be higher than 4).

    Also rename GUEST_PDPTRn to GUEST_PDPTEn (matching the SDM naming) and
    group them as well as CR3_TARGET_VALUEn similar to EOI_EXIT_BITMAP.

    Finally, drop casts - they haven't been needed anymore since the
    dropping of 32-bit support (and some of them were not really needed in
    the first place). Introduce vmr16() and vmr32() helper macros to avoid
    the "l" printk format modifier and at the same time validate that only
    16-/32-bit fields get accessed this way.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        | 112 +++++++++++++++++++++---------------
 xen/arch/x86/hvm/vmx/vmx.c         |  12 ++--
 xen/arch/x86/hvm/vmx/vvmx.c        |  14 ++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |  17 ++----
 4 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 9d8033e..c5e6f88 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1395,6 +1395,16 @@ static inline unsigned long vmr(unsigned long field)
     return __vmread_safe(field, &val) ? val : 0;
 }
 
+#define vmr16(fld) ({ \
+    BUILD_BUG_ON((fld) & 0x6001); \
+    (uint16_t)vmr(fld); \
+})
+
+#define vmr32(fld) ({ \
+    BUILD_BUG_ON(((fld) & 0x6001) != 0x4000); \
+    (uint32_t)vmr(fld); \
+})
+
 static void vmx_dump_sel(char *name, uint32_t selector)
 {
     uint32_t sel, attr, limit;
@@ -1420,44 +1430,45 @@ static void vmx_dump_sel2(char *name, uint32_t lim)
 void vmcs_dump_vcpu(struct vcpu *v)
 {
     struct cpu_user_regs *regs = &v->arch.user_regs;
-    unsigned long long x;
+    uint32_t vmentry_ctl, vmexit_ctl;
+    unsigned long cr4;
+    uint64_t efer;
+    unsigned int i, n;
 
     if ( v == current )
         regs = guest_cpu_user_regs();
 
     vmx_vmcs_enter(v);
 
+    vmentry_ctl = vmr32(VM_ENTRY_CONTROLS),
+    vmexit_ctl = vmr32(VM_EXIT_CONTROLS);
+    cr4 = vmr(GUEST_CR4);
+    efer = vmr(GUEST_EFER);
+
     printk("*** Guest State ***\n");
-    printk("CR0: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
-           (unsigned long long)vmr(GUEST_CR0),
-           (unsigned long long)vmr(CR0_READ_SHADOW),
-           (unsigned long long)vmr(CR0_GUEST_HOST_MASK));
-    printk("CR4: actual=0x%016llx, shadow=0x%016llx, gh_mask=%016llx\n",
-           (unsigned long long)vmr(GUEST_CR4),
-           (unsigned long long)vmr(CR4_READ_SHADOW),
-           (unsigned long long)vmr(CR4_GUEST_HOST_MASK));
-    printk("CR3: actual=0x%016llx, target_count=%d\n",
-           (unsigned long long)vmr(GUEST_CR3),
-           (int)vmr(CR3_TARGET_COUNT));
-    printk("     target0=%016llx, target1=%016llx\n",
-           (unsigned long long)vmr(CR3_TARGET_VALUE0),
-           (unsigned long long)vmr(CR3_TARGET_VALUE1));
-    printk("     target2=%016llx, target3=%016llx\n",
-           (unsigned long long)vmr(CR3_TARGET_VALUE2),
-           (unsigned long long)vmr(CR3_TARGET_VALUE3));
-    printk("RSP = 0x%016llx (0x%016llx) RIP = 0x%016llx (0x%016llx)\n",
-           (unsigned long long)vmr(GUEST_RSP),
-           (unsigned long long)regs->esp,
-           (unsigned long long)vmr(GUEST_RIP),
-           (unsigned long long)regs->eip);
-    printk("RFLAGS=0x%016llx (0x%016llx) DR7 = 0x%016llx\n",
-           (unsigned long long)vmr(GUEST_RFLAGS),
-           (unsigned long long)regs->eflags,
-           (unsigned long long)vmr(GUEST_DR7));
-    printk("Sysenter RSP=%016llx CS:RIP=%04x:%016llx\n",
-           (unsigned long long)vmr(GUEST_SYSENTER_ESP),
-           (int)vmr(GUEST_SYSENTER_CS),
-           (unsigned long long)vmr(GUEST_SYSENTER_EIP));
+    printk("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
+           vmr(GUEST_CR0), vmr(CR0_READ_SHADOW), vmr(CR0_GUEST_HOST_MASK));
+    printk("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
+           cr4, vmr(CR4_READ_SHADOW), vmr(CR4_GUEST_HOST_MASK));
+    printk("CR3 = 0x%016lx\n", vmr(GUEST_CR3));
+    if ( (v->arch.hvm_vmx.secondary_exec_control &
+          SECONDARY_EXEC_ENABLE_EPT) &&
+         (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA) )
+    {
+        printk("PDPTE0 = 0x%016lx PDPTE1 = 0x%016lx\n",
+               vmr(GUEST_PDPTE(0)), vmr(GUEST_PDPTE(1)));
+        printk("PDPTE2 = 0x%016lx PDPTE3 = 0x%016lx\n",
+               vmr(GUEST_PDPTE(2)), vmr(GUEST_PDPTE(3)));
+    }
+    printk("RSP = 0x%016lx (0x%016lx) RIP = 0x%016lx (0x%016lx)\n",
+           vmr(GUEST_RSP), regs->esp,
+           vmr(GUEST_RIP), regs->eip);
+    printk("RFLAGS=0x%08lx (0x%08lx) DR7 = 0x%016lx\n",
+           vmr(GUEST_RFLAGS), regs->eflags,
+           vmr(GUEST_DR7));
+    printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
+           vmr(GUEST_SYSENTER_ESP),
+           vmr32(GUEST_SYSENTER_CS), vmr(GUEST_SYSENTER_EIP));
     vmx_dump_sel("CS", GUEST_CS_SELECTOR);
     vmx_dump_sel("DS", GUEST_DS_SELECTOR);
     vmx_dump_sel("SS", GUEST_SS_SELECTOR);
@@ -1468,18 +1479,21 @@ void vmcs_dump_vcpu(struct vcpu *v)
     vmx_dump_sel("LDTR", GUEST_LDTR_SELECTOR);
     vmx_dump_sel2("IDTR", GUEST_IDTR_LIMIT);
     vmx_dump_sel("TR", GUEST_TR_SELECTOR);
-    printk("Guest PAT = 0x%08x%08x\n",
-           (uint32_t)vmr(GUEST_PAT_HIGH), (uint32_t)vmr(GUEST_PAT));
-    x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32;
-    x |= (uint32_t)vmr(TSC_OFFSET);
-    printk("TSC Offset = %016llx\n", x);
-    x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32;
-    x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL);
-    printk("DebugCtl=%016llx DebugExceptions=%016llx\n", x,
-           (unsigned long long)vmr(GUEST_PENDING_DBG_EXCEPTIONS));
-    printk("Interruptibility=%04x ActivityState=%04x\n",
-           (int)vmr(GUEST_INTERRUPTIBILITY_INFO),
-           (int)vmr(GUEST_ACTIVITY_STATE));
+    if ( (vmexit_ctl & (VM_EXIT_SAVE_GUEST_PAT | VM_EXIT_SAVE_GUEST_EFER)) ||
+         (vmentry_ctl & (VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_GUEST_EFER)) )
+        printk("EFER = 0x%016lx PAT = 0x%016lx\n", efer, vmr(GUEST_PAT));
+    printk("PreemptionTimer = 0x%08x SM Base = 0x%08x\n",
+           vmr32(GUEST_PREEMPTION_TIMER), vmr32(GUEST_SMBASE));
+    printk("DebugCtl = 0x%016lx DebugExceptions = 0x%016lx\n",
+           vmr(GUEST_IA32_DEBUGCTL), vmr(GUEST_PENDING_DBG_EXCEPTIONS));
+    if ( vmentry_ctl & (VM_ENTRY_LOAD_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_BNDCFGS) )
+        printk("PerfGlobCtl = 0x%016lx BndCfgS = 0x%016lx\n",
+               vmr(GUEST_PERF_GLOBAL_CTRL), vmr(GUEST_BNDCFGS));
+    printk("Interruptibility = %08x ActivityState = %08x\n",
+           vmr32(GUEST_INTERRUPTIBILITY_INFO), vmr32(GUEST_ACTIVITY_STATE));
+    if ( v->arch.hvm_vmx.secondary_exec_control &
+         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY )
+        printk("InterruptStatus = %04x\n", vmr16(GUEST_INTR_STATUS));
 
     printk("*** Host State ***\n");
     printk("RSP = 0x%016llx RIP = 0x%016llx\n",
@@ -1516,9 +1530,7 @@ void vmcs_dump_vcpu(struct vcpu *v)
            (uint32_t)vmr(PIN_BASED_VM_EXEC_CONTROL),
            (uint32_t)vmr(CPU_BASED_VM_EXEC_CONTROL),
            (uint32_t)vmr(SECONDARY_VM_EXEC_CONTROL));
-    printk("EntryControls=%08x ExitControls=%08x\n",
-           (uint32_t)vmr(VM_ENTRY_CONTROLS),
-           (uint32_t)vmr(VM_EXIT_CONTROLS));
+    printk("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
     printk("ExceptionBitmap=%08x\n",
            (uint32_t)vmr(EXCEPTION_BITMAP));
     printk("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
@@ -1537,8 +1549,16 @@ void vmcs_dump_vcpu(struct vcpu *v)
            (uint32_t)vmr(IDT_VECTORING_ERROR_CODE));
     printk("TPR Threshold = 0x%02x\n",
            (uint32_t)vmr(TPR_THRESHOLD));
+    printk("TSC Offset = 0x%016lx\n", vmr(TSC_OFFSET));
     printk("EPT pointer = 0x%08x%08x\n",
            (uint32_t)vmr(EPT_POINTER_HIGH), (uint32_t)vmr(EPT_POINTER));
+    n = vmr32(CR3_TARGET_COUNT);
+    for ( i = 0; i + 1 < n; i += 2 )
+        printk("CR3 target%u=%016lx target%u=%016lx\n",
+               i, vmr(CR3_TARGET_VALUE(i)),
+               i + 1, vmr(CR3_TARGET_VALUE(i + 1)));
+    if ( i < n )
+        printk("CR3 target%u=%016lx\n", i, vmr(CR3_TARGET_VALUE(i)));
     printk("Virtual processor ID = 0x%04x\n",
            (uint32_t)vmr(VIRTUAL_PROCESSOR_ID));
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 522892f..ed5e27a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1129,7 +1129,7 @@ static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 static void vmx_load_pdptrs(struct vcpu *v)
 {
     unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
-    uint64_t *guest_pdptrs;
+    uint64_t *guest_pdptes;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
@@ -1155,7 +1155,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
 
     p = __map_domain_page(page);
 
-    guest_pdptrs = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
+    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
 
     /*
      * We do not check the PDPTRs for validity. The CPU will do this during
@@ -1165,10 +1165,10 @@ static void vmx_load_pdptrs(struct vcpu *v)
 
     vmx_vmcs_enter(v);
 
-    __vmwrite(GUEST_PDPTR0, guest_pdptrs[0]);
-    __vmwrite(GUEST_PDPTR1, guest_pdptrs[1]);
-    __vmwrite(GUEST_PDPTR2, guest_pdptrs[2]);
-    __vmwrite(GUEST_PDPTR3, guest_pdptrs[3]);
+    __vmwrite(GUEST_PDPTE(0), guest_pdptes[0]);
+    __vmwrite(GUEST_PDPTE(1), guest_pdptes[1]);
+    __vmwrite(GUEST_PDPTE(2), guest_pdptes[2]);
+    __vmwrite(GUEST_PDPTE(3), guest_pdptes[3]);
 
     vmx_vmcs_exit(v);
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 9ccc03f..26611b3 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -881,11 +881,11 @@ static const u16 vmcs_gstate_field[] = {
     GUEST_SYSENTER_EIP,
 };
 
-static const u16 gpdptr_fields[] = {
-    GUEST_PDPTR0,
-    GUEST_PDPTR1,
-    GUEST_PDPTR2,
-    GUEST_PDPTR3,
+static const u16 gpdpte_fields[] = {
+    GUEST_PDPTE(0),
+    GUEST_PDPTE(1),
+    GUEST_PDPTE(2),
+    GUEST_PDPTE(3),
 };
 
 /*
@@ -1173,7 +1173,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
 
     if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
          !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
-        vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdptr_fields), gpdptr_fields);
+        vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
     regs->eip = __get_vvmcs(vvmcs, GUEST_RIP);
     regs->esp = __get_vvmcs(vvmcs, GUEST_RSP);
@@ -1348,7 +1348,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
 
     if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
          !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
-        shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdptr_fields), gpdptr_fields);
+        shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
     vmx_vmcs_switch(v->arch.hvm_vmx.vmcs, nvcpu->nv_n1vmcx);
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 6a99dca..3b16ee6 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -362,14 +362,8 @@ enum vmcs_field {
     GUEST_EFER_HIGH = 0x00002807,
     GUEST_PERF_GLOBAL_CTRL = 0x00002808,
     GUEST_PERF_GLOBAL_CTRL_HIGH = 0x00002809,
-    GUEST_PDPTR0 = 0x0000280a,
-    GUEST_PDPTR0_HIGH = 0x0000280b,
-    GUEST_PDPTR1 = 0x0000280c,
-    GUEST_PDPTR1_HIGH = 0x0000280d,
-    GUEST_PDPTR2 = 0x0000280e,
-    GUEST_PDPTR2_HIGH = 0x0000280f,
-    GUEST_PDPTR3 = 0x00002810,
-    GUEST_PDPTR3_HIGH = 0x00002811,
+    GUEST_PDPTE0 = 0x0000280a,
+#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
     GUEST_BNDCFGS = 0x00002812,
     GUEST_BNDCFGS_HIGH = 0x00002813,
     HOST_PAT = 0x00002c00,
@@ -424,7 +418,8 @@ enum vmcs_field {
     GUEST_TR_AR_BYTES = 0x00004822,
     GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
     GUEST_ACTIVITY_STATE = 0x00004826,
-    GUEST_SYSENTER_CS = 0x0000482A,
+    GUEST_SMBASE = 0x00004828,
+    GUEST_SYSENTER_CS = 0x0000482a,
     GUEST_PREEMPTION_TIMER = 0x0000482e,
     HOST_SYSENTER_CS = 0x00004c00,
     CR0_GUEST_HOST_MASK = 0x00006000,
@@ -432,9 +427,7 @@ enum vmcs_field {
     CR0_READ_SHADOW = 0x00006004,
     CR4_READ_SHADOW = 0x00006006,
     CR3_TARGET_VALUE0 = 0x00006008,
-    CR3_TARGET_VALUE1 = 0x0000600a,
-    CR3_TARGET_VALUE2 = 0x0000600c,
-    CR3_TARGET_VALUE3 = 0x0000600e,
+#define CR3_TARGET_VALUE(n) (CR3_TARGET_VALUE0 + (n) * 2) /* n < CR3_TARGET_COUNT */
     EXIT_QUALIFICATION = 0x00006400,
     GUEST_LINEAR_ADDRESS = 0x0000640a,
     GUEST_CR0 = 0x00006800,
--
generated by git-patchbot for /home/xen/git/xen.git#master
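A note on the BUILD_BUG_ON() checks in the new vmr16()/vmr32() helpers: in the
VMCS field encoding, bits 14:13 give the access width (0 = 16-bit, 1 = 64-bit,
2 = 32-bit, 3 = natural width) and bit 0 selects the high half of a 64-bit
field, so masking an encoding with 0x6001 is enough to tell these cases apart.
The standalone sketch below is illustration only, not Xen code: it uses plain
assert() in place of BUILD_BUG_ON(), the mask name VMCS_WIDTH_HIGH_MASK is made
up for clarity, and the GUEST_INTR_STATUS encoding (0x810) is taken from the
SDM rather than from this patch. It also shows the arithmetic behind the new
GUEST_PDPTE(n) and CR3_TARGET_VALUE(n) index macros.

/* Standalone illustration only -- not Xen code.  assert() stands in for
 * BUILD_BUG_ON(); field encodings match the patch and the Intel SDM. */
#include <assert.h>
#include <stdio.h>

#define VMCS_WIDTH_HIGH_MASK 0x6001u  /* bits 14:13 (width) plus bit 0 (high half) */

#define GUEST_INTR_STATUS    0x0810u  /* 16-bit guest-state field (from the SDM) */
#define GUEST_SYSENTER_CS    0x482au  /* 32-bit guest-state field */
#define GUEST_PDPTE0         0x280au  /* 64-bit guest-state field */
#define GUEST_PDPTE(n)       (GUEST_PDPTE0 + (n) * 2)        /* n = 0...3 */
#define CR3_TARGET_VALUE0    0x6008u  /* natural-width control field */
#define CR3_TARGET_VALUE(n)  (CR3_TARGET_VALUE0 + (n) * 2)   /* n < CR3_TARGET_COUNT */

int main(void)
{
    /* vmr16(): a 16-bit field has bits 14:13 and bit 0 all clear. */
    assert((GUEST_INTR_STATUS & VMCS_WIDTH_HIGH_MASK) == 0);

    /* vmr32(): a 32-bit field has bits 14:13 == 2 (i.e. 0x4000) and bit 0 clear. */
    assert((GUEST_SYSENTER_CS & VMCS_WIDTH_HIGH_MASK) == 0x4000);

    /* GUEST_PDPTE(n) steps by 2 because each 64-bit field encoding is
     * immediately followed by its "_HIGH" companion (encoding | 1). */
    assert(GUEST_PDPTE(3) == 0x2810);        /* value of the former GUEST_PDPTR3 */

    /* CR3 target values are natural-width fields at consecutive even
     * encodings, so the same "+ 2 * n" arithmetic applies. */
    assert(CR3_TARGET_VALUE(3) == 0x600e);   /* former CR3_TARGET_VALUE3 */

    printf("VMCS encoding checks pass\n");
    return 0;
}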