
[Xen-changelog] [xen master] x86/VMX: use unambiguous register names



commit 718dcb95277caf39c3ab946be7352bac9acc5792
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Jan 5 11:11:19 2017 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jan 5 11:11:19 2017 +0100

    x86/VMX: use unambiguous register names
    
    This is in preparation for eliminating the mis-naming of 64-bit fields
    with 32-bit register names (eflags instead of rflags etc.). Use the
    guaranteed 32-bit, underscore-prefixed names for now where appropriate.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c |  6 ++--
 xen/arch/x86/hvm/vmx/vmx.c  | 67 ++++++++++++++++++++++-----------------------
 xen/arch/x86/hvm/vmx/vvmx.c | 28 +++++++++----------
 3 files changed, 49 insertions(+), 52 deletions(-)
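
As background for the hunks below: the ambiguity this commit removes stems
from the way the x86 cpu_user_regs fields alias each other, where the plain
"eax"/"eflags" names are in fact full 64-bit fields and only the
underscore-prefixed "_eax"/"_eflags" names are guaranteed to be 32 bits wide.
The following stand-alone C sketch mimics that layout; the names
cpu_user_regs_sketch and SKETCH_DECL_REG are made up for illustration and are
not Xen's actual declarations.

/*
 * Illustrative sketch only -- NOT Xen's actual cpu_user_regs declaration.
 * It mimics the aliasing that makes the plain e-names ambiguous: "eax"
 * and "eflags" alias the full 64-bit fields, while only the underscore-
 * prefixed "_eax"/"_eflags" are guaranteed to be 32 bits wide.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DECL_REG(name) union {                                      \
    uint64_t r ## name, e ## name;  /* both 64 bits wide */                \
    uint32_t _e ## name;            /* low 32 bits (x86 is little-endian) */ \
}

struct cpu_user_regs_sketch {
    SKETCH_DECL_REG(ax);
    SKETCH_DECL_REG(flags);
};

int main(void)
{
    struct cpu_user_regs_sketch regs = { .rax = 0x123456789abcdef0ULL };

    /* The plain name is NOT the 32-bit register: it reads all of rax. */
    printf("eax  = %#llx\n", (unsigned long long)regs.eax);

    /* The underscore-prefixed name is the unambiguous 32-bit view. */
    printf("_eax = %#x\n", regs._eax);

    /* Assigning a 32-bit value to the r-name zero-extends, matching how
     * real hardware writes 32-bit GPRs (e.g. the CPUID output registers). */
    uint32_t eax_out = 0x8086;
    regs.rax = eax_out;
    printf("rax  = %#llx\n", (unsigned long long)regs.rax);

    return 0;
}

This is why, in the rewritten code, narrow reads (flag tests, the MSR index,
trace arguments) move to the _e-prefixed names, while whole-register writes
such as the CPUID outputs move to the r-prefixed names, where assigning a
32-bit value zero-extends just as a 32-bit GPR write does on real hardware.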

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 5db5fea..59ef199 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1778,10 +1778,10 @@ void vmcs_dump_vcpu(struct vcpu *v)
                vmr(GUEST_PDPTE(2)), vmr(GUEST_PDPTE(3)));
     }
     printk("RSP = 0x%016lx (0x%016lx)  RIP = 0x%016lx (0x%016lx)\n",
-           vmr(GUEST_RSP), regs->esp,
-           vmr(GUEST_RIP), regs->eip);
+           vmr(GUEST_RSP), regs->rsp,
+           vmr(GUEST_RIP), regs->rip);
     printk("RFLAGS=0x%08lx (0x%08lx)  DR7 = 0x%016lx\n",
-           vmr(GUEST_RFLAGS), regs->eflags,
+           vmr(GUEST_RFLAGS), regs->rflags,
            vmr(GUEST_DR7));
     printk("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
            vmr(GUEST_SYSENTER_ESP),
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 15d66a2..e75b190 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -295,7 +295,7 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     /* %eax == 1 signals full real-mode support to the guest loader. */
     if ( v->vcpu_id == 0 )
-        v->arch.user_regs.eax = 1;
+        v->arch.user_regs.rax = 1;
 
     return 0;
 }
@@ -560,7 +560,7 @@ int vmx_guest_x86_mode(struct vcpu *v)
 
     if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
-    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
+    if ( unlikely(guest_cpu_user_regs()->_eflags & X86_EFLAGS_VM) )
         return 1;
     __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
     if ( hvm_long_mode_enabled(v) &&
@@ -1670,7 +1670,7 @@ static void vmx_inject_event(const struct x86_event *event)
     switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
     {
     case TRAP_debug:
-        if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
+        if ( guest_cpu_user_regs()->_eflags & X86_EFLAGS_TF )
         {
             __restore_debug_registers(curr);
             write_debugreg(6, read_debugreg(6) | DR_STEP);
@@ -1770,7 +1770,7 @@ static void vmx_set_info_guest(struct vcpu *v)
      */
     __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
     if ( v->domain->debugger_attached &&
-         (v->arch.user_regs.eflags & X86_EFLAGS_TF) &&
+         (v->arch.user_regs._eflags & X86_EFLAGS_TF) &&
          (intr_shadow & VMX_INTR_SHADOW_STI) )
     {
         intr_shadow &= ~VMX_INTR_SHADOW_STI;
@@ -2331,8 +2331,8 @@ void update_guest_eip(void)
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned long x;
 
-    regs->eip += get_instruction_length(); /* Safe: callers audited */
-    regs->eflags &= ~X86_EFLAGS_RF;
+    regs->rip += get_instruction_length(); /* Safe: callers audited */
+    regs->_eflags &= ~X86_EFLAGS_RF;
 
     __vmread(GUEST_INTERRUPTIBILITY_INFO, &x);
     if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
@@ -2341,7 +2341,7 @@ void update_guest_eip(void)
         __vmwrite(GUEST_INTERRUPTIBILITY_INFO, x);
     }
 
-    if ( regs->eflags & X86_EFLAGS_TF )
+    if ( regs->_eflags & X86_EFLAGS_TF )
         hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 }
 
@@ -2370,21 +2370,21 @@ static int vmx_do_cpuid(struct cpu_user_regs *regs)
         return 1;  /* Don't advance the guest IP! */
     }
 
-    eax = regs->eax;
-    ebx = regs->ebx;
-    ecx = regs->ecx;
-    edx = regs->edx;
+    eax = regs->_eax;
+    ebx = regs->_ebx;
+    ecx = regs->_ecx;
+    edx = regs->_edx;
 
-    leaf = regs->eax;
-    subleaf = regs->ecx;
+    leaf = regs->_eax;
+    subleaf = regs->_ecx;
 
     hvm_cpuid(leaf, &eax, &ebx, &ecx, &edx);
     HVMTRACE_5D(CPUID, leaf, eax, ebx, ecx, edx);
 
-    regs->eax = eax;
-    regs->ebx = ebx;
-    regs->ecx = ecx;
-    regs->edx = edx;
+    regs->rax = eax;
+    regs->rbx = ebx;
+    regs->rcx = ecx;
+    regs->rdx = edx;
 
     return hvm_monitor_cpuid(get_instruction_length(), leaf, subleaf);
 }
@@ -3097,8 +3097,8 @@ void vmx_enter_realmode(struct cpu_user_regs *regs)
     /* Adjust RFLAGS to enter virtual 8086 mode with IOPL == 3.  Since
      * we have CR4.VME == 1 and our own TSS with an empty interrupt
      * redirection bitmap, all software INTs will be handled by vm86 */
-    v->arch.hvm_vmx.vm86_saved_eflags = regs->eflags;
-    regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
+    v->arch.hvm_vmx.vm86_saved_eflags = regs->_eflags;
+    regs->_eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
 }
 
 static int vmx_handle_eoi_write(void)
@@ -3240,12 +3240,10 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     if ( hvm_long_mode_enabled(v) )
         HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
-                    (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
-                    0, 0, 0);
+                    regs->_eip, regs->rip >> 32, 0, 0, 0);
     else
         HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, 2, exit_reason,
-                    (uint32_t)regs->eip, 
-                    0, 0, 0, 0);
+                    regs->_eip, 0, 0, 0, 0);
 
     perfc_incra(vmexits, exit_reason);
 
@@ -3330,8 +3328,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     if ( v->arch.hvm_vmx.vmx_realmode )
     {
         /* Put RFLAGS back the way the guest wants it */
-        regs->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
-        regs->eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
+        regs->_eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
+        regs->_eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
 
         /* Unless this exit was for an interrupt, we've hit something
          * vm86 can't handle.  Try again, using the emulator. */
@@ -3420,7 +3418,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
                 if ( trap_type >= X86_EVENTTYPE_SW_INTERRUPT )
                     __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
 
-                rc = hvm_monitor_debug(regs->eip,
+                rc = hvm_monitor_debug(regs->rip,
                                        HVM_MONITOR_DEBUG_EXCEPTION,
                                        trap_type, insn_len);
 
@@ -3445,7 +3443,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
                 int rc;
 
                 __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
-                rc = hvm_monitor_debug(regs->eip,
+                rc = hvm_monitor_debug(regs->rip,
                                        HVM_MONITOR_SOFTWARE_BREAKPOINT,
                                        X86_EVENTTYPE_SW_EXCEPTION,
                                        insn_len);
@@ -3473,9 +3471,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU,
                         "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
-                        (unsigned long)regs->eax, (unsigned long)regs->ebx,
-                        (unsigned long)regs->ecx, (unsigned long)regs->edx,
-                        (unsigned long)regs->esi, (unsigned long)regs->edi);
+                        regs->rax, regs->rbx, regs->rcx,
+                        regs->rdx, regs->rsi, regs->rdi);
 
             if ( paging_fault(exit_qualification, regs) )
             {
@@ -3556,7 +3553,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             __vmread(IDT_VECTORING_ERROR_CODE, &ecode);
         else
              ecode = -1;
-        regs->eip += inst_len;
+        regs->rip += inst_len;
         hvm_task_switch((uint16_t)exit_qualification, reasons[source], ecode);
         break;
     }
@@ -3585,7 +3582,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     }
     case EXIT_REASON_HLT:
         update_guest_eip(); /* Safe: HLT */
-        hvm_hlt(regs->eflags);
+        hvm_hlt(regs->_eflags);
         break;
     case EXIT_REASON_INVLPG:
         update_guest_eip(); /* Safe: INVLPG */
@@ -3593,7 +3590,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_invlpg_intercept(exit_qualification);
         break;
     case EXIT_REASON_RDTSCP:
-        regs->ecx = hvm_msr_tsc_aux(v);
+        regs->rcx = hvm_msr_tsc_aux(v);
         /* fall through */
     case EXIT_REASON_RDTSC:
         update_guest_eip(); /* Safe: RDTSC, RDTSCP */
@@ -3602,7 +3599,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     case EXIT_REASON_VMCALL:
     {
         int rc;
-        HVMTRACE_1D(VMMCALL, regs->eax);
+        HVMTRACE_1D(VMMCALL, regs->_eax);
         rc = hvm_do_hypercall(regs);
         if ( rc != HVM_HCALL_preempted )
         {
@@ -3782,7 +3779,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_update_cpu_exec_control(v);
         if ( v->arch.hvm_vcpu.single_step )
         {
-            hvm_monitor_debug(regs->eip,
+            hvm_monitor_debug(regs->rip,
                               HVM_MONITOR_SINGLESTEP_BREAKPOINT,
                               0, 0);
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index d53c576..a5db7f3 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -471,23 +471,23 @@ gp_fault:
 
 static void vmsucceed(struct cpu_user_regs *regs)
 {
-    regs->eflags &= ~X86_EFLAGS_ARITH_MASK;
+    regs->_eflags &= ~X86_EFLAGS_ARITH_MASK;
 }
 
 static void vmfail_valid(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
 {
     struct vcpu *v = current;
-    unsigned long eflags = regs->eflags;
+    unsigned int eflags = regs->_eflags;
 
-    regs->eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_ZF;
+    regs->_eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_ZF;
     set_vvmcs(v, VM_INSTRUCTION_ERROR, errno);
 }
 
 static void vmfail_invalid(struct cpu_user_regs *regs)
 {
-    unsigned long eflags = regs->eflags;
+    unsigned int eflags = regs->_eflags;
 
-    regs->eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
+    regs->_eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
 }
 
 static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
@@ -1135,9 +1135,9 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
          !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
         vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
-    regs->eip = get_vvmcs(v, GUEST_RIP);
-    regs->esp = get_vvmcs(v, GUEST_RSP);
-    regs->eflags = get_vvmcs(v, GUEST_RFLAGS);
+    regs->rip = get_vvmcs(v, GUEST_RIP);
+    regs->rsp = get_vvmcs(v, GUEST_RSP);
+    regs->rflags = get_vvmcs(v, GUEST_RFLAGS);
 
     /* updating host cr0 to sync TS bit */
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
@@ -1169,8 +1169,8 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
     shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_gstate_field),
                          vmcs_gstate_field);
     /* RIP, RSP are in user regs */
-    set_vvmcs(v, GUEST_RIP, regs->eip);
-    set_vvmcs(v, GUEST_RSP, regs->esp);
+    set_vvmcs(v, GUEST_RIP, regs->rip);
+    set_vvmcs(v, GUEST_RSP, regs->rsp);
 
     /* CR3 sync if exec doesn't want cr3 load exiting: i.e. nested EPT */
     if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) )
@@ -1321,10 +1321,10 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
     if ( lm_l1 != lm_l2 )
         paging_update_paging_modes(v);
 
-    regs->eip = get_vvmcs(v, HOST_RIP);
-    regs->esp = get_vvmcs(v, HOST_RSP);
+    regs->rip = get_vvmcs(v, HOST_RIP);
+    regs->rsp = get_vvmcs(v, HOST_RSP);
     /* VM exit clears all bits except bit 1 */
-    regs->eflags = 0x2;
+    regs->rflags = X86_EFLAGS_MBS;
 
     /* updating host cr0 to sync TS bit */
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
@@ -2248,7 +2248,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         ctrl = __n2_exec_control(v);
         if ( ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP )
         {
-            status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->ecx,
+            status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->_ecx,
                          !!(exit_reason == EXIT_REASON_MSR_WRITE));
             if ( status )
                 nvcpu->nv_vmexit_pending = 1;
--
generated by git-patchbot for /home/xen/git/xen.git#master

