
[Xen-devel] [PATCH v3 6/8] x86/vvmx: refactor nvmx_handle_vmclear()



1. Add the VMX_INSN_VMCLEAR_WITH_VMXON_PTR error number and the
   corresponding check to the function.

2. Correct the reported error for the not-4KB-aligned case and for an
   invalid physaddr (i.e. when hvm_map_guest_frame_rw() fails).

3. Remove enum vmx_ops_result and call vmfail()/vmsucceed() directly
   (see the sketch below).
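
For reference, the outcome conventions behind vmsucceed(), vmfail() and
vmfail_invalid() come from the Intel SDM: VMsucceed clears all six
arithmetic flags; VMfailInvalid sets only CF (used when there is no
current VMCS in which to record an error); VMfailValid sets only ZF and
writes the error number into the VM-instruction error field of the
current VMCS, which is how VMX_INSN_VMCLEAR_WITH_VMXON_PTR (3) and
VMX_INSN_VMCLEAR_INVALID_PHYADDR (2) become visible to the L1 guest.
Below is a minimal, self-contained sketch of that convention; the
struct and helper names are illustrative only, not Xen's actual
implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_CF (1u << 0)
    #define X86_EFLAGS_PF (1u << 2)
    #define X86_EFLAGS_AF (1u << 4)
    #define X86_EFLAGS_ZF (1u << 6)
    #define X86_EFLAGS_SF (1u << 7)
    #define X86_EFLAGS_OF (1u << 11)

    /* All six arithmetic flags; every VMX outcome starts by clearing them. */
    #define ARITH_FLAGS (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
                         X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

    struct sketch_regs {
        uint32_t eflags;
        uint32_t vm_insn_error;  /* stand-in for the VMCS error field */
    };

    /* VMsucceed: all arithmetic flags cleared. */
    static void sketch_vmsucceed(struct sketch_regs *r)
    {
        r->eflags &= ~ARITH_FLAGS;
    }

    /* VMfailInvalid: CF=1, everything else cleared. */
    static void sketch_vmfail_invalid(struct sketch_regs *r)
    {
        r->eflags = (r->eflags & ~ARITH_FLAGS) | X86_EFLAGS_CF;
    }

    /* VMfailValid: ZF=1, everything else cleared, error number stored
     * in the current VMCS's VM-instruction error field. */
    static void sketch_vmfail_valid(struct sketch_regs *r, uint32_t err)
    {
        r->eflags = (r->eflags & ~ARITH_FLAGS) | X86_EFLAGS_ZF;
        r->vm_insn_error = err;
    }

    int main(void)
    {
        struct sketch_regs r = { .eflags = ARITH_FLAGS };

        /* e.g. VMCLEAR with the VMXON pointer: error number 3. */
        sketch_vmfail_valid(&r, 3 /* VMX_INSN_VMCLEAR_WITH_VMXON_PTR */);
        printf("ZF=%d CF=%d error=%u\n",
               !!(r.eflags & X86_EFLAGS_ZF),
               !!(r.eflags & X86_EFLAGS_CF),
               (unsigned)r.vm_insn_error);
        return 0;
    }

Note also the alignment test retained in the patch: for a VMCS pointer
such as 0x12345678, gpa & 0xfff yields 0x678, so the pointer is not
4KB-aligned and VMCLEAR now reports VMX_INSN_VMCLEAR_INVALID_PHYADDR
via VMfailValid, where it previously raised VMfailInvalid.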

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
v3:
- Added Acked-by

v2:
- Removal of enum vmx_ops_result and refactoring
---
 xen/arch/x86/hvm/vmx/vvmx.c        | 52 +++++++++++++++++-------------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
 2 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 4391cd314a..1cb4af5113 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -207,12 +207,6 @@ struct vmx_inst_decoded {
     unsigned int reg2;
 };
 
-enum vmx_ops_result {
-    VMSUCCEED,
-    VMFAIL_VALID,
-    VMFAIL_INVALID,
-};
-
 #define CASE_SET_REG(REG, reg)      \
     case VMX_REG_ ## REG: regs->reg = value; break
 #define CASE_GET_REG(REG, reg)      \
@@ -1754,16 +1748,26 @@ static int nvmx_handle_vmclear(struct cpu_user_regs *regs)
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    BUILD_BUG_ON(X86EMUL_OKAY != VMSUCCEED); /* rc = VMSUCCEED; */
+    if ( gpa == vcpu_2_nvmx(v).vmxon_region_pa )
+    {
+        vmfail(regs, VMX_INSN_VMCLEAR_WITH_VMXON_PTR);
+        goto out;
+    }
+
     if ( gpa & 0xfff )
-        rc = VMFAIL_INVALID;
-    else if ( gpa == nvcpu->nv_vvmcxaddr )
+    {
+        vmfail(regs, VMX_INSN_VMCLEAR_INVALID_PHYADDR);
+        goto out;
+    }
+
+    if ( gpa == nvcpu->nv_vvmcxaddr )
     {
         if ( cpu_has_vmx_vmcs_shadowing )
             nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx);
         clear_vvmcs_launched(&nvmx->launched_list,
                              PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr));
         nvmx_purge_vvmcs(v);
+        vmsucceed(regs);
     }
     else 
     {
@@ -1771,24 +1775,26 @@ static int nvmx_handle_vmclear(struct cpu_user_regs *regs)
         bool_t writable;
 
         vvmcs = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 0, &writable);
-        if ( vvmcs ) 
+
+        if ( !vvmcs )
         {
-            if ( writable )
-                clear_vvmcs_launched(&nvmx->launched_list,
-                                     mfn_x(domain_page_map_to_mfn(vvmcs)));
-            else
-                rc = VMFAIL_VALID;
-            hvm_unmap_guest_frame(vvmcs, 0);
+            vmfail(regs, VMX_INSN_VMCLEAR_INVALID_PHYADDR);
+            goto out;
         }
-    }
 
-    if ( rc == VMSUCCEED )
-        vmsucceed(regs);
-    else if ( rc == VMFAIL_VALID )
-        vmfail(regs, VMX_INSN_VMCLEAR_INVALID_PHYADDR);
-    else
-        vmfail_invalid(regs);
+        if ( writable )
+        {
+            clear_vvmcs_launched(&nvmx->launched_list,
+                                 mfn_x(domain_page_map_to_mfn(vvmcs)));
+            vmsucceed(regs);
+        }
+        else
+            vmfail(regs, VMX_INSN_VMCLEAR_INVALID_PHYADDR);
 
+        hvm_unmap_guest_frame(vvmcs, 0);
+    }
+
+out:
     return X86EMUL_OKAY;
 }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index eae4e5397e..b3e800138e 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -529,6 +529,7 @@ enum vmx_insn_errno
 {
     VMX_INSN_SUCCEED                       = 0,
     VMX_INSN_VMCLEAR_INVALID_PHYADDR       = 2,
+    VMX_INSN_VMCLEAR_WITH_VMXON_PTR        = 3,
     VMX_INSN_VMLAUNCH_NONCLEAR_VMCS        = 4,
     VMX_INSN_VMRESUME_NONLAUNCHED_VMCS     = 5,
     VMX_INSN_INVALID_CONTROL_STATE         = 7,
-- 
2.17.1

