
[Xen-devel] [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook

... instead of repeating the same code in various places (and getting
it wrong in some of them).

In vmx_inst_check_privilege() also stop open coding vmx_guest_x86_mode().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
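
For context: the pattern this patch replaces reads a segment register at
each call site just to derive the privilege level. A minimal before/after
sketch (names as in the code below):

    /* Before: open-coded at every call site. */
    struct segment_register sreg;

    hvm_get_segment_register(v, x86_seg_ss, &sreg);
    if ( sreg.attr.fields.dpl == 3 )
        /* guest is executing in user mode */;

    /* After: one accessor, dispatching through the new hvm_funcs hook. */
    if ( hvm_get_cpl(v) == 3 )
        /* guest is executing in user mode */;

The selector-based variants (e.g. cs.sel & 3) were among the "wrong" ones
alluded to above: the low selector bits need not encode the privilege
level, real and vm86 modes being the obvious examples.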

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2744,7 +2744,7 @@ static void hvm_unmap_entry(void *p)
 static int hvm_load_segment_selector(
     enum x86_segment seg, uint16_t sel, unsigned int eflags)
 {
-    struct segment_register desctab, cs, segr;
+    struct segment_register desctab, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
     bool_t writable;
@@ -2776,7 +2776,6 @@ static int hvm_load_segment_selector(
     if ( (seg == x86_seg_ldtr) && (sel & 4) )
         goto fail;
 
-    hvm_get_segment_register(v, x86_seg_cs, &cs);
     hvm_get_segment_register(
         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
 
@@ -2801,7 +2800,7 @@ static int hvm_load_segment_selector(
 
         dpl = (desc.b >> 13) & 3;
         rpl = sel & 3;
-        cpl = cs.sel & 3;
+        cpl = hvm_get_cpl(v);
 
         switch ( seg )
         {
@@ -3708,16 +3707,10 @@ void hvm_cpuid(unsigned int input, unsig
 
 bool hvm_check_cpuid_faulting(struct vcpu *v)
 {
-    struct segment_register sreg;
-
     if ( !v->arch.cpuid_faulting )
         return false;
 
-    hvm_get_segment_register(v, x86_seg_ss, &sreg);
-    if ( sreg.attr.fields.dpl == 0 )
-        return false;
-
-    return true;
+    return hvm_get_cpl(v) > 0;
 }
 
 static uint64_t _hvm_rdtsc_intercept(void)
@@ -3729,13 +3722,10 @@ static uint64_t _hvm_rdtsc_intercept(voi
     if ( currd->arch.vtsc )
         switch ( hvm_guest_x86_mode(curr) )
         {
-            struct segment_register sreg;
-
         case 8:
         case 4:
         case 2:
-            hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-            if ( unlikely(sreg.attr.fields.dpl) )
+            if ( unlikely(hvm_get_cpl(curr)) )
             {
         case 1:
                 currd->arch.vtsc_usercount++;
@@ -4303,7 +4293,6 @@ int hvm_do_hypercall(struct cpu_user_reg
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct segment_register sreg;
     int mode = hvm_guest_x86_mode(curr);
     unsigned long eax = regs->_eax;
 
@@ -4314,8 +4303,7 @@ int hvm_do_hypercall(struct cpu_user_reg
         /* Fallthrough to permission check. */
     case 4:
     case 2:
-        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-        if ( unlikely(sreg.attr.fields.dpl) )
+        if ( unlikely(hvm_get_cpl(curr)) )
         {
     default:
             regs->eax = -EPERM;
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -78,8 +78,7 @@ static inline unsigned long gfn_of_rip(u
     struct segment_register sreg;
     uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
 
-    hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-    if ( sreg.attr.fields.dpl == 3 )
+    if ( hvm_get_cpl(curr) == 3 )
         pfec |= PFEC_user_mode;
 
     hvm_get_segment_register(curr, x86_seg_cs, &sreg);
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -616,6 +616,11 @@ static void svm_sync_vmcb(struct vcpu *v
     svm_vmsave(arch_svm->vmcb);
 }
 
+static unsigned int svm_get_cpl(struct vcpu *v)
+{
+    return vmcb_get_cpl(v->arch.hvm_svm.vmcb);
+}
+
 static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
@@ -2231,6 +2236,7 @@ static struct hvm_function_table __initd
     .get_interrupt_shadow = svm_get_interrupt_shadow,
     .set_interrupt_shadow = svm_set_interrupt_shadow,
     .guest_x86_mode       = svm_guest_x86_mode,
+    .get_cpl              = svm_get_cpl,
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
     .get_shadow_gs_base   = svm_get_shadow_gs_base,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -557,7 +557,7 @@ static void vmx_update_guest_vendor(stru
     vmx_vmcs_exit(v);
 }
 
-static int vmx_guest_x86_mode(struct vcpu *v)
+int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned long cs_ar_bytes;
 
@@ -921,6 +921,26 @@ static void vmx_ctxt_switch_to(struct vc
 }
 
 
+unsigned int vmx_get_cpl(void)
+{
+    unsigned long attr;
+
+    __vmread(GUEST_SS_AR_BYTES, &attr);
+
+    return (attr >> 5) & 3;
+}
+
+static unsigned int _vmx_get_cpl(struct vcpu *v)
+{
+    unsigned int cpl;
+
+    vmx_vmcs_enter(v);
+    cpl = vmx_get_cpl();
+    vmx_vmcs_exit(v);
+
+    return cpl;
+}
+
 /* SDM volume 3b section 22.3.1.2: we can only enter virtual 8086 mode
  * if all of CS, SS, DS, ES, FS and GS are 16bit ring-3 data segments.
  * The guest thinks it's got ring-0 segments, so we need to fudge
@@ -2089,6 +2109,7 @@ static struct hvm_function_table __initd
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
     .guest_x86_mode       = vmx_guest_x86_mode,
+    .get_cpl              = _vmx_get_cpl,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
     .get_shadow_gs_base   = vmx_get_shadow_gs_base,
@@ -3837,19 +3858,13 @@ void vmx_vmexit_handler(struct cpu_user_
     /* fall through */
     default:
     exit_and_crash:
-        {
-            struct segment_register ss;
-
-            gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n",
-                     exit_reason);
+        gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n", exit_reason);
 
-            hvm_get_segment_register(v, x86_seg_ss, &ss);
-            if ( ss.attr.fields.dpl )
-                hvm_inject_hw_exception(TRAP_invalid_op,
-                                        X86_EVENT_NO_EC);
-            else
-                domain_crash(v->domain);
-        }
+        if ( vmx_get_cpl() )
+            hvm_inject_hw_exception(TRAP_invalid_op,
+                                    X86_EVENT_NO_EC);
+        else
+            domain_crash(v->domain);
         break;
     }
 
@@ -3871,12 +3886,9 @@ out:
     if ( mode == 8 ? !is_canonical_address(regs->rip)
                    : regs->rip != regs->_eip )
     {
-        struct segment_register ss;
-
         gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
 
-        hvm_get_segment_register(v, x86_seg_ss, &ss);
-        if ( ss.attr.fields.dpl )
+        if ( vmx_get_cpl() )
         {
             __vmread(VM_ENTRY_INTR_INFO, &intr_info);
             if ( !(intr_info & INTR_INFO_VALID_MASK) )
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -349,7 +349,6 @@ static inline u32 __n2_secondary_exec_co
 static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
 {
     struct vcpu *v = current;
-    struct segment_register cs;
 
     if ( vmxop_check )
     {
@@ -360,15 +359,12 @@ static int vmx_inst_check_privilege(stru
     else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
         goto invalid_op;
 
-    hvm_get_segment_register(v, x86_seg_cs, &cs);
-
-    if ( (regs->eflags & X86_EFLAGS_VM) ||
-         (hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
+    if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
         goto invalid_op;
     else if ( nestedhvm_vcpu_in_guestmode(v) )
         goto vmexit;
 
-    if ( (cs.sel & 3) > 0 )
+    if ( vmx_get_cpl() > 0 )
         goto gp_fault;
 
     return X86EMUL_OKAY;
@@ -413,16 +409,10 @@ static int decode_vmx_inst(struct cpu_us
     }
     else
     {
-        bool_t mode_64bit = 0;
+        bool mode_64bit = (vmx_guest_x86_mode(v) == 8);
 
         decode->type = VMX_INST_MEMREG_TYPE_MEMORY;
 
-        if ( hvm_long_mode_enabled(v) )
-        {
-            hvm_get_segment_register(v, x86_seg_cs, &seg);
-            mode_64bit = seg.attr.fields.l;
-        }
-
         if ( info.fields.segment > VMX_SREG_GS )
             goto gp_fault;
         hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -174,7 +174,6 @@ guest_walk_tables(struct vcpu *v, struct
 
     if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
     {
-        struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
 
         /* SMEP: kernel-mode instruction fetches from user-mode mappings
@@ -186,8 +185,6 @@ guest_walk_tables(struct vcpu *v, struct
         switch ( v->arch.smap_check_policy )
         {
         case SMAP_CHECK_HONOR_CPL_AC:
-            hvm_get_segment_register(v, x86_seg_ss, &seg);
-
             /*
              * SMAP: kernel-mode data accesses from user-mode mappings
              * should fault.
@@ -199,8 +196,7 @@ guest_walk_tables(struct vcpu *v, struct
              *   - Page fault in kernel mode
              */
             smap = hvm_smap_enabled(v) &&
-                   ((seg.attr.fields.dpl == 3) ||
-                    !(regs->eflags & X86_EFLAGS_AC));
+                   ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
             break;
         case SMAP_CHECK_ENABLED:
             smap = hvm_smap_enabled(v);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1779,7 +1779,7 @@ void *sh_emulate_map_dest(struct vcpu *v
 #ifndef NDEBUG
     /* We don't emulate user-mode writes to page tables. */
     if ( has_hvm_container_domain(d)
-         ? hvm_get_seg_reg(x86_seg_ss, sh_ctxt)->attr.fields.dpl == 3
+         ? hvm_get_cpl(v) == 3
          : !guest_kernel_mode(v, guest_cpu_user_regs()) )
     {
         gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
--- a/xen/arch/x86/oprofile/xenoprof.c
+++ b/xen/arch/x86/oprofile/xenoprof.c
@@ -84,15 +84,12 @@ int xenoprofile_get_mode(struct vcpu *cu
 
     switch ( hvm_guest_x86_mode(curr) )
     {
-        struct segment_register ss;
-
     case 0: /* real mode */
         return 1;
     case 1: /* vm86 mode */
         return 0;
     default:
-        hvm_get_segment_register(curr, x86_seg_ss, &ss);
-        return (ss.sel & 3) != 3;
+        return hvm_get_cpl(curr) != 3;
     }
 }
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -117,6 +117,7 @@ struct hvm_function_table {
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
     int (*guest_x86_mode)(struct vcpu *v);
+    unsigned int (*get_cpl)(struct vcpu *v);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
@@ -358,6 +359,12 @@ static inline void hvm_flush_guest_tlbs(
 void hvm_hypercall_page_initialise(struct domain *d,
                                    void *hypercall_page);
 
+static inline unsigned int
+hvm_get_cpl(struct vcpu *v)
+{
+    return hvm_funcs.get_cpl(v);
+}
+
 void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg);
 void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -550,6 +550,9 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
+int vmx_guest_x86_mode(struct vcpu *v);
+unsigned int vmx_get_cpl(void);
+
 void vmx_inject_extint(int trap, uint8_t source);
 void vmx_inject_nmi(void);
 
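A note on the two VMX helpers added above: vmx_get_cpl() reads
GUEST_SS_AR_BYTES directly (bits 6:5 of the access rights field hold
SS.DPL, which is the CPL), so it is valid only while the vCPU's VMCS is
the currently loaded one, e.g. inside the vmexit handler. The static
_vmx_get_cpl() wrapper brackets the read with vmx_vmcs_enter() /
vmx_vmcs_exit(), making it safe for the generic hvm_funcs.get_cpl hook,
which may be invoked for a vCPU other than the current one. Roughly (the
first caller mirrors the vmexit handler change above, the second is an
illustrative generic-code use):

    /* Hot path, VMCS already loaded: cheap direct VMREAD. */
    if ( vmx_get_cpl() )
        hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);

    /* Generic code, possibly acting on a remote vCPU. */
    unsigned int cpl = hvm_get_cpl(v);  /* -> _vmx_get_cpl(v) on VMX */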