
[Xen-changelog] [xen master] x86/HVM: introduce hvm_get_cpl() and respective hook



commit aac1df3d03592fba5aadfcf2f496000285bab336
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Dec 15 11:07:55 2016 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Dec 15 11:07:55 2016 +0100

    x86/HVM: introduce hvm_get_cpl() and respective hook
    
    ... instead of repeating the same code in various places (and getting
    it wrong in some of them).
    
    In vmx_inst_check_privilege() also stop open coding
    vmx_guest_x86_mode().
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            | 22 +++++--------------
 xen/arch/x86/hvm/monitor.c        |  3 +--
 xen/arch/x86/hvm/svm/svm.c        |  6 +++++
 xen/arch/x86/hvm/vmx/vmx.c        | 46 ++++++++++++++++++++++++---------------
 xen/arch/x86/hvm/vmx/vvmx.c       | 16 +++-----------
 xen/arch/x86/mm/guest_walk.c      |  6 +----
 xen/arch/x86/mm/shadow/common.c   |  2 +-
 xen/arch/x86/oprofile/xenoprof.c  |  5 +----
 xen/include/asm-x86/hvm/hvm.h     |  7 ++++++
 xen/include/asm-x86/hvm/vmx/vmx.h |  3 +++
 10 files changed, 57 insertions(+), 59 deletions(-)
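
For readers browsing the archive, here is a minimal sketch (not part of
the patch; it assumes a vcpu pointer v in scope) of the call-site
pattern this commit consolidates. Previously each site fetched a
segment register and derived the CPL by hand, and a few used the
selector's low bits (the RPL), which need not match the CPL in every
mode:

    /* Sketch only -- illustrative, not verbatim from any one call site. */
    struct segment_register sreg;

    /* Before: open-coded at each call site. */
    hvm_get_segment_register(v, x86_seg_ss, &sreg);
    if ( sreg.attr.fields.dpl == 3 )
        /* user mode */;

    /* After: one accessor, dispatching to the vendor's get_cpl hook. */
    if ( hvm_get_cpl(v) == 3 )
        /* user mode */;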

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 61f5029..cdb7ca1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2739,7 +2739,7 @@ static void hvm_unmap_entry(void *p)
 static int hvm_load_segment_selector(
     enum x86_segment seg, uint16_t sel, unsigned int eflags)
 {
-    struct segment_register desctab, cs, segr;
+    struct segment_register desctab, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
     bool_t writable;
@@ -2771,7 +2771,6 @@ static int hvm_load_segment_selector(
     if ( (seg == x86_seg_ldtr) && (sel & 4) )
         goto fail;
 
-    hvm_get_segment_register(v, x86_seg_cs, &cs);
     hvm_get_segment_register(
         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
 
@@ -2796,7 +2795,7 @@ static int hvm_load_segment_selector(
 
         dpl = (desc.b >> 13) & 3;
         rpl = sel & 3;
-        cpl = cs.sel & 3;
+        cpl = hvm_get_cpl(v);
 
         switch ( seg )
         {
@@ -3640,16 +3639,10 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 
 bool hvm_check_cpuid_faulting(struct vcpu *v)
 {
-    struct segment_register sreg;
-
     if ( !v->arch.cpuid_faulting )
         return false;
 
-    hvm_get_segment_register(v, x86_seg_ss, &sreg);
-    if ( sreg.attr.fields.dpl == 0 )
-        return false;
-
-    return true;
+    return hvm_get_cpl(v) > 0;
 }
 
 static uint64_t _hvm_rdtsc_intercept(void)
@@ -3661,13 +3654,10 @@ static uint64_t _hvm_rdtsc_intercept(void)
     if ( currd->arch.vtsc )
         switch ( hvm_guest_x86_mode(curr) )
         {
-            struct segment_register sreg;
-
         case 8:
         case 4:
         case 2:
-            hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-            if ( unlikely(sreg.attr.fields.dpl) )
+            if ( unlikely(hvm_get_cpl(curr)) )
             {
         case 1:
                 currd->arch.vtsc_usercount++;
@@ -4235,7 +4225,6 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct segment_register sreg;
     int mode = hvm_guest_x86_mode(curr);
     unsigned long eax = regs->_eax;
 
@@ -4246,8 +4235,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
         /* Fallthrough to permission check. */
     case 4:
     case 2:
-        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-        if ( unlikely(sreg.attr.fields.dpl) )
+        if ( unlikely(hvm_get_cpl(curr)) )
         {
     default:
             regs->eax = -EPERM;
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 401a8c6..322d1d7 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -78,8 +78,7 @@ static inline unsigned long gfn_of_rip(unsigned long rip)
     struct segment_register sreg;
     uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
 
-    hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-    if ( sreg.attr.fields.dpl == 3 )
+    if ( hvm_get_cpl(curr) == 3 )
         pfec |= PFEC_user_mode;
 
     hvm_get_segment_register(curr, x86_seg_cs, &sreg);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index bb8273b..afdea6d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -616,6 +616,11 @@ static void svm_sync_vmcb(struct vcpu *v)
     svm_vmsave(arch_svm->vmcb);
 }
 
+static unsigned int svm_get_cpl(struct vcpu *v)
+{
+    return vmcb_get_cpl(v->arch.hvm_svm.vmcb);
+}
+
 static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
@@ -2197,6 +2202,7 @@ static struct hvm_function_table __initdata svm_function_table = {
     .get_interrupt_shadow = svm_get_interrupt_shadow,
     .set_interrupt_shadow = svm_set_interrupt_shadow,
     .guest_x86_mode       = svm_guest_x86_mode,
+    .get_cpl              = svm_get_cpl,
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
     .get_shadow_gs_base   = svm_get_shadow_gs_base,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 350b945..216b748 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -554,7 +554,7 @@ static void vmx_update_guest_vendor(struct vcpu *v)
     vmx_vmcs_exit(v);
 }
 
-static int vmx_guest_x86_mode(struct vcpu *v)
+int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned long cs_ar_bytes;
 
@@ -918,6 +918,26 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
 }
 
 
+unsigned int vmx_get_cpl(void)
+{
+    unsigned long attr;
+
+    __vmread(GUEST_SS_AR_BYTES, &attr);
+
+    return (attr >> 5) & 3;
+}
+
+static unsigned int _vmx_get_cpl(struct vcpu *v)
+{
+    unsigned int cpl;
+
+    vmx_vmcs_enter(v);
+    cpl = vmx_get_cpl();
+    vmx_vmcs_exit(v);
+
+    return cpl;
+}
+
 /* SDM volume 3b section 22.3.1.2: we can only enter virtual 8086 mode
  * if all of CS, SS, DS, ES, FS and GS are 16bit ring-3 data segments.
  * The guest thinks it's got ring-0 segments, so we need to fudge
@@ -2068,6 +2088,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
     .guest_x86_mode       = vmx_guest_x86_mode,
+    .get_cpl              = _vmx_get_cpl,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
     .get_shadow_gs_base   = vmx_get_shadow_gs_base,
@@ -3789,19 +3810,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     /* fall through */
     default:
     exit_and_crash:
-        {
-            struct segment_register ss;
+        gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n", exit_reason);
 
-            gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n",
-                     exit_reason);
-
-            hvm_get_segment_register(v, x86_seg_ss, &ss);
-            if ( ss.attr.fields.dpl )
-                hvm_inject_hw_exception(TRAP_invalid_op,
-                                        X86_EVENT_NO_EC);
-            else
-                domain_crash(v->domain);
-        }
+        if ( vmx_get_cpl() )
+            hvm_inject_hw_exception(TRAP_invalid_op,
+                                    X86_EVENT_NO_EC);
+        else
+            domain_crash(v->domain);
         break;
     }
 
@@ -3823,12 +3838,9 @@ out:
     if ( mode == 8 ? !is_canonical_address(regs->rip)
                    : regs->rip != regs->_eip )
     {
-        struct segment_register ss;
-
         gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
 
-        hvm_get_segment_register(v, x86_seg_ss, &ss);
-        if ( ss.attr.fields.dpl )
+        if ( vmx_get_cpl() )
         {
             __vmread(VM_ENTRY_INTR_INFO, &intr_info);
             if ( !(intr_info & INTR_INFO_VALID_MASK) )
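
A note on the new VMX hook above: on VMX the CPL is architecturally
SS.DPL, and the value __vmread() returns for GUEST_SS_AR_BYTES mirrors
the descriptor attribute layout, with DPL in bits 5-6 -- hence the
"(attr >> 5) & 3" in vmx_get_cpl(). A worked example with an
illustrative value (not taken from the patch):

    /* 0xf3: P=1, DPL=3, S=1, writable data segment -- a ring-3 SS. */
    unsigned long attr = 0xf3;
    unsigned int cpl = (attr >> 5) & 3;   /* (0xf3 >> 5) & 3 == 3 */

SVM needs no such extraction: the VMCB carries the CPL as a dedicated
field, which is why svm_get_cpl() earlier in the patch is a plain
vmcb_get_cpl() read.
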
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e6e9ebd..958640b 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -349,7 +349,6 @@ static inline u32 __n2_secondary_exec_control(struct vcpu *v)
 static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
 {
     struct vcpu *v = current;
-    struct segment_register cs;
 
     if ( vmxop_check )
     {
@@ -360,15 +359,12 @@ static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
     else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
         goto invalid_op;
 
-    hvm_get_segment_register(v, x86_seg_cs, &cs);
-
-    if ( (regs->eflags & X86_EFLAGS_VM) ||
-         (hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
+    if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
         goto invalid_op;
     else if ( nestedhvm_vcpu_in_guestmode(v) )
         goto vmexit;
 
-    if ( (cs.sel & 3) > 0 )
+    if ( vmx_get_cpl() > 0 )
         goto gp_fault;
 
     return X86EMUL_OKAY;
@@ -413,16 +409,10 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     }
     else
     {
-        bool_t mode_64bit = 0;
+        bool mode_64bit = (vmx_guest_x86_mode(v) == 8);
 
         decode->type = VMX_INST_MEMREG_TYPE_MEMORY;
 
-        if ( hvm_long_mode_enabled(v) )
-        {
-            hvm_get_segment_register(v, x86_seg_cs, &seg);
-            mode_64bit = seg.attr.fields.l;
-        }
-
         if ( info.fields.segment > VMX_SREG_GS )
             goto gp_fault;
         hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
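
The rewritten check in vmx_inst_check_privilege() leans on the guest
mode encoding used throughout this patch (0 = real, 1 = vm86, 2/4 =
16/32-bit protected, 8 = long mode, as also visible in the xenoprof
hunk below). A short sketch of how the comparison behaves, with
illustrative values:

    /* Sketch: mode encoding is 0=real, 1=vm86, 2=16-bit, 4=32-bit, 8=long. */
    int mode = vmx_guest_x86_mode(v);   /* e.g. 4 for a 32-bit guest */
    bool bad = mode < (hvm_long_mode_enabled(v) ? 8 : 2);
    /* No long mode: 4 < 2 is false -> allowed; vm86 gives 1 < 2 -> #UD.
     * Long mode: compatibility mode gives 4 < 8 -> #UD, so only 64-bit
     * mode may execute VMX instructions, matching the old CS.L check. */
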
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 868e909..e8a70ea 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -174,7 +174,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 
     if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
     {
-        struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
 
         /* SMEP: kernel-mode instruction fetches from user-mode mappings
@@ -186,8 +185,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         switch ( v->arch.smap_check_policy )
         {
         case SMAP_CHECK_HONOR_CPL_AC:
-            hvm_get_segment_register(v, x86_seg_ss, &seg);
-
             /*
              * SMAP: kernel-mode data accesses from user-mode mappings
              * should fault.
@@ -199,8 +196,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
              *   - Page fault in kernel mode
              */
             smap = hvm_smap_enabled(v) &&
-                   ((seg.attr.fields.dpl == 3) ||
-                    !(regs->eflags & X86_EFLAGS_AC));
+                   ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
             break;
         case SMAP_CHECK_ENABLED:
             smap = hvm_smap_enabled(v);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0ba4153..a80509c 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1706,7 +1706,7 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
 #ifndef NDEBUG
     /* We don't emulate user-mode writes to page tables. */
     if ( has_hvm_container_domain(d)
-         ? hvm_get_seg_reg(x86_seg_ss, sh_ctxt)->attr.fields.dpl == 3
+         ? hvm_get_cpl(v) == 3
          : !guest_kernel_mode(v, guest_cpu_user_regs()) )
     {
         gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
diff --git a/xen/arch/x86/oprofile/xenoprof.c b/xen/arch/x86/oprofile/xenoprof.c
index 002a696..cca759b 100644
--- a/xen/arch/x86/oprofile/xenoprof.c
+++ b/xen/arch/x86/oprofile/xenoprof.c
@@ -84,15 +84,12 @@ int xenoprofile_get_mode(struct vcpu *curr, const struct cpu_user_regs *regs)
 
     switch ( hvm_guest_x86_mode(curr) )
     {
-        struct segment_register ss;
-
     case 0: /* real mode */
         return 1;
     case 1: /* vm86 mode */
         return 0;
     default:
-        hvm_get_segment_register(curr, x86_seg_ss, &ss);
-        return (ss.sel & 3) != 3;
+        return hvm_get_cpl(curr) != 3;
     }
 }
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b89b209..7d2e15f 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -117,6 +117,7 @@ struct hvm_function_table {
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
     int (*guest_x86_mode)(struct vcpu *v);
+    unsigned int (*get_cpl)(struct vcpu *v);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
@@ -351,6 +352,12 @@ static inline void hvm_flush_guest_tlbs(void)
 void hvm_hypercall_page_initialise(struct domain *d,
                                    void *hypercall_page);
 
+static inline unsigned int
+hvm_get_cpl(struct vcpu *v)
+{
+    return hvm_funcs.get_cpl(v);
+}
+
 void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg);
 void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 0e5902d..e5c6499 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -550,6 +550,9 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
+int vmx_guest_x86_mode(struct vcpu *v);
+unsigned int vmx_get_cpl(void);
+
 void vmx_inject_extint(int trap, uint8_t source);
 void vmx_inject_nmi(void);
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
