
[Xen-changelog] [xen master] x86/emul: Require callers to provide LMA in the emulation context



commit f1db33e953633c9dd421664d54134f211b052336
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Mar 31 14:49:45 2017 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Apr 6 18:12:59 2017 +0100

    x86/emul: Require callers to provide LMA in the emulation context
    
    Long mode (or not) influences emulation behaviour in a number of cases.
    Instead of reusing the ->read_msr() hook to obtain EFER.LMA, require callers
    to provide it directly.
    
    This simplifies all long mode checks during emulation to a simple boolean
    read, removing embedded MSR reads.  It also allows for the removal of a local
    variable in the sysenter emulation block, and removes a latent bug in the
    syscall emulation block where rc contains a non-X86EMUL_* constant for a
    period of time.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
    Reviewed-by: Jan Beulich <JBeulich@xxxxxxxx>
---
 tools/fuzz/x86_instruction_emulator/fuzz-emul.c |  2 +
 tools/tests/x86_emulator/test_x86_emulator.c    |  4 ++
 xen/arch/x86/hvm/emulate.c                      |  4 +-
 xen/arch/x86/mm.c                               |  2 +
 xen/arch/x86/mm/shadow/common.c                 |  5 +--
 xen/arch/x86/traps.c                            |  1 +
 xen/arch/x86/x86_emulate/x86_emulate.c          | 58 ++++++++-----------------
 xen/arch/x86/x86_emulate/x86_emulate.h          |  3 ++
 8 files changed, 35 insertions(+), 44 deletions(-)
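
To make the new contract concrete, here is a self-contained C sketch (not
part of the commit, and not Xen code: struct emul_ctxt, fake_read_efer() and
in_longmode_via_msr() are simplified stand-ins for x86_emulate_ctxt, the
->read_msr() hook and in_longmode() respectively).  The caller snapshots
EFER.LMA into the context once; emulation sites then read a plain boolean
instead of issuing a fallible MSR-read hook call:

    /* Standalone illustration; compiles with any C99 compiler. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LMA (1u << 10)   /* Long Mode Active bit in MSR_EFER */

    struct emul_ctxt {
        bool lma;                 /* new: caller-provided EFER.LMA snapshot */
    };

    /*
     * Old style: each long-mode check re-reads EFER via a hook, which may
     * be absent or fail, forcing tri-state (-1/0/1) handling at every site.
     */
    static int in_longmode_via_msr(uint64_t (*read_efer)(void))
    {
        if ( !read_efer )
            return -1;            /* no hook: cannot emulate */
        return !!(read_efer() & EFER_LMA);
    }

    static uint64_t fake_read_efer(void) { return EFER_LMA; }

    int main(void)
    {
        /* New style: the caller seeds the context up front... */
        struct emul_ctxt ctxt = { .lma = !!(fake_read_efer() & EFER_LMA) };

        /* ...and each emulation site becomes a simple boolean read. */
        if ( ctxt.lma )
            puts("long mode: e.g. check canonical IP, not the CS limit");

        printf("old tri-state result: %d\n",
               in_longmode_via_msr(fake_read_efer));
        return 0;
    }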

diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index 8488816..65c5a3b 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -484,6 +484,8 @@ static bool in_longmode(struct x86_emulate_ctxt *ctxt)
 
 static void set_sizes(struct x86_emulate_ctxt *ctxt)
 {
+    ctxt->lma = long_mode_active(ctxt);
+
     if ( in_longmode(ctxt) )
         ctxt->addr_size = ctxt->sp_size = 64;
     else
diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
index 5be8ddc..efeb175 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -319,6 +319,7 @@ int main(int argc, char **argv)
     ctxt.regs = &regs;
     ctxt.force_writeback = 0;
     ctxt.vendor    = X86_VENDOR_UNKNOWN;
+    ctxt.lma       = sizeof(void *) == 8;
     ctxt.addr_size = 8 * sizeof(void *);
     ctxt.sp_size   = 8 * sizeof(void *);
 
@@ -2922,6 +2923,7 @@ int main(int argc, char **argv)
     {
         decl_insn(vzeroupper);
 
+        ctxt.lma = false;
         ctxt.sp_size = ctxt.addr_size = 32;
 
         asm volatile ( "vxorps %xmm2, %xmm2, %xmm3\n"
@@ -2949,6 +2951,7 @@ int main(int argc, char **argv)
             goto fail;
         printf("okay\n");
 
+        ctxt.lma = true;
         ctxt.sp_size = ctxt.addr_size = 64;
     }
     else
@@ -3001,6 +3004,7 @@ int main(int argc, char **argv)
             continue;
 
         memcpy(res, blobs[j].code, blobs[j].size);
+        ctxt.lma = blobs[j].bitness == 64;
         ctxt.addr_size = ctxt.sp_size = blobs[j].bitness;
 
         if ( ctxt.addr_size == sizeof(void *) * CHAR_BIT )
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 39e4319..a4918e1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2044,7 +2044,9 @@ void hvm_emulate_init_per_insn(
     unsigned int pfec = PFEC_page_present;
     unsigned long addr;
 
-    if ( hvm_long_mode_active(curr) &&
+    hvmemul_ctxt->ctxt.lma = hvm_long_mode_active(curr);
+
+    if ( hvmemul_ctxt->ctxt.lma &&
          hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
         hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
     else
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 3918a37..96bc280 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5412,6 +5412,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
             .vendor = d->arch.cpuid->x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
+            .lma       = !is_pv_32bit_domain(d),
         },
     };
     int rc;
@@ -5566,6 +5567,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
         .vendor = v->domain->arch.cpuid->x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
+        .lma = !is_pv_32bit_vcpu(v),
         .data = &mmio_ro_ctxt
     };
     int rc;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 736ceaa..d432198 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -326,15 +326,14 @@ const struct x86_emulate_ops *shadow_init_emulation(
 
     sh_ctxt->ctxt.regs = regs;
     sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
+    sh_ctxt->ctxt.lma = hvm_long_mode_active(v);
 
     /* Segment cache initialisation. Primed with CS. */
     creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
 
     /* Work out the emulation mode. */
-    if ( hvm_long_mode_active(v) && creg->attr.fields.l )
-    {
+    if ( sh_ctxt->ctxt.lma && creg->attr.fields.l )
         sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
-    }
     else
     {
         sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 0d54baf..5b9bf21 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2966,6 +2966,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
     struct priv_op_ctxt ctxt = {
         .ctxt.regs = regs,
         .ctxt.vendor = currd->arch.cpuid->x86_vendor,
+        .ctxt.lma = !is_pv_32bit_domain(currd),
     };
     int rc;
     unsigned int eflags, ar;
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 15ddb75..9841fdd 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -968,11 +968,10 @@ do {                                                        \
 
 #define validate_far_branch(cs, ip) ({                                  \
     if ( sizeof(ip) <= 4 ) {                                            \
-        ASSERT(in_longmode(ctxt, ops) <= 0);                            \
+        ASSERT(!ctxt->lma);                                             \
         generate_exception_if((ip) > (cs)->limit, EXC_GP, 0);           \
     } else                                                              \
-        generate_exception_if(in_longmode(ctxt, ops) &&                 \
-                              (cs)->attr.fields.l                       \
+        generate_exception_if(ctxt->lma && (cs)->attr.fields.l          \
                               ? !is_canonical_address(ip)               \
                               : (ip) > (cs)->limit, EXC_GP, 0);         \
 })
@@ -1630,20 +1629,6 @@ static bool vcpu_has(
 #endif
 
 static int
-in_longmode(
-    struct x86_emulate_ctxt *ctxt,
-    const struct x86_emulate_ops *ops)
-{
-    uint64_t efer;
-
-    if ( !ops->read_msr ||
-         unlikely(ops->read_msr(MSR_EFER, &efer, ctxt) != X86EMUL_OKAY) )
-        return -1;
-
-    return !!(efer & EFER_LMA);
-}
-
-static int
 realmode_load_seg(
     enum x86_segment seg,
     uint16_t sel,
@@ -1767,8 +1752,7 @@ protmode_load_seg(
          * Experimentally in long mode, the L and D bits are checked before
          * the Present bit.
          */
-        if ( in_longmode(ctxt, ops) &&
-             (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
+        if ( ctxt->lma && (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
             goto raise_exn;
         sel = (sel ^ rpl) | cpl;
         break;
@@ -1818,10 +1802,8 @@ protmode_load_seg(
 
     if ( !is_x86_user_segment(seg) )
     {
-        int lm = (desc.b & (1u << 12)) ? 0 : in_longmode(ctxt, ops);
+        bool lm = (desc.b & (1u << 12)) ? false : ctxt->lma;
 
-        if ( lm < 0 )
-            return X86EMUL_UNHANDLEABLE;
         if ( lm )
         {
             switch ( rc = ops->read(sel_seg, (sel & 0xfff8) + 8,
@@ -5195,7 +5177,7 @@ x86_emulate(
                 case 0x03: /* busy 16-bit TSS */
                 case 0x04: /* 16-bit call gate */
                 case 0x05: /* 16/32-bit task gate */
-                    if ( in_longmode(ctxt, ops) )
+                    if ( ctxt->lma )
                         break;
                     /* fall through */
                 case 0x02: /* LDT */
@@ -5242,7 +5224,7 @@ x86_emulate(
                 {
                 case 0x01: /* available 16-bit TSS */
                 case 0x03: /* busy 16-bit TSS */
-                    if ( in_longmode(ctxt, ops) )
+                    if ( ctxt->lma )
                         break;
                     /* fall through */
                 case 0x02: /* LDT */
@@ -5292,10 +5274,7 @@ x86_emulate(
         sreg.attr.bytes = 0xc93; /* G+DB+P+S+Data */
 
 #ifdef __x86_64__
-        rc = in_longmode(ctxt, ops);
-        if ( rc < 0 )
-            goto cannot_emulate;
-        if ( rc )
+        if ( ctxt->lma )
         {
             cs.attr.bytes = 0xa9b; /* L+DB+P+S+Code */
 
@@ -5732,9 +5711,7 @@ x86_emulate(
             dst.val = src.val;
         break;
 
-    case X86EMUL_OPC(0x0f, 0x34): /* sysenter */ {
-        int lm;
-
+    case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
         vcpu_must_have(sep);
         generate_exception_if(mode_ring0(), EXC_GP, 0);
         generate_exception_if(!in_protmode(ctxt, ops), EXC_GP, 0);
@@ -5745,17 +5722,14 @@ x86_emulate(
             goto done;
 
         generate_exception_if(!(msr_val & 0xfffc), EXC_GP, 0);
-        lm = in_longmode(ctxt, ops);
-        if ( lm < 0 )
-            goto cannot_emulate;
 
         _regs.eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF | X86_EFLAGS_RF);
 
         cs.sel = msr_val & ~3; /* SELECTOR_RPL_MASK */
         cs.base = 0;   /* flat segment */
         cs.limit = ~0u;  /* 4GB limit */
-        cs.attr.bytes = lm ? 0xa9b  /* G+L+P+S+Code */
-                           : 0xc9b; /* G+DB+P+S+Code */
+        cs.attr.bytes = ctxt->lma ? 0xa9b  /* G+L+P+S+Code */
+                                  : 0xc9b; /* G+DB+P+S+Code */
 
         sreg.sel = cs.sel + 8;
         sreg.base = 0;   /* flat segment */
@@ -5770,16 +5744,15 @@ x86_emulate(
         if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_EIP,
                                  &msr_val, ctxt)) != X86EMUL_OKAY )
             goto done;
-        _regs.r(ip) = lm ? msr_val : (uint32_t)msr_val;
+        _regs.r(ip) = ctxt->lma ? msr_val : (uint32_t)msr_val;
 
         if ( (rc = ops->read_msr(MSR_IA32_SYSENTER_ESP,
                                  &msr_val, ctxt)) != X86EMUL_OKAY )
             goto done;
-        _regs.r(sp) = lm ? msr_val : (uint32_t)msr_val;
+        _regs.r(sp) = ctxt->lma ? msr_val : (uint32_t)msr_val;
 
         singlestep = _regs.eflags & X86_EFLAGS_TF;
         break;
-    }
 
     case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
         vcpu_must_have(sep);
@@ -7922,7 +7895,12 @@ int x86_emulate_wrapper(
     const struct x86_emulate_ops *ops)
 {
     unsigned long orig_ip = ctxt->regs->r(ip);
-    int rc = x86_emulate(ctxt, ops);
+    int rc;
+
+    if ( mode_64bit() )
+        ASSERT(ctxt->lma);
+
+    rc = x86_emulate(ctxt, ops);
 
     /* Retire flags should only be set for successful instruction emulation. */
     if ( rc != X86EMUL_OKAY )
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 215adf6..d9a252e 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -473,6 +473,9 @@ struct x86_emulate_ctxt
     /* Stack pointer width in bits (16, 32 or 64). */
     unsigned int sp_size;
 
+    /* Long mode active? */
+    bool lma;
+
     /*
      * Output-only state:
      */
--
generated by git-patchbot for /home/xen/git/xen.git#master
