
[Xen-devel] [PATCH v2 3/3] x86emul: correct FPU code/data pointers and opcode handling



Prevent leaking the hypervisor's own values (stored by hardware during stub
execution), while at the same time making sure the guest sees the correct
values there.
This piggybacks on the backout logic used to deal with write faults of
FPU insns.

Deliberately ignore the NO_FPU_SEL feature here: honoring it would
merely mean extra code with no benefit (once we XRSTOR the state, the
selector values will simply be lost anyway).
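
(Illustrative sketch only, not part of the change: "honoring" NO_FPU_SEL
would merely amount to guarding the selector updates in hvmemul_put_fpu()
along the lines below. The no_fpu_sel predicate is hypothetical; a real
implementation would have to derive it from the guest's CPUID policy.)

    /*
     * Hypothetical: if FCS/FDS are deprecated from the guest's point of
     * view (NO_FPU_SEL, CPUID.7.0:EBX[13]), force the selectors to zero
     * instead of taking them from aux.
     */
    if ( no_fpu_sel )                       /* hypothetical predicate */
    {
        fpu_ctxt->fip.sel = 0;
        if ( dval )
            fpu_ctxt->fdp.sel = 0;
    }
    else
    {
        fpu_ctxt->fip.sel = aux->cs;
        if ( dval )
            fpu_ctxt->fdp.sel = aux->ds;
    }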

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx> [hvm/emulate.c]
---
v2: Re-base.
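
For reference (not part of the patch): FOP is the 11-bit x87 opcode
register, holding the low three bits of the leading escape byte (D8..DF)
in bits 10:8 and the ModRM byte in bits 7:0. This is what the

    .op = ((ctxt->opcode & 7) << 8) | state->modrm,

initializer in put_fpu() below reconstructs. As a worked example, fsqrt
encodes as D9 FA, so the value latched for the guest would be

    aux.op = ((0xd9 & 7) << 8) | 0xfa;   /* = 0x1fa */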

--- a/tools/tests/x86_emulator/x86_emulate.c
+++ b/tools/tests/x86_emulator/x86_emulate.c
@@ -140,7 +140,8 @@ int emul_test_get_fpu(
 
 void emul_test_put_fpu(
     struct x86_emulate_ctxt *ctxt,
-    enum x86_emulate_fpu_type backout)
+    enum x86_emulate_fpu_type backout,
+    const struct x86_emul_fpu_aux *aux)
 {
     /* TBD */
 }
--- a/tools/tests/x86_emulator/x86_emulate.h
+++ b/tools/tests/x86_emulator/x86_emulate.h
@@ -181,4 +181,5 @@ int emul_test_get_fpu(
 
 void emul_test_put_fpu(
     struct x86_emulate_ctxt *ctxt,
-    enum x86_emulate_fpu_type backout);
+    enum x86_emulate_fpu_type backout,
+    const struct x86_emul_fpu_aux *aux);
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1658,12 +1658,73 @@ static int hvmemul_get_fpu(
 
 static void hvmemul_put_fpu(
     struct x86_emulate_ctxt *ctxt,
-    enum x86_emulate_fpu_type backout)
+    enum x86_emulate_fpu_type backout,
+    const struct x86_emul_fpu_aux *aux)
 {
     struct vcpu *curr = current;
 
     curr->arch.hvm_vcpu.fpu_exception_callback = NULL;
 
+    if ( aux )
+    {
+        typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt = curr->arch.fpu_ctxt;
+        bool dval = aux->dval;
+        int mode = hvm_guest_x86_mode(curr);
+
+        ASSERT(backout == X86EMUL_FPU_none);
+        /*
+         * Latch current register state so that we can replace FIP/FDP/FOP
+         * (which have values resulting from our own invocation of the FPU
+         * instruction during emulation).
+         * NB: See also the comment in hvmemul_get_fpu(); we don't need to
+         * set ->fpu_dirtied here as it is going to be cleared below, and
+         * we also don't need to reload FCW as we're forcing full state to
+         * be reloaded anyway.
+         */
+        save_fpu_enable();
+
+        if ( boot_cpu_has(X86_FEATURE_FDP_EXCP_ONLY) &&
+             !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) )
+            dval = false;
+
+        switch ( mode )
+        {
+        case 8:
+            fpu_ctxt->fip.addr = aux->ip;
+            if ( dval )
+                fpu_ctxt->fdp.addr = aux->dp;
+            fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = 8;
+            break;
+
+        case 4: case 2:
+            fpu_ctxt->fip.offs = aux->ip;
+            fpu_ctxt->fip.sel  = aux->cs;
+            if ( dval )
+            {
+                fpu_ctxt->fdp.offs = aux->dp;
+                fpu_ctxt->fdp.sel  = aux->ds;
+            }
+            fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = mode;
+            break;
+
+        case 0: case 1:
+            fpu_ctxt->fip.addr = aux->ip | (aux->cs << 4);
+            if ( dval )
+                fpu_ctxt->fdp.addr = aux->dp | (aux->ds << 4);
+            fpu_ctxt->x[FPU_WORD_SIZE_OFFSET] = 2;
+            break;
+
+        default:
+            ASSERT_UNREACHABLE();
+            return;
+        }
+
+        fpu_ctxt->fop = aux->op;
+
+        /* Re-use backout code below. */
+        backout = X86EMUL_FPU_fpu;
+    }
+
     if ( backout == X86EMUL_FPU_fpu )
     {
         /*
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -536,6 +536,55 @@ struct operand {
         unsigned long    off;
     } mem;
 };
+
+struct x86_emulate_state {
+    unsigned int op_bytes, ad_bytes;
+
+    enum {
+        ext_none = vex_none,
+        ext_0f   = vex_0f,
+        ext_0f38 = vex_0f38,
+        ext_0f3a = vex_0f3a,
+        /*
+         * For XOP use values such that the respective instruction field
+         * can be used without adjustment.
+         */
+        ext_8f08 = 8,
+        ext_8f09,
+        ext_8f0a,
+    } ext;
+    uint8_t modrm, modrm_mod, modrm_reg, modrm_rm;
+    uint8_t rex_prefix;
+    bool lock_prefix;
+    bool not_64bit; /* Instruction not available in 64bit. */
+    bool fpu_ctrl;  /* Instruction is an FPU control one. */
+    opcode_desc_t desc;
+    union vex vex;
+    union evex evex;
+    enum simd_opsize simd_size;
+
+    /*
+     * Data operand effective address (usually computed from ModRM).
+     * Default is a memory operand relative to segment DS.
+     */
+    struct operand ea;
+
+    /* Immediate operand values, if any. Use otherwise unused fields. */
+#define imm1 ea.val
+#define imm2 ea.orig_val
+
+    unsigned long ip;
+    struct cpu_user_regs *regs;
+
+#ifndef NDEBUG
+    /*
+     * Track caller of x86_decode_insn() to spot missing as well as
+     * premature calls to x86_emulate_free_state().
+     */
+    void *caller;
+#endif
+};
+
 #ifdef __x86_64__
 #define PTR_POISON ((void *)0x8086000000008086UL) /* non-canonical */
 #else
@@ -1028,13 +1077,48 @@ do {
 static void put_fpu(
     struct fpu_insn_ctxt *fic,
     bool failed_late,
+    const struct x86_emulate_state *state,
     struct x86_emulate_ctxt *ctxt,
     const struct x86_emulate_ops *ops)
 {
     if ( unlikely(failed_late) && fic->type == X86EMUL_FPU_fpu )
-        ops->put_fpu(ctxt, X86EMUL_FPU_fpu);
+        ops->put_fpu(ctxt, X86EMUL_FPU_fpu, NULL);
+    else if ( unlikely(fic->type == X86EMUL_FPU_fpu) && !state->fpu_ctrl )
+    {
+        struct x86_emul_fpu_aux aux = {
+            .ip = ctxt->regs->r(ip),
+            .cs = ctxt->regs->cs,
+            .op = ((ctxt->opcode & 7) << 8) | state->modrm,
+        };
+        struct segment_register sreg;
+
+        if ( ops->read_segment &&
+             ops->read_segment(x86_seg_cs, &sreg, ctxt) == X86EMUL_OKAY )
+            aux.cs = sreg.sel;
+        if ( state->ea.type == OP_MEM )
+        {
+            aux.dp = state->ea.mem.off;
+            if ( ops->read_segment &&
+                 ops->read_segment(state->ea.mem.seg, &sreg,
+                                   ctxt) == X86EMUL_OKAY )
+                aux.ds = sreg.sel;
+            else
+                switch ( state->ea.mem.seg )
+                {
+                case x86_seg_cs: aux.ds = ctxt->regs->cs; break;
+                case x86_seg_ds: aux.ds = ctxt->regs->ds; break;
+                case x86_seg_es: aux.ds = ctxt->regs->es; break;
+                case x86_seg_fs: aux.ds = ctxt->regs->fs; break;
+                case x86_seg_gs: aux.ds = ctxt->regs->gs; break;
+                case x86_seg_ss: aux.ds = ctxt->regs->ss; break;
+                default:         ASSERT_UNREACHABLE();    break;
+                }
+            aux.dval = true;
+        }
+        ops->put_fpu(ctxt, X86EMUL_FPU_none, &aux);
+    }
     else if ( fic->type != X86EMUL_FPU_none && ops->put_fpu )
-        ops->put_fpu(ctxt, X86EMUL_FPU_none);
+        ops->put_fpu(ctxt, X86EMUL_FPU_none, NULL);
     fic->type = X86EMUL_FPU_none;
 }
 
@@ -2088,53 +2172,6 @@ int x86emul_unhandleable_rw(
     return X86EMUL_UNHANDLEABLE;
 }
 
-struct x86_emulate_state {
-    unsigned int op_bytes, ad_bytes;
-
-    enum {
-        ext_none = vex_none,
-        ext_0f   = vex_0f,
-        ext_0f38 = vex_0f38,
-        ext_0f3a = vex_0f3a,
-        /*
-         * For XOP use values such that the respective instruction field
-         * can be used without adjustment.
-         */
-        ext_8f08 = 8,
-        ext_8f09,
-        ext_8f0a,
-    } ext;
-    uint8_t modrm, modrm_mod, modrm_reg, modrm_rm;
-    uint8_t rex_prefix;
-    bool lock_prefix;
-    bool not_64bit; /* Instruction not available in 64bit. */
-    opcode_desc_t desc;
-    union vex vex;
-    union evex evex;
-    enum simd_opsize simd_size;
-
-    /*
-     * Data operand effective address (usually computed from ModRM).
-     * Default is a memory operand relative to segment DS.
-     */
-    struct operand ea;
-
-    /* Immediate operand values, if any. Use otherwise unused fields. */
-#define imm1 ea.val
-#define imm2 ea.orig_val
-
-    unsigned long ip;
-    struct cpu_user_regs *regs;
-
-#ifndef NDEBUG
-    /*
-     * Track caller of x86_decode_insn() to spot missing as well as
-     * premature calls to x86_emulate_free_state().
-     */
-    void *caller;
-#endif
-};
-
 /* Helper definitions. */
 #define op_bytes (state->op_bytes)
 #define ad_bytes (state->ad_bytes)
@@ -4233,8 +4270,10 @@ x86_emulate(
                 dst.bytes = 4;
                 break;
             case 4: /* fldenv - TODO */
+                state->fpu_ctrl = true;
                 goto cannot_emulate;
             case 5: /* fldcw m2byte */
+                state->fpu_ctrl = true;
                 if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &src.val,
                                      2, ctxt)) != X86EMUL_OKAY )
                     goto done;
@@ -4242,8 +4281,10 @@ x86_emulate(
                 dst.type = OP_NONE;
                 break;
             case 6: /* fnstenv - TODO */
+                state->fpu_ctrl = true;
                 goto cannot_emulate;
             case 7: /* fnstcw m2byte */
+                state->fpu_ctrl = true;
                 emulate_fpu_insn_memdst("fnstcw", dst.val);
                 dst.bytes = 2;
                 break;
@@ -4332,6 +4373,7 @@ x86_emulate(
         case 0xe3: /* fninit */
         case 0xe4: /* fnsetpm - 287 only, ignored by 387 */
         /* case 0xe5: frstpm - 287 only, #UD on 387 */
+            state->fpu_ctrl = true;
             emulate_fpu_insn_stub(0xdb, modrm);
             break;
         default:
@@ -4475,8 +4517,10 @@ x86_emulate(
                 break;
             case 4: /* frstor - TODO */
             case 6: /* fnsave - TODO */
+                state->fpu_ctrl = true;
                 goto cannot_emulate;
             case 7: /* fnstsw m2byte */
+                state->fpu_ctrl = true;
                 emulate_fpu_insn_memdst("fnstsw", dst.val);
                 dst.bytes = 2;
                 break;
@@ -4549,6 +4593,7 @@ x86_emulate(
         {
         case 0xe0:
             /* fnstsw %ax */
+            state->fpu_ctrl = true;
             dst.bytes = 2;
             dst.type = OP_REG;
             dst.reg = (void *)&_regs.ax;
@@ -7896,7 +7941,7 @@ x86_emulate(
     }
 
  complete_insn: /* Commit shadow register state. */
-    put_fpu(&fic, false, ctxt, ops);
+    put_fpu(&fic, false, state, ctxt, ops);
 
     /* Zero the upper 32 bits of %rip if not in 64-bit mode. */
     if ( !mode_64bit() )
@@ -7920,7 +7965,7 @@ x86_emulate(
     ctxt->regs->eflags &= ~X86_EFLAGS_RF;
 
  done:
-    put_fpu(&fic, fic.insn_bytes > 0 && dst.type == OP_MEM, ctxt, ops);
+    put_fpu(&fic, fic.insn_bytes > 0 && dst.type == OP_MEM, state, ctxt, ops);
     put_stub(stub);
     return rc;
 #undef state
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -135,6 +135,13 @@ struct __attribute__((__packed__)) segme
     uint64_t   base;
 };
 
+struct x86_emul_fpu_aux {
+    unsigned long ip, dp;
+    uint16_t cs, ds;
+    unsigned int op:11;
+    unsigned int dval:1;
+};
+
 /*
  * Return codes from state-accessor functions and from x86_emulate().
  */
@@ -439,10 +446,12 @@ struct x86_emulate_ops
      *  the get_fpu one having got called before!
      * @backout: Undo updates to the specified register file (can, besides
      *           X86EMUL_FPU_none, only be X86EMUL_FPU_fpu at present);
+     * @aux: Packaged up FIP/FDP/FOP values to load into FPU.
      */
     void (*put_fpu)(
         struct x86_emulate_ctxt *ctxt,
-        enum x86_emulate_fpu_type backout);
+        enum x86_emulate_fpu_type backout,
+        const struct x86_emul_fpu_aux *aux);
 
     /* invlpg: Invalidate paging structures which map addressed byte. */
     int (*invlpg)(

