
[Xen-changelog] [xen staging] x86emul: generalize invlpg() hook



commit 749c6e3c0e733a3f18d9c7d9f903122b620157c3
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Sep 3 14:49:52 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Sep 3 14:49:52 2019 +0200

    x86emul: generalize invlpg() hook
    
    The hook is already in use for INVLPGA as well. Rename the hook and add
    parameters. For the moment INVLPGA with a non-zero ASID remains
    unsupported, but the TODO item gets pushed into the actual hook handler.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 tools/fuzz/x86_instruction_emulator/fuzz-emul.c | 27 +++++++----
 xen/arch/x86/hvm/emulate.c                      | 64 +++++++++++++++----------
 xen/arch/x86/x86_emulate/x86_emulate.c          | 12 ++---
 xen/arch/x86/x86_emulate/x86_emulate.h          | 24 +++++++---
 4 files changed, 81 insertions(+), 46 deletions(-)

diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index b66df97342..8defabcff3 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -370,16 +370,23 @@ static int fuzz_cmpxchg(
     return maybe_fail(ctxt, "cmpxchg", true);
 }
 
-static int fuzz_invlpg(
-    enum x86_segment seg,
-    unsigned long offset,
+static int fuzz_tlb_op(
+    enum x86emul_tlb_op op,
+    unsigned long addr,
+    unsigned long aux,
     struct x86_emulate_ctxt *ctxt)
 {
-    /* invlpg(), unlike all other hooks, may be called with x86_seg_none. */
-    assert(is_x86_user_segment(seg) || seg == x86_seg_none);
-    assert(ctxt->addr_size == 64 || !(offset >> 32));
+    switch ( op )
+    {
+    case x86emul_invlpg:
+        assert(is_x86_user_segment(aux));
+        /* fall through */
+    case x86emul_invlpga:
+        assert(ctxt->addr_size == 64 || !(addr >> 32));
+        break;
+    }
 
-    return maybe_fail(ctxt, "invlpg", false);
+    return maybe_fail(ctxt, "TLB-management", false);
 }
 
 static int fuzz_cache_op(
@@ -624,7 +631,7 @@ static const struct x86_emulate_ops all_fuzzer_ops = {
     SET(read_msr),
     SET(write_msr),
     SET(cache_op),
-    SET(invlpg),
+    SET(tlb_op),
     .get_fpu    = emul_test_get_fpu,
     .put_fpu    = emul_test_put_fpu,
     .cpuid      = emul_test_cpuid,
@@ -733,12 +740,12 @@ enum {
     HOOK_read_msr,
     HOOK_write_msr,
     HOOK_cache_op,
+    HOOK_tlb_op,
     HOOK_cpuid,
     HOOK_inject_hw_exception,
     HOOK_inject_sw_interrupt,
     HOOK_get_fpu,
     HOOK_put_fpu,
-    HOOK_invlpg,
     HOOK_vmfunc,
     CANONICALIZE_rip,
     CANONICALIZE_rsp,
@@ -777,9 +784,9 @@ static void disable_hooks(struct x86_emulate_ctxt *ctxt)
     MAYBE_DISABLE_HOOK(read_msr);
     MAYBE_DISABLE_HOOK(write_msr);
     MAYBE_DISABLE_HOOK(cache_op);
+    MAYBE_DISABLE_HOOK(tlb_op);
     MAYBE_DISABLE_HOOK(cpuid);
     MAYBE_DISABLE_HOOK(get_fpu);
-    MAYBE_DISABLE_HOOK(invlpg);
 }
 
 /*
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 7cd727ab97..253044a7dc 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2356,35 +2356,51 @@ static void hvmemul_put_fpu(
     }
 }
 
-static int hvmemul_invlpg(
-    enum x86_segment seg,
-    unsigned long offset,
+static int hvmemul_tlb_op(
+    enum x86emul_tlb_op op,
+    unsigned long addr,
+    unsigned long aux,
     struct x86_emulate_ctxt *ctxt)
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    unsigned long addr;
-    int rc;
-
-    rc = hvmemul_virtual_to_linear(
-        seg, offset, 1, NULL, hvm_access_none, hvmemul_ctxt, &addr);
+    int rc = X86EMUL_OKAY;
 
-    if ( rc == X86EMUL_EXCEPTION )
+    switch ( op )
     {
-        /*
-         * `invlpg` takes segment bases into account, but is not subject to
-         * faults from segment type/limit checks, and is specified as a NOP
-         * when issued on non-canonical addresses.
-         *
-         * hvmemul_virtual_to_linear() raises exceptions for type/limit
-         * violations, so squash them.
-         */
-        x86_emul_reset_event(ctxt);
-        rc = X86EMUL_OKAY;
-    }
+    case x86emul_invlpg:
+        rc = hvmemul_virtual_to_linear(aux, addr, 1, NULL, hvm_access_none,
+                                       hvmemul_ctxt, &addr);
 
-    if ( rc == X86EMUL_OKAY )
-        paging_invlpg(current, addr);
+        if ( rc == X86EMUL_EXCEPTION )
+        {
+            /*
+             * `invlpg` takes segment bases into account, but is not subject
+             * to faults from segment type/limit checks, and is specified as
+             * a NOP when issued on non-canonical addresses.
+             *
+             * hvmemul_virtual_to_linear() raises exceptions for type/limit
+             * violations, so squash them.
+             */
+            x86_emul_reset_event(ctxt);
+            rc = X86EMUL_OKAY;
+        }
+
+        if ( rc == X86EMUL_OKAY )
+            paging_invlpg(current, addr);
+        break;
+
+    case x86emul_invlpga:
+        /* TODO: Support ASIDs. */
+        if ( !aux )
+            paging_invlpg(current, addr);
+        else
+        {
+            x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+            rc = X86EMUL_EXCEPTION;
+        }
+        break;
+    }
 
     return rc;
 }
@@ -2425,10 +2441,10 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
     .read_msr      = hvmemul_read_msr,
     .write_msr     = hvmemul_write_msr,
     .cache_op      = hvmemul_cache_op,
+    .tlb_op        = hvmemul_tlb_op,
     .cpuid         = x86emul_cpuid,
     .get_fpu       = hvmemul_get_fpu,
     .put_fpu       = hvmemul_put_fpu,
-    .invlpg        = hvmemul_invlpg,
     .vmfunc        = hvmemul_vmfunc,
 };
 
@@ -2452,10 +2468,10 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
     .read_msr      = hvmemul_read_msr,
     .write_msr     = hvmemul_write_msr_discard,
     .cache_op      = hvmemul_cache_op_discard,
+    .tlb_op        = hvmemul_tlb_op,
     .cpuid         = x86emul_cpuid,
     .get_fpu       = hvmemul_get_fpu,
     .put_fpu       = hvmemul_put_fpu,
-    .invlpg        = hvmemul_invlpg,
     .vmfunc        = hvmemul_vmfunc,
 };
 
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 648a95d5ec..76a5e264f1 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -5590,10 +5590,9 @@ x86_emulate(
             generate_exception_if(!(msr_val & EFER_SVME) ||
                                   !in_protmode(ctxt, ops), EXC_UD);
             generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(_regs.ecx, EXC_UD); /* TODO: Support ASIDs. */
-            fail_if(ops->invlpg == NULL);
-            if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
-                                   ctxt)) )
+            fail_if(!ops->tlb_op);
+            if ( (rc = ops->tlb_op(x86emul_invlpga, truncate_ea(_regs.r(ax)),
+                                   _regs.ecx, ctxt)) != X86EMUL_OKAY )
                 goto done;
             break;
 
@@ -5747,8 +5746,9 @@ x86_emulate(
         case GRP7_MEM(7): /* invlpg */
             ASSERT(ea.type == OP_MEM);
             generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            fail_if(ops->invlpg == NULL);
-            if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
+            fail_if(!ops->tlb_op);
+            if ( (rc = ops->tlb_op(x86emul_invlpg, ea.mem.off, ea.mem.seg,
+                                   ctxt)) != X86EMUL_OKAY )
                 goto done;
             break;
 
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 6b22c536f5..9ed931508f 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -185,6 +185,11 @@ enum x86emul_cache_op {
     x86emul_wbnoinvd,
 };
 
+enum x86emul_tlb_op {
+    x86emul_invlpg,
+    x86emul_invlpga,
+};
+
 struct x86_emulate_state;
 
 /*
@@ -472,6 +477,19 @@ struct x86_emulate_ops
         unsigned long offset,
         struct x86_emulate_ctxt *ctxt);
 
+    /*
+     * tlb_op: Invalidate paging structures which map addressed byte.
+     *
+     * @addr and @aux have @op-specific meaning:
+     * - INVLPG:  @aux:@addr represent seg:offset
+     * - INVLPGA: @addr is the linear address, @aux the ASID
+     */
+    int (*tlb_op)(
+        enum x86emul_tlb_op op,
+        unsigned long addr,
+        unsigned long aux,
+        struct x86_emulate_ctxt *ctxt);
+
     /* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
     int (*cpuid)(
         uint32_t leaf,
@@ -499,12 +517,6 @@ struct x86_emulate_ops
         enum x86_emulate_fpu_type backout,
         const struct x86_emul_fpu_aux *aux);
 
-    /* invlpg: Invalidate paging structures which map addressed byte. */
-    int (*invlpg)(
-        enum x86_segment seg,
-        unsigned long offset,
-        struct x86_emulate_ctxt *ctxt);
-
     /* vmfunc: Emulate VMFUNC via given set of EAX ECX inputs */
     int (*vmfunc)(
         struct x86_emulate_ctxt *ctxt);
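
For illustration only, a minimal sketch of a backend implementing the renamed
hook as declared in the x86_emulate.h hunk above. flush_one_page() is a
hypothetical stand-in for whatever TLB maintenance a given backend performs
(the in-tree HVM handler calls paging_invlpg() after translating seg:offset to
a linear address); it is not a Xen function, and the example is not part of
the patch:

    static int example_tlb_op(
        enum x86emul_tlb_op op,
        unsigned long addr,
        unsigned long aux,
        struct x86_emulate_ctxt *ctxt)
    {
        switch ( op )
        {
        case x86emul_invlpg:
            /* @aux:@addr is a seg:offset pair; the segment base still applies. */
            flush_one_page((enum x86_segment)aux, addr);
            break;

        case x86emul_invlpga:
            /* @addr is already a linear address; @aux carries the ASID. */
            if ( aux )
            {
                /* Mirror the in-tree handler: ASIDs unsupported, raise #UD. */
                x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
                return X86EMUL_EXCEPTION;
            }
            flush_one_page(x86_seg_none, addr);
            break;
        }

        return X86EMUL_OKAY;
    }

Folding both instructions into one hook keeps the TODO about non-zero ASIDs
local to the handler that would have to honour them, rather than in the
decode path.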
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

