
[xen staging-4.12] x86/HVM: more consistently set I/O completion

commit 0186e76a62f7409804c2e4785d5a11e7f82a7c52
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Sep 22 17:42:27 2020 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Sep 22 17:42:27 2020 +0200

    x86/HVM: more consistently set I/O completion
    
    Doing this just in hvm_emulate_one_insn() is not enough.
    hvm_ud_intercept() and hvm_emulate_one_vm_event() can get invoked for
    insns requiring one or more continuations, and at least in principle
    hvm_emulate_one_mmio() could, too. Without proper setting of the field,
    handle_hvm_io_completion() will do nothing completion-wise, and in
    particular the missing re-invocation of the insn emulation paths will
    lead to emulation caching not getting disabled in due course, causing
    the ASSERT() in {svm,vmx}_vmenter_helper() to trigger.
    
    Reported-by: Don Slutz <don.slutz@xxxxxxxxx>
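
    To illustrate, a sketch (abridged from this tree's
    handle_hvm_io_completion() in xen/arch/x86/hvm/ioreq.c; exact details
    may vary) of how the latched vio->io_completion value picks the
    resumption path once the ioreq has been serviced, and hence why a
    stale HVMIO_no_completion means the insn simply never gets resumed:

        io_completion = vio->io_completion;
        vio->io_completion = HVMIO_no_completion;

        switch ( io_completion )
        {
        case HVMIO_no_completion:
            break;                    /* nothing to resume */

        case HVMIO_mmio_completion:
            handle_mmio();            /* re-enter the insn emulator */
            break;

        case HVMIO_pio_completion:
            /* Consume the result directly; no emulator re-entry. */
            handle_pio(vio->io_req.addr, vio->io_req.size,
                       vio->io_req.dir);
            break;

        case HVMIO_realmode_completion:
            /* Re-invoke VMX real mode emulation. */
            ...
        }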
    
    Similar considerations go for the clearing of vio->mmio_access, which
    gets moved as well.
    
    Additionally all updating of vio->mmio_* now gets done dependent upon
    the new completion value, rather than hvm_ioreq_needs_completion()'s
    return value. This is because it is the completion chosen which controls
    what path will be taken when handling the completion, not the simple
    boolean return value. In particular, PIO completion doesn't involve
    going through the insn emulator, and hence emulator state ought to get
    cleared early (or it won't get cleared at all).
    
    The new logic, besides allowing for a caller override for the
    continuation type to be set (for VMX real mode emulation), will also
    avoid setting an MMIO completion when a simpler PIO one will do. This
    is a minor optimization only as a side effect - the behavior is strictly
    needed at least for hvm_ud_intercept(), as only memory accesses can
    successfully complete through handle_mmio(). Care of course needs to be
    taken to correctly deal with "mixed" insns (doing both MMIO and PIO at
    the same time, i.e. INS/OUTS). For this, hvmemul_validate() now latches
    whether the insn being emulated is a memory access, as this information
    is no longer easily available at the point where we want to consume it.
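
    By way of illustration, how the new selection logic in
    _hvm_emulate_one() (see the hunk below) plays out for a few insns:

        /*
         * OUT %al,$0x71    -> IOREQ_TYPE_PIO, !is_mem_access
         *                     => HVMIO_pio_completion
         * MOV %eax,<mmio>  -> IOREQ_TYPE_COPY
         *                     => HVMIO_mmio_completion
         * OUTS             -> IOREQ_TYPE_PIO, but is_mem_access was
         *                     latched by hvmemul_validate(), so
         *                     => HVMIO_mmio_completion (resumption must
         *                        go through the full emulator, to redo
         *                        the memory side of the transfer)
         */
        completion = (vio->io_req.type != IOREQ_TYPE_PIO ||
                      hvmemul_ctxt->is_mem_access) ? HVMIO_mmio_completion
                                                   : HVMIO_pio_completion;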
    
    Note that the presence of non-NULL .validate fields in the two ops
    structures in hvm_emulate_one_mmio() was really necessary even before
    the changes here: Without this, passing non-NULL as middle argument to
    hvm_emulate_init_once() is meaningless.
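
    For reference, the chain this remark refers to: the middle argument
    of hvm_emulate_init_once() is merely stored as hvmemul_ctxt->validate,
    and that callback only ever runs from hvmemul_validate(), which the
    core emulator in turn invokes only through a non-NULL .validate hook
    (as in the first hunk below):

        static int hvmemul_validate(
            const struct x86_emulate_state *state,
            struct x86_emulate_ctxt *ctxt)
        {
            struct hvm_emulate_ctxt *hvmemul_ctxt =
                container_of(ctxt, struct hvm_emulate_ctxt, ctxt);

            /* Callback installed by hvm_emulate_init_once(), if any. */
            return !hvmemul_ctxt->validate || hvmemul_ctxt->validate(state, ctxt)
                   ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
        }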
    
    The restrictions on when the #UD intercept gets actually enabled are why
    it was decided that this is not a security issue:
    - the "hvm_fep" option to enable its use is a debugging option only,
    - for the cross-vendor case it is considered experimental, even if
      unfortunately SUPPORT.md doesn't have an explicit statement about
      this.
    The other two affected functions are
    - hvm_emulate_one_vm_event(), used for introspection,
    - hvm_emulate_one_mmio(), used for Dom0 only,
    which don't qualify this as needing an XSA either.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Tested-by: Don Slutz <don.slutz@xxxxxxxxx>
    Reviewed-by: Paul Durrant <paul@xxxxxxx>
    Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    master commit: b807cfe954b8d0d8852398b4c8a586d95d69a342
    master date: 2020-09-15 10:19:33 +0200
---
 xen/arch/x86/hvm/emulate.c        | 51 ++++++++++++++++++++++++++++++---------
 xen/arch/x86/hvm/hvm.c            |  2 +-
 xen/arch/x86/hvm/io.c             | 11 +--------
 xen/arch/x86/hvm/vmx/realmode.c   |  6 +----
 xen/include/asm-x86/hvm/emulate.h |  7 +++++-
 5 files changed, 49 insertions(+), 28 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index bfa3e1ad93..69e15c56b6 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1577,9 +1577,11 @@ static int hvmemul_validate(
     const struct x86_emulate_state *state,
     struct x86_emulate_ctxt *ctxt)
 {
-    const struct hvm_emulate_ctxt *hvmemul_ctxt =
+    struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
 
+    hvmemul_ctxt->is_mem_access = x86_insn_is_mem_access(state, ctxt);
+
     return !hvmemul_ctxt->validate || hvmemul_ctxt->validate(state, ctxt)
            ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
 }
@@ -2395,8 +2397,14 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
     .vmfunc        = hvmemul_vmfunc,
 };
 
+/*
+ * Note that passing HVMIO_no_completion into this function serves as kind
+ * of (but not fully) an "auto select completion" indicator.  When there's
+ * no completion needed, the passed in value will be ignored in any case.
+ */
 static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
-    const struct x86_emulate_ops *ops)
+    const struct x86_emulate_ops *ops,
+    enum hvm_io_completion completion)
 {
     const struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
@@ -2414,15 +2422,30 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
         rc = X86EMUL_RETRY;
 
     if ( !hvm_ioreq_needs_completion(&vio->io_req) )
+        completion = HVMIO_no_completion;
+    else if ( completion == HVMIO_no_completion )
+        completion = (vio->io_req.type != IOREQ_TYPE_PIO ||
+                      hvmemul_ctxt->is_mem_access) ? HVMIO_mmio_completion
+                                                   : HVMIO_pio_completion;
+
+    switch ( vio->io_completion = completion )
     {
+    case HVMIO_no_completion:
+    case HVMIO_pio_completion:
         vio->mmio_cache_count = 0;
         vio->mmio_insn_bytes = 0;
-    }
-    else
-    {
+        vio->mmio_access = (struct npfec){};
+        break;
+
+    case HVMIO_mmio_completion:
+    case HVMIO_realmode_completion:
         BUILD_BUG_ON(sizeof(vio->mmio_insn) < sizeof(hvmemul_ctxt->insn_buf));
         vio->mmio_insn_bytes = hvmemul_ctxt->insn_buf_bytes;
         memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
+        break;
+
+    default:
+        ASSERT_UNREACHABLE();
     }
 
     if ( hvmemul_ctxt->ctxt.retire.singlestep )
@@ -2462,9 +2485,10 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
 }
 
 int hvm_emulate_one(
-    struct hvm_emulate_ctxt *hvmemul_ctxt)
+    struct hvm_emulate_ctxt *hvmemul_ctxt,
+    enum hvm_io_completion completion)
 {
-    return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops);
+    return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops, completion);
 }
 
 int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
@@ -2473,12 +2497,14 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
         .read       = x86emul_unhandleable_rw,
         .insn_fetch = hvmemul_insn_fetch,
         .write      = mmcfg_intercept_write,
+        .validate   = hvmemul_validate,
         .cpuid      = hvmemul_cpuid,
     };
     static const struct x86_emulate_ops hvm_ro_emulate_ops_mmio = {
         .read       = x86emul_unhandleable_rw,
         .insn_fetch = hvmemul_insn_fetch,
         .write      = mmio_ro_emulated_write,
+        .validate   = hvmemul_validate,
         .cpuid      = hvmemul_cpuid,
     };
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = gla };
@@ -2499,8 +2525,8 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
     hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write,
                           guest_cpu_user_regs());
     ctxt.ctxt.data = &mmio_ro_ctxt;
-    rc = _hvm_emulate_one(&ctxt, ops);
-    switch ( rc )
+
+    switch ( rc = _hvm_emulate_one(&ctxt, ops, HVMIO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
     case X86EMUL_UNIMPLEMENTED:
@@ -2527,7 +2553,8 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     switch ( kind )
     {
     case EMUL_KIND_NOWRITE:
-        rc = _hvm_emulate_one(&ctx, &hvm_emulate_ops_no_write);
+        rc = _hvm_emulate_one(&ctx, &hvm_emulate_ops_no_write,
+                              HVMIO_no_completion);
         break;
     case EMUL_KIND_SET_CONTEXT_INSN: {
         struct vcpu *curr = current;
@@ -2548,7 +2575,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     /* Fall-through */
     default:
         ctx.set_context = (kind == EMUL_KIND_SET_CONTEXT_DATA);
-        rc = hvm_emulate_one(&ctx);
+        rc = hvm_emulate_one(&ctx, HVMIO_no_completion);
     }
 
     switch ( rc )
@@ -2644,6 +2671,8 @@ void hvm_emulate_init_per_insn(
         hvmemul_ctxt->insn_buf_bytes = insn_bytes;
         memcpy(hvmemul_ctxt->insn_buf, insn_buf, insn_bytes);
     }
+
+    hvmemul_ctxt->is_mem_access = false;
 }
 
 void hvm_emulate_writeback(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a98f7471b4..4d170ca597 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3773,7 +3773,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
         return;
     }
 
-    switch ( hvm_emulate_one(&ctxt) )
+    switch ( hvm_emulate_one(&ctxt, HVMIO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
     case X86EMUL_UNIMPLEMENTED:
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 724ab44a76..3e09d9b726 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -81,20 +81,11 @@ void send_invalidate_req(void)
 bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
 {
     struct hvm_emulate_ctxt ctxt;
-    struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
 
     hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
 
-    rc = hvm_emulate_one(&ctxt);
-
-    if ( hvm_ioreq_needs_completion(&vio->io_req) )
-        vio->io_completion = HVMIO_mmio_completion;
-    else
-        vio->mmio_access = (struct npfec){};
-
-    switch ( rc )
+    switch ( rc = hvm_emulate_one(&ctxt, HVMIO_no_completion) )
     {
     case X86EMUL_UNHANDLEABLE:
         hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index bb0b4439df..b1d445923a 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -96,15 +96,11 @@ static void realmode_deliver_exception(
 void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
 
     perfc_incr(realmode_emulations);
 
-    rc = hvm_emulate_one(hvmemul_ctxt);
-
-    if ( hvm_ioreq_needs_completion(&vio->io_req) )
-        vio->io_completion = HVMIO_realmode_completion;
+    rc = hvm_emulate_one(hvmemul_ctxt, HVMIO_realmode_completion);
 
     if ( rc == X86EMUL_UNHANDLEABLE )
     {
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 26a01e83a4..240b39447b 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -46,6 +46,8 @@ struct hvm_emulate_ctxt {
 
     uint32_t intr_shadow;
 
+    bool is_mem_access;
+
     bool_t set_context;
 };
 
@@ -56,11 +58,14 @@ enum emul_kind {
     EMUL_KIND_SET_CONTEXT_INSN
 };
 
+enum hvm_io_completion;
+
 bool __nonnull(1, 2) hvm_emulate_one_insn(
     hvm_emulate_validate_t *validate,
     const char *descr);
 int hvm_emulate_one(
-    struct hvm_emulate_ctxt *hvmemul_ctxt);
+    struct hvm_emulate_ctxt *hvmemul_ctxt,
+    enum hvm_io_completion completion);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
     unsigned int errcode);
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.12