
[Xen-changelog] [xen stable-4.6] x86/HVM: fix forwarding of internally cached requests



commit e69e7930bee39272d3cf2b0109470e317e4fe703
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 9 12:52:24 2016 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 9 12:52:24 2016 +0200

    x86/HVM: fix forwarding of internally cached requests
    
    Forwarding an entire batch to the device model when an individual
    iteration of it gets rejected by an internal device emulation handler
    with X86EMUL_UNHANDLEABLE is wrong: the device model would then handle
    all iterations, and the internal handler would never see any iteration
    past the one it rejected. This causes misbehavior in at least the
    MSI-X and VGA code, which need to see all such requests for internal
    tracking/caching purposes. Note that this does not apply to buffered
    I/O requests.
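    
    To illustrate the intended dispatch shape, here is a standalone toy
    model (made-up names and types, not the actual Xen code): the internal
    handler sees every iteration until it rejects one; a rejection after
    some successful iterations clips the batch to the handled prefix,
    while a rejection of the very first iteration forwards only that
    single iteration externally instead of the whole batch.
    
        #include <stdio.h>
    
        enum result { OKAY, UNHANDLEABLE };
    
        /* Toy internal handler: accepts the first 'limit' iterations only. */
        static enum result internal_handler(unsigned int i, unsigned int limit)
        {
            return i < limit ? OKAY : UNHANDLEABLE;
        }
    
        /*
         * Process a batch of *count iterations.  On return, *count holds
         * either the handled prefix (rc OKAY) or 1 (rc UNHANDLEABLE), so
         * the external device model never receives the whole batch.
         */
        static enum result process_batch(unsigned int *count, unsigned int limit)
        {
            unsigned int i;
    
            for ( i = 0; i < *count; i++ )
                if ( internal_handler(i, limit) != OKAY )
                    break;
    
            if ( i )
            {
                *count = i;      /* clip to the handled prefix */
                return OKAY;
            }
    
            *count = 1;          /* forward a single iteration only */
            return UNHANDLEABLE;
        }
    
        int main(void)
        {
            unsigned int count = 8;
            enum result rc = process_batch(&count, 3);
    
            printf("rc=%d count=%u\n", rc, count);   /* rc=0 count=3 */
    
            count = 8;
            rc = process_batch(&count, 0);
            printf("rc=%d count=%u\n", rc, count);   /* rc=1 count=1 */
    
            return 0;
        }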
    
    This in turn means that the condition in hvm_process_io_intercept()
    for when to crash the domain was wrong: since X86EMUL_UNHANDLEABLE can
    validly be returned by the individual device handlers, we mustn't
    blindly crash the domain when it occurs on an iteration other than the
    initial one. Instead we need to distinguish hvm_copy_*_guest_phys()
    failures from device-specific ones; the former must always be fatal to
    the domain (i.e. also on the first iteration), since otherwise we
    would again end up forwarding to qemu a request which the internal
    handler didn't get to see.
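    
    As a rough standalone sketch of that distinction (simplified, made-up
    names, not the actual handler code): a failed guest-physical copy is
    fatal regardless of which iteration it happens on, whereas a
    device-specific rejection merely ends the loop so the caller can clip
    the count or forward a single iteration.
    
        #include <stdbool.h>
        #include <stdio.h>
    
        enum rc { OKAY, UNHANDLEABLE };
    
        static void crash_domain(void)
        {
            puts("domain_crash()");
        }
    
        /*
         * Finish one iteration: 'copy_ok' stands in for the result of
         * hvm_copy_*_guest_phys(), 'device_rc' for the internal handler's
         * verdict.
         */
        static enum rc finish_iteration(bool copy_ok, enum rc device_rc)
        {
            if ( !copy_ok )
            {
                crash_domain();   /* fatal even on the first iteration */
                return UNHANDLEABLE;
            }
            return device_rc;     /* may legitimately be UNHANDLEABLE */
        }
    
        int main(void)
        {
            /* Device-specific rejection: no crash, just stop the batch. */
            printf("%d\n", finish_iteration(true, UNHANDLEABLE));
            /* Copy failure: the domain gets crashed. */
            printf("%d\n", finish_iteration(false, OKAY));
            return 0;
        }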
    
    The adjustment should be okay even for stdvga's MMIO handling:
    - if it is not caching, the accept function would have failed, so we
      won't get into hvm_process_io_intercept();
    - if it issued the buffered ioreq, we only get to the p->count
      reduction if hvm_send_ioreq() actually encountered an error (in
      which case we don't care about the request getting split up).
    
    Also commit 4faffc41d ("x86/hvm: limit reps to avoid the need to handle
    retry") went too far in removing code from hvm_process_io_intercept():
    When there were successfully handled iterations, the function should
    continue to return success with a clipped repeat count.
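    
    For instance, if the first three iterations of an eight-iteration REP
    access are handled internally before one gets rejected, the function
    should return X86EMUL_OKAY with the repeat count clipped to three,
    leaving the remaining iterations to be retried rather than lost.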
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    
    x86/HVM: fix forwarding of internally cached requests (part 2)
    
    Commit 96ae556569 ("x86/HVM: fix forwarding of internally cached
    requests") wasn't quite complete: hvmemul_do_io() also needs to
    propagate the clipped count up to its callers. (I really should have
    re-tested the forward port that resulted in the earlier change,
    instead of relying on the testing done on the older version of Xen
    for which the fix was first needed.)
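    
    Sketched in isolation (simplified, not the actual Xen interfaces),
    this is the in/out contract for the repeat count: the callee may only
    ever reduce *reps, and the caller uses the reduction to decide whether
    a retry is needed.
    
        #include <assert.h>
        #include <stdio.h>
    
        /* Toy callee: completes at most 'done' of the *reps iterations. */
        static int do_io(unsigned long *reps, unsigned long done)
        {
            unsigned long count = done < *reps ? done : *reps;
    
            assert(count <= *reps);   /* the count must never grow */
            *reps = count;            /* propagate the clipped count up */
            return 0;                 /* X86EMUL_OKAY stand-in */
        }
    
        int main(void)
        {
            unsigned long want = 8, reps = want;
    
            do_io(&reps, 5);
            printf("completed %lu of %lu, retry=%d\n",
                   reps, want, reps < want);
            /* prints: completed 5 of 8, retry=1 */
            return 0;
        }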
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: 96ae556569b8eaedc0bb242932842c3277b515d8
    master date: 2016-03-31 14:52:04 +0200
    master commit: 670ee15ac1e3de7c15381fdaab0e531489b48939
    master date: 2016-04-28 15:09:26 +0200
---
 xen/arch/x86/hvm/emulate.c   | 25 +++++++++++++++++--------
 xen/arch/x86/hvm/intercept.c | 24 ++++++++++++++++++------
 2 files changed, 35 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index abeeef9..8e47b29 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -94,7 +94,7 @@ static const struct hvm_io_handler null_handler = {
 };
 
 static int hvmemul_do_io(
-    bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
+    bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
 {
     struct vcpu *curr = current;
@@ -103,7 +103,7 @@ static int hvmemul_do_io(
         .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
         .addr = addr,
         .size = size,
-        .count = reps,
+        .count = *reps,
         .dir = dir,
         .df = df,
         .data = data,
@@ -135,7 +135,7 @@ static int hvmemul_do_io(
         if ( (p.type != is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO) ||
              (p.addr != addr) ||
              (p.size != size) ||
-             (p.count != reps) ||
+             (p.count > *reps) ||
              (p.dir != dir) ||
              (p.df != df) ||
              (p.data_is_ptr != data_is_addr) )
@@ -143,6 +143,8 @@ static int hvmemul_do_io(
 
         if ( data_is_addr )
             return X86EMUL_UNHANDLEABLE;
+
+        *reps = p.count;
         goto finish_access;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -160,6 +162,13 @@ static int hvmemul_do_io(
 
     rc = hvm_io_intercept(&p);
 
+    /*
+     * p.count may have got reduced (see hvm_process_io_intercept()) - inform
+     * our callers and mirror this into latched state.
+     */
+    ASSERT(p.count <= *reps);
+    *reps = vio->io_req.count = p.count;
+
     switch ( rc )
     {
     case X86EMUL_OKAY:
@@ -213,7 +222,7 @@ static int hvmemul_do_io_buffer(
 
     BUG_ON(buffer == NULL);
 
-    rc = hvmemul_do_io(is_mmio, addr, *reps, size, dir, df, 0,
+    rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0,
                        (uintptr_t)buffer);
     if ( rc == X86EMUL_UNHANDLEABLE && dir == IOREQ_READ )
         memset(buffer, 0xff, size);
@@ -304,13 +313,13 @@ static int hvmemul_do_io_addr(
         count = 1;
     }
 
-    rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1,
+    rc = hvmemul_do_io(is_mmio, addr, &count, size, dir, df, 1,
                        ram_gpa);
+
     if ( rc == X86EMUL_OKAY )
-    {
         v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
-        *reps = count;
-    }
+
+    *reps = count;
 
  out:
     while ( nr_pages )
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 7096d74..3a7a7dd 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -148,8 +148,8 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
-                    rc = X86EMUL_UNHANDLEABLE;
-                    break;
+                    domain_crash(current->domain);
+                    return X86EMUL_UNHANDLEABLE;
                 }
                 if ( rc != X86EMUL_OKAY )
                     break;
@@ -178,8 +178,8 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
-                    rc = X86EMUL_UNHANDLEABLE;
-                    break;
+                    domain_crash(current->domain);
+                    return X86EMUL_UNHANDLEABLE;
                 }
                 if ( rc != X86EMUL_OKAY )
                     break;
@@ -196,8 +196,20 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
         }
     }
 
-    if ( i != 0 && rc == X86EMUL_UNHANDLEABLE )
-        domain_crash(current->domain);
+    if ( i )
+    {
+        p->count = i;
+        rc = X86EMUL_OKAY;
+    }
+    else if ( rc == X86EMUL_UNHANDLEABLE )
+    {
+        /*
+         * Don't forward entire batches to the device model: this would
+         * prevent the internal handlers from seeing subsequent iterations
+         * of the request.
+         */
+        p->count = 1;
+    }
 
     return rc;
 }
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6
