
[Xen-changelog] [xen staging-4.12] x86/hvm: finish IOREQs correctly on completion path



commit 7cf6fbc63501533bf105bd8350596a7df60b5c6f
Author:     Igor Druzhinin <igor.druzhinin@xxxxxxxxxx>
AuthorDate: Fri May 3 10:44:23 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri May 3 10:44:23 2019 +0200

    x86/hvm: finish IOREQs correctly on completion path
    
    Since the introduction of the linear_{read,write}() helpers in
    3bdec530a5 (x86/HVM: split page straddling emulated accesses in more
    cases) the completion path for IOREQs has been broken: if there is an
    IOREQ in progress but hvm_copy_{to,from}_guest_linear() returns
    HVMTRANS_okay (e.g. because the P2M type of the source/destination
    has been changed by the IOREQ handler), execution will never re-enter
    hvmemul_do_io(), where IOREQs are completed. This usually results in
    a domain crash when the next IOREQ enters hvmemul_do_io() and finds
    the remnants of the previous IOREQ in the state machine.
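    
    As a stand-alone illustration (plain user-space C, not Xen code; all
    names below are invented for the sketch), the stuck state machine
    behaves roughly as follows: a completion that bypasses the do_io step
    leaves a stale response behind, and the next, unrelated request then
    trips over it.
    
        #include <stdio.h>
        
        enum req_state { STATE_NONE, STATE_RESP_READY };
        
        struct vcpu_io {
            enum req_state state;   /* IOREQ state machine */
            unsigned long  addr;    /* address of the request in flight */
        };
        
        /* Loosely models hvmemul_do_io(): consume a pending response,
         * or issue a new request. */
        static void do_io(struct vcpu_io *vio, unsigned long addr)
        {
            if ( vio->state == STATE_RESP_READY )
            {
                if ( vio->addr != addr )
                {
                    printf("stale IOREQ remnants -> domain crash\n");
                    return;
                }
                vio->state = STATE_NONE;     /* completion consumed */
                return;
            }
            vio->state = STATE_RESP_READY;   /* request sent to emulator */
            vio->addr = addr;
        }
        
        int main(void)
        {
            struct vcpu_io vio = { STATE_NONE, 0 };
        
            do_io(&vio, 0x1000);  /* IOREQ issued to the device model */
            /* Bug: the completion path finished via a direct copy and
             * never re-entered do_io(), so the state is never cleared. */
            do_io(&vio, 0x2000);  /* next IOREQ finds the remnants */
            return 0;
        }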
    
    This particular issue was discovered in relation to the
    p2m_ioreq_server type, where an emulator changed the memory type
    between p2m_ioreq_server and p2m_ram_rw while responding to an IOREQ,
    which made hvm_copy_..() behave differently on the way back.
    
    Fix it for now by checking whether IOREQ completion is required
    (which can be determined by querying the MMIO cache) before trying to
    finish the memory access immediately through hvm_copy_..(), and
    re-entering hvmemul_do_io() otherwise. This change alone only
    addresses the IOREQ completion issue for a P2M type change from MMIO
    to RAM in the middle of emulation; it leaves open the case where new
    IOREQs are introduced by a P2M change from RAM to MMIO (which is less
    likely to occur in practice), and which requires more substantial
    changes to the MMIO emulation code.
    
    Signed-off-by: Igor Druzhinin <igor.druzhinin@xxxxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    master commit: 522a2f3c5c89cc78c0e2b05af924b76cef7d4bff
    master date: 2019-03-18 16:29:21 +0100
---
 xen/arch/x86/hvm/emulate.c | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c236e7da1f..bfa3e1ad93 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -952,7 +952,7 @@ static int hvmemul_phys_mmio_access(
  * cache indexed by linear MMIO address.
  */
 static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
-    struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir)
+    struct hvm_vcpu_io *vio, unsigned long gla, uint8_t dir, bool create)
 {
     unsigned int i;
     struct hvm_mmio_cache *cache;
@@ -966,6 +966,9 @@ static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
             return cache;
     }
 
+    if ( !create )
+        return NULL;
+
     i = vio->mmio_cache_count;
     if( i == ARRAY_SIZE(vio->mmio_cache) )
         return NULL;
@@ -1000,7 +1003,7 @@ static int hvmemul_linear_mmio_access(
 {
     struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     unsigned long offset = gla & ~PAGE_MASK;
-    struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir);
+    struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(vio, gla, dir, true);
     unsigned int chunk, buffer_offset = 0;
     paddr_t gpa;
     unsigned long one_rep = 1;
@@ -1089,8 +1092,9 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
+    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     unsigned int offset = addr & ~PAGE_MASK;
-    int rc;
+    int rc = HVMTRANS_bad_gfn_to_mfn;
 
     if ( offset + bytes > PAGE_SIZE )
     {
@@ -1104,7 +1108,14 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
         return rc;
     }
 
-    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+    /*
+     * If there is an MMIO cache entry for the access then we must be re-issuing
+     * an access that was previously handled as MMIO. Thus it is imperative that
+     * we handle this access in the same way to guarantee completion and hence
+     * clean up any interim state.
+     */
+    if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_READ, false) )
+        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
@@ -1135,8 +1146,9 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
                         uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
+    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     unsigned int offset = addr & ~PAGE_MASK;
-    int rc;
+    int rc = HVMTRANS_bad_gfn_to_mfn;
 
     if ( offset + bytes > PAGE_SIZE )
     {
@@ -1150,7 +1162,14 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
         return rc;
     }
 
-    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    /*
+     * If there is an MMIO cache entry for the access then we must be re-issuing
+     * an access that was previously handled as MMIO. Thus it is imperative that
+     * we handle this access in the same way to guarantee completion and hence
+     * clean up any interim state.
+     */
+    if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_WRITE, false) )
+        rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.12


 

