[Xen-changelog] [xen master] x86/HVM: don't ignore hvm_copy_to_guest_phys() errors during I/O intercept



commit e0c4efbe6dd6822c91b466c34bc6d8aaa935d90f
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Oct 14 09:52:33 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Oct 14 09:52:33 2013 +0200

    x86/HVM: don't ignore hvm_copy_to_guest_phys() errors during I/O intercept
    
    Building upon the extended retry logic, we can now also make sure not
    to ignore errors resulting from writing data back to guest memory.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/intercept.c |  120 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 107 insertions(+), 13 deletions(-)
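
The core of both hunks is the same classification of hvm_copy_to_guest_phys()
results, which the old code either ignored outright or only partially checked.
Below is a minimal standalone sketch of that classification; the enum values
mirror the diff, while the classify() helper, the drop flag, and the mock
X86EMUL_* constants are illustrative stand-ins, not part of the patch:

#include <assert.h>
#include <stdio.h>

/* Mock of the HVMCOPY_* results returned by hvm_copy_to_guest_phys(). */
typedef enum {
    HVMCOPY_okay,
    HVMCOPY_gfn_paged_out,
    HVMCOPY_gfn_shared,
    HVMCOPY_bad_gfn_to_mfn,
    HVMCOPY_bad_gva_to_gfn
} hvmcopy_result;

/* Mock of the emulator return codes used in the patch. */
enum { X86EMUL_OKAY, X86EMUL_RETRY, X86EMUL_UNHANDLEABLE };

/*
 * Classify one copy result the way the patch does: paged-out and shared
 * pages become a retry, a failed gfn->mfn translation drops the write
 * (as real hardware would) but lets the loop continue, and anything
 * else is unhandleable.  A gva->gfn failure cannot occur for a copy to
 * a physical address, hence the ASSERT(0) in the patch.
 */
static int classify(hvmcopy_result res, int *drop)
{
    *drop = 0;
    switch ( res )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;
    case HVMCOPY_gfn_paged_out:
    case HVMCOPY_gfn_shared:
        return X86EMUL_RETRY;
    case HVMCOPY_bad_gfn_to_mfn:
        *drop = 1;              /* drop the write, keep iterating */
        return X86EMUL_OKAY;
    case HVMCOPY_bad_gva_to_gfn:
    default:
        return X86EMUL_UNHANDLEABLE;
    }
}

int main(void)
{
    int drop;

    assert(classify(HVMCOPY_okay, &drop) == X86EMUL_OKAY && !drop);
    assert(classify(HVMCOPY_gfn_paged_out, &drop) == X86EMUL_RETRY);
    assert(classify(HVMCOPY_bad_gfn_to_mfn, &drop) == X86EMUL_OKAY && drop);
    puts("copy-result classification matches the patch");
    return 0;
}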

diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 5ac0893..7cc13b5 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -47,6 +47,7 @@ static int hvm_mmio_access(struct vcpu *v,
                            hvm_mmio_read_t read_handler,
                            hvm_mmio_write_t write_handler)
 {
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
     unsigned long data;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
 
@@ -54,7 +55,16 @@ static int hvm_mmio_access(struct vcpu *v,
     {
         if ( p->dir == IOREQ_READ )
         {
-            rc = read_handler(v, p->addr, p->size, &data);
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
+                rc = read_handler(v, p->addr, p->size, &data);
             p->data = data;
         }
         else /* p->dir == IOREQ_WRITE */
@@ -66,18 +76,48 @@ static int hvm_mmio_access(struct vcpu *v,
     {
         for ( i = 0; i < p->count; i++ )
         {
-            int ret;
-
-            rc = read_handler(v, p->addr + step * i, p->size, &data);
-            if ( rc != X86EMUL_OKAY )
-                break;
-            ret = hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) || 
-                 (ret == HVMCOPY_gfn_shared) )
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
             {
+                rc = read_handler(v, p->addr + step * i, p->size, &data);
+                if ( rc != X86EMUL_OKAY )
+                    break;
+            }
+            switch ( hvm_copy_to_guest_phys(p->data + step * i,
+                                            &data, p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 rc = X86EMUL_RETRY;
                 break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                /* Drop the write as real hardware would. */
+                continue;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
             }
+            if ( rc != X86EMUL_OKAY )
+                break;
+        }
+
+        if ( rc == X86EMUL_RETRY )
+        {
+            vio->mmio_retry = 1;
+            vio->mmio_large_read_bytes = p->size;
+            memcpy(vio->mmio_large_read, &data, p->size);
         }
     }
     else
@@ -109,6 +149,9 @@ static int hvm_mmio_access(struct vcpu *v,
             if ( rc != X86EMUL_OKAY )
                 break;
         }
+
+        if ( rc == X86EMUL_RETRY )
+            vio->mmio_retry = 1;
     }
 
     if ( i != 0 )
@@ -137,6 +180,7 @@ int hvm_mmio_intercept(ioreq_t *p)
 
 static int process_portio_intercept(portio_action_t action, ioreq_t *p)
 {
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
     uint32_t data;
 
@@ -144,7 +188,16 @@ static int process_portio_intercept(portio_action_t action, ioreq_t *p)
     {
         if ( p->dir == IOREQ_READ )
         {
-            rc = action(IOREQ_READ, p->addr, p->size, &data);
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
+                rc = action(IOREQ_READ, p->addr, p->size, &data);
             p->data = data;
         }
         else
@@ -159,10 +212,48 @@ static int process_portio_intercept(portio_action_t action, ioreq_t *p)
     {
         for ( i = 0; i < p->count; i++ )
         {
-            rc = action(IOREQ_READ, p->addr, p->size, &data);
-            if ( rc != X86EMUL_OKAY )
+            if ( vio->mmio_retrying )
+            {
+                if ( vio->mmio_large_read_bytes != p->size )
+                    return X86EMUL_UNHANDLEABLE;
+                memcpy(&data, vio->mmio_large_read, p->size);
+                vio->mmio_large_read_bytes = 0;
+                vio->mmio_retrying = 0;
+            }
+            else
+            {
+                rc = action(IOREQ_READ, p->addr, p->size, &data);
+                if ( rc != X86EMUL_OKAY )
+                    break;
+            }
+            switch ( hvm_copy_to_guest_phys(p->data + step * i,
+                                            &data, p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
+                rc = X86EMUL_RETRY;
                 break;
-            (void)hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
+            case HVMCOPY_bad_gfn_to_mfn:
+                /* Drop the write as real hardware would. */
+                continue;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
+            }
+            if ( rc != X86EMUL_OKAY )
+                break;
+        }
+
+        if ( rc == X86EMUL_RETRY )
+        {
+            vio->mmio_retry = 1;
+            vio->mmio_large_read_bytes = p->size;
+            memcpy(vio->mmio_large_read, &data, p->size);
         }
     }
     else /* p->dir == IOREQ_WRITE */
@@ -195,6 +286,9 @@ static int process_portio_intercept(portio_action_t action, ioreq_t *p)
             if ( rc != X86EMUL_OKAY )
                 break;
         }
+
+        if ( rc == X86EMUL_RETRY )
+            vio->mmio_retry = 1;
     }
 
     if ( i != 0 )
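
On the read side, both functions handle a restarted rep instruction the same
way: the value already read is stashed in vio->mmio_large_read before the
retry is reported, and the retried iteration replays the cached bytes rather
than re-issuing the read, which could otherwise have side effects on a device
register.  A compilable sketch of that buffering follows, with a hypothetical
retry_state struct standing in for the relevant struct hvm_vcpu_io fields:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the struct hvm_vcpu_io fields used here. */
struct retry_state {
    uint8_t  buf[8];     /* plays the role of vio->mmio_large_read */
    unsigned buf_bytes;  /* plays the role of vio->mmio_large_read_bytes */
    int      retrying;   /* plays the role of vio->mmio_retrying */
};

/* Before reporting X86EMUL_RETRY, stash the value already read... */
static void stash_read(struct retry_state *s, const void *data, unsigned size)
{
    s->buf_bytes = size;
    memcpy(s->buf, data, size);
}

/*
 * ...and on the retried iteration, consume the cached bytes instead of
 * re-reading.  A size mismatch means the retry does not correspond to
 * the original access (X86EMUL_UNHANDLEABLE in the patch).
 */
static int replay_read(struct retry_state *s, void *data, unsigned size)
{
    if ( s->buf_bytes != size )
        return -1;
    memcpy(data, s->buf, size);
    s->buf_bytes = 0;
    s->retrying = 0;
    return 0;
}

int main(void)
{
    struct retry_state s = { .retrying = 1 };
    uint32_t in = 0x12345678, out = 0;

    stash_read(&s, &in, sizeof(in));
    assert(replay_read(&s, &out, sizeof(out)) == 0 && out == in);
    assert(!s.retrying && s.buf_bytes == 0);
    return 0;
}
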
--
generated by git-patchbot for /home/xen/git/xen.git#master
