
[Xen-devel] [PATCH v15 04/19] pvh: Tolerate HVM guests having no ioreq page



PVH guests don't have a backing device model emulator (qemu); just
tolerate this situation explicitly, rather than special-casing PVH.
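
Concretely, get_ioreq() stops asserting that an ioreq page exists and
returns NULL instead (see the support.h hunk at the end of this patch),
so callers must now check for NULL.  Condensed sketch:

    static inline ioreq_t *get_ioreq(struct vcpu *v)
    {
        shared_iopage_t *p = v->domain->arch.hvm_domain.ioreq.va;

        /* No ioreq page mapped (no device model backing the domain):
         * return NULL rather than asserting, and let callers cope. */
        return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
    }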

For unhandled IO, hvmemul_do_io() will now return X86EMUL_OKAY, which
I believe matches the effect of qemu being present but having no
handler for the IO.
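
In outline (condensed from the emulate.c hunks below), the no-DM path
in hvmemul_do_io() becomes:

    ioreq_t _ioreq;
    ioreq_t *p = get_ioreq(curr);
    int has_dm = 1;

    /* No ioreq page: point at a struct on the stack instead, and
     * remember that there is no DM to forward requests to. */
    if ( !p )
    {
        has_dm = 0;
        p = &_ioreq;
        p->state = STATE_IOREQ_NONE;
    }
    ...
    case X86EMUL_UNHANDLEABLE:
        /* No backing DM: complete the access as a no-op. */
        if ( !has_dm )
        {
            rc = X86EMUL_OKAY;
            vio->io_state = HVMIO_none;
        }
        ...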

This also fixes a potential DoS in the host from the reworked series:
if the guest made a hypercall which sent an invalidate request, it
would have crashed the host.
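
The DoS path is send_invalidate_req() using the ioreq pointer with no
device model present; with the io.c hunk below it now bails out early:

    void send_invalidate_req(void)
    {
        struct vcpu *v = current;
        ioreq_t *p;

        /* PVH (or any domain without a DM): no ioreq page, nothing
         * to invalidate. */
        if ( !(p = get_ioreq(v)) )
            return;
        ...
    }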

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
CC: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
CC: Jan Beulich <beulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/emulate.c        |   31 ++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/hvm.c            |   11 ++++++++---
 xen/arch/x86/hvm/io.c             |    8 +++++---
 xen/include/asm-x86/hvm/io.h      |    2 +-
 xen/include/asm-x86/hvm/support.h |    3 +--
 5 files changed, 41 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f39c173..590dcfe 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -58,10 +58,22 @@ static int hvmemul_do_io(
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio;
     ioreq_t *p = get_ioreq(curr);
+    ioreq_t _ioreq;
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
     struct page_info *ram_page;
     int rc;
+    int has_dm = 1;
+
+    /* Domains without a backing DM don't have an ioreq page.  Just
+     * point to a struct on the stack, initialising the state as
+     * needed. */
+    if ( !p )
+    {
+        has_dm = 0;
+        p = &_ioreq;
+        p->state = STATE_IOREQ_NONE;
+    }
 
     /* Check for paged out page */
     ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
@@ -211,7 +223,7 @@ static int hvmemul_do_io(
         p->state = STATE_IORESP_READY;
         if ( !vio->mmio_retry )
         {
-            hvm_io_assist();
+            hvm_io_assist(p);
             vio->io_state = HVMIO_none;
         }
         else
@@ -219,11 +231,20 @@ static int hvmemul_do_io(
             vio->io_state = HVMIO_handle_mmio_awaiting_completion;
         break;
     case X86EMUL_UNHANDLEABLE:
-        rc = X86EMUL_RETRY;
-        if ( !hvm_send_assist_req(curr) )
-            vio->io_state = HVMIO_none;
-        else if ( p_data == NULL )
+        /* If there is no backing DM, just ignore accesses */
+        if ( !has_dm )
+        {
             rc = X86EMUL_OKAY;
+            vio->io_state = HVMIO_none;
+        }
+        else
+        {
+            rc = X86EMUL_RETRY;
+            if ( !hvm_send_assist_req(curr) )
+                vio->io_state = HVMIO_none;
+            else if ( p_data == NULL )
+                rc = X86EMUL_OKAY;
+        }
         break;
     default:
         BUG();
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 85f9857..7aa1e8a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -347,13 +347,15 @@ void hvm_do_resume(struct vcpu *v)
     check_wakeup_from_wait();
 
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(v);
+    if ( !(p = get_ioreq(v)) )
+        goto check_inject_trap;
+
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-            hvm_io_assist();
+            hvm_io_assist(p);
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
@@ -368,6 +370,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+  check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 ) 
     {
@@ -1227,7 +1230,9 @@ bool_t hvm_send_assist_req(struct vcpu *v)
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return 0; /* implicitly bins the i/o operation */
 
-    p = get_ioreq(v);
+    if ( !(p = get_ioreq(v)) )
+        return 0;
+
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model. Crash the domain. */
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index feb0406..0389c56 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -150,7 +150,10 @@ void send_timeoffset_req(unsigned long timeoff)
 void send_invalidate_req(void)
 {
     struct vcpu *v = current;
-    ioreq_t *p = get_ioreq(v);
+    ioreq_t *p;
+
+    if ( !(p = get_ioreq(v)) )
+        return;
 
     if ( p->state != STATE_IOREQ_NONE )
     {
@@ -262,11 +265,10 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }
 
-void hvm_io_assist(void)
+void hvm_io_assist(ioreq_t *p)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    ioreq_t *p = get_ioreq(curr);
     enum hvm_io_state io_state;
 
     rmb(); /* see IORESP_READY /then/ read contents of ioreq */
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index b0718b8..6f4cb96 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -121,7 +121,7 @@ int handle_mmio(void);
 int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(void);
+void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   union vioapic_redir_entry *ent);
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 52aef1f..3529499 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -32,8 +32,7 @@ static inline ioreq_t *get_ioreq(struct vcpu *v)
     struct domain *d = v->domain;
     shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
     ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
-    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
-    return &p->vcpu_ioreq[v->vcpu_id];
+    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
 }
 
 #define HVM_DELIVER_NO_ERROR_CODE  -1
-- 
1.7.9.5

