
[Xen-changelog] [xen master] pvh: tolerate HVM guests having no ioreq page



commit 68209bce0f551dcae991877ffd58211498bb0755
Author:     George Dunlap <george.dunlap@xxxxxxxxxxxxx>
AuthorDate: Wed Nov 13 09:29:02 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Nov 13 09:29:02 2013 +0100

    pvh: tolerate HVM guests having no ioreq page
    
    PVH guests don't have a backing device model emulator (qemu); just
    tolerate this situation explicitly, rather than special-casing PVH.
    
    For unhandled IO, hvmemul_do_io() will now return X86EMUL_OKAY, which
    I believe is the same effect as qemu not having a handler for the IO.
    
    This also fixes a potential DoS in the host from the reworked series:
    if the guest made a hypercall which sends an invalidate request, it
    would have crashed the host.
    
    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>
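
In condensed form, the flow the emulate.c hunks below implement (an
illustrative sketch reusing the patch's identifiers, not a hunk itself):

    ioreq_t _ioreq;
    ioreq_t *p = get_ioreq(curr);      /* may now return NULL */
    bool_t has_dm = 1;

    if ( !p )
    {
        /* No backing device model: use a throw-away ioreq on the stack. */
        has_dm = 0;
        p = &_ioreq;
        p->state = STATE_IOREQ_NONE;
    }

    /* ... emulation proceeds as before; on X86EMUL_UNHANDLEABLE: ... */

    if ( !has_dm )
    {
        /* Nothing to forward the access to: treat it as handled. */
        rc = X86EMUL_OKAY;
        vio->io_state = HVMIO_none;
    }
    else
        rc = X86EMUL_RETRY;            /* hand the request to the device model */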
---
 xen/arch/x86/hvm/emulate.c        |   32 +++++++++++++++++++++++++++-----
 xen/arch/x86/hvm/hvm.c            |   11 ++++++++---
 xen/arch/x86/hvm/io.c             |    6 ++++--
 xen/include/asm-x86/hvm/io.h      |    2 +-
 xen/include/asm-x86/hvm/support.h |    3 +--
 5 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f39c173..868aa1d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -58,10 +58,23 @@ static int hvmemul_do_io(
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio;
     ioreq_t *p = get_ioreq(curr);
+    ioreq_t _ioreq;
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
     struct page_info *ram_page;
     int rc;
+    bool_t has_dm = 1;
+
+    /*
+     * Domains without a backing DM don't have an ioreq page.  Just
+     * point to a struct on the stack, initialising the state as needed.
+     */
+    if ( !p )
+    {
+        has_dm = 0;
+        p = &_ioreq;
+        p->state = STATE_IOREQ_NONE;
+    }
 
     /* Check for paged out page */
     ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
@@ -211,7 +224,7 @@ static int hvmemul_do_io(
         p->state = STATE_IORESP_READY;
         if ( !vio->mmio_retry )
         {
-            hvm_io_assist();
+            hvm_io_assist(p);
             vio->io_state = HVMIO_none;
         }
         else
@@ -219,11 +232,20 @@ static int hvmemul_do_io(
             vio->io_state = HVMIO_handle_mmio_awaiting_completion;
         break;
     case X86EMUL_UNHANDLEABLE:
-        rc = X86EMUL_RETRY;
-        if ( !hvm_send_assist_req(curr) )
-            vio->io_state = HVMIO_none;
-        else if ( p_data == NULL )
+        /* If there is no backing DM, just ignore accesses */
+        if ( !has_dm )
+        {
             rc = X86EMUL_OKAY;
+            vio->io_state = HVMIO_none;
+        }
+        else
+        {
+            rc = X86EMUL_RETRY;
+            if ( !hvm_send_assist_req(curr) )
+                vio->io_state = HVMIO_none;
+            else if ( p_data == NULL )
+                rc = X86EMUL_OKAY;
+        }
         break;
     default:
         BUG();
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e7862bc..f235a24 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -347,13 +347,15 @@ void hvm_do_resume(struct vcpu *v)
     check_wakeup_from_wait();
 
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(v);
+    if ( !(p = get_ioreq(v)) )
+        goto check_inject_trap;
+
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-            hvm_io_assist();
+            hvm_io_assist(p);
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
@@ -368,6 +370,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+ check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 ) 
     {
@@ -1227,7 +1230,9 @@ bool_t hvm_send_assist_req(struct vcpu *v)
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return 0; /* implicitly bins the i/o operation */
 
-    p = get_ioreq(v);
+    if ( !(p = get_ioreq(v)) )
+        return 0;
+
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model. Crash the domain. */
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index feb0406..deb7b92 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -152,6 +152,9 @@ void send_invalidate_req(void)
     struct vcpu *v = current;
     ioreq_t *p = get_ioreq(v);
 
+    if ( !p )
+        return;
+
     if ( p->state != STATE_IOREQ_NONE )
     {
         gdprintk(XENLOG_ERR, "WARNING: send invalidate req with something "
@@ -262,11 +265,10 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }
 
-void hvm_io_assist(void)
+void hvm_io_assist(ioreq_t *p)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    ioreq_t *p = get_ioreq(curr);
     enum hvm_io_state io_state;
 
     rmb(); /* see IORESP_READY /then/ read contents of ioreq */
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index b0718b8..6f4cb96 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -121,7 +121,7 @@ int handle_mmio(void);
 int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(void);
+void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   union vioapic_redir_entry *ent);
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 52aef1f..3529499 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -32,8 +32,7 @@ static inline ioreq_t *get_ioreq(struct vcpu *v)
     struct domain *d = v->domain;
     shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
     ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
-    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
-    return &p->vcpu_ioreq[v->vcpu_id];
+    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
 }
 
 #define HVM_DELIVER_NO_ERROR_CODE  -1
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog