
[Xen-changelog] [xen staging-4.6] x86/HVM: guard against emulator driving ioreq state in weird ways



commit c53663a223ab3cd600855746673fe922fffb83e9
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue May 8 18:28:20 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue May 8 18:28:20 2018 +0100

    x86/HVM: guard against emulator driving ioreq state in weird ways
    
    In the case where hvm_wait_for_io() calls wait_on_xen_event_channel(),
    p->state ends up being read twice in succession: once to determine that
    state != p->state, and then again at the top of the loop.  This gives a
    compromised emulator a chance to change the state back between the two
    reads, potentially keeping Xen in a loop indefinitely.
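
    As a reduced sketch of that pattern (an illustration only, not the Xen
    sources; wait_until() stands in for wait_on_xen_event_channel(), which
    likewise re-evaluates its condition):

        while ( sv->pending )
        {
            unsigned int state = p->state;   /* read at the top of the loop */

            switch ( state )
            {
            case STATE_IOREQ_READY:
            case STATE_IOREQ_INPROCESS:
                /*
                 * The condition below reads p->state again.  Once it sees a
                 * change, the wait completes and control returns to the top
                 * of the loop, where p->state is read once more.  A
                 * compromised emulator can restore the old value between
                 * those two reads, sending Xen straight back into the wait.
                 */
                wait_until(p->state != state);  /* stand-in for the real wait */
                break;
            /* ... remaining states elided ... */
            }
        }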
    
    Instead:
    * Read p->state once in each of the wait_on_xen_event_channel() tests,
    * re-use that value the next time around,
    * and insist that the states continue to transition "forward" (with the
      exception of the transition to STATE_IOREQ_NONE).
    
    This is XSA-262.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
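
    The shape of the fix, reduced to a sketch (again not the patched Xen
    code; the STATE_* values match xen/include/public/hvm/ioreq.h), is to
    take a single snapshot of p->state, remember it as prev_state, and treat
    any backwards transition other than one to STATE_IOREQ_NONE as hostile:

        #include <stdbool.h>

        #define STATE_IOREQ_NONE        0
        #define STATE_IOREQ_READY       1
        #define STATE_IOREQ_INPROCESS   2
        #define STATE_IORESP_READY      3

        /* Sketch helper, not a Xen function: true if the ioreq state
         * machine moved backwards. */
        static bool state_went_backwards(unsigned int prev_state,
                                         unsigned int state)
        {
            if ( state == STATE_IOREQ_NONE )   /* dying emulator: permitted */
                return false;
            return state < prev_state;         /* any other regression */
        }

    On such a regression the patch below clears sv->pending and calls
    domain_crash() instead of acting on the value; the value read inside the
    wait condition is also the value the recheck path then acts on, so there
    is no second read for the emulator to race against.
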
---
 xen/arch/x86/hvm/hvm.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dc92dca4ec..ebabc10763 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -453,14 +453,17 @@ static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
 
 static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
+    unsigned int prev_state = STATE_IOREQ_NONE;
+
     while ( sv->pending )
     {
         unsigned int state = p->state;
 
-        rmb();
-        switch ( state )
+        smp_rmb();
+
+    recheck:
+        if ( unlikely(state == STATE_IOREQ_NONE) )
         {
-        case STATE_IOREQ_NONE:
             /*
              * The only reason we should see this case is when an
              * emulator is dying and it races with an I/O being
@@ -468,14 +471,30 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
              */
             hvm_io_assist(sv, ~0ul);
             break;
+        }
+
+        if ( unlikely(state < prev_state) )
+        {
+            gdprintk(XENLOG_ERR, "Weird HVM ioreq state transition %u -> %u\n",
+                     prev_state, state);
+            sv->pending = 0;
+            domain_crash(sv->vcpu->domain);
+            return 0; /* bail */
+        }
+
+        switch ( prev_state = state )
+        {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
             p->state = STATE_IOREQ_NONE;
             hvm_io_assist(sv, p->data);
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
-            wait_on_xen_event_channel(sv->ioreq_evtchn, p->state != state);
-            break;
+            wait_on_xen_event_channel(sv->ioreq_evtchn,
+                                      ({ state = p->state;
+                                         smp_rmb();
+                                         state != prev_state; }));
+            goto recheck;
         default:
             gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
             sv->pending = 0;
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.6

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

