
[Xen-changelog] [PATCH] vmx-io.patch



ChangeSet 1.1576, 2005/05/28 09:53:39+01:00, arun.sharma@xxxxxxxxx

        [PATCH] vmx-io.patch
        
        Fix some of the race conditions that show up when the device models are
        running on one CPU and the VMX domain is running on another on an SMP
        system.
        
        Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
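
The race being fixed is the classic lost-wakeup pattern: the device model,
running on another physical CPU, can complete the ioreq and send the event
notification in the window around do_block(), or an unrelated event can wake
the blocked VMX domain before the I/O response is actually ready.  The new
vmx_wait_io()/vmx_check_events() path closes both windows by clearing the
pending event *before* looking for work and by looping back into do_block()
until ARCH_VMX_IO_WAIT really is clear.  The standalone program below is only
a rough userspace model of that pattern, not the Xen code itself: a pthread
mutex/condvar stands in for do_block()/evtchn_send(), and the names
(device_model, wait_io_model, pending, resp_ready) are illustrative stand-ins.

    /*
     * Userspace model of the wait-for-I/O-response loop (illustrative only;
     * names and types are stand-ins for the Xen structures).
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;

    static int pending;      /* models the IOPACKET_PORT pending bit        */
    static int resp_ready;   /* models p->state == STATE_IORESP_READY       */

    /* models the device model completing the ioreq on another CPU */
    static void *device_model(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        resp_ready = 1;                  /* finish the I/O request ...        */
        pending = 1;                     /* ... then raise the event          */
        pthread_cond_broadcast(&kick);   /* models evtchn_send(IOPACKET_PORT) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    /* models vmx_wait_io(): only returns once a real response has arrived */
    static void wait_io_model(void)
    {
        pthread_mutex_lock(&lock);
        for (;;) {
            /* clear the event *before* checking for work (set-and-check race) */
            int had_event = pending;
            pending = 0;

            if (had_event && resp_ready)
                break;                   /* genuine I/O response: stop waiting */

            /* no response yet (or an unrelated wakeup): block again           */
            pthread_cond_wait(&kick, &lock);
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t dm;

        pthread_create(&dm, NULL, device_model, NULL);
        wait_io_model();
        pthread_join(dm, NULL);

        puts("I/O response consumed; safe to resume the guest");
        return 0;
    }

In the model the pending check sits before the wait, which is the usual
condition-variable idiom; in the hypervisor the block-then-check order is
safe because event delivery re-wakes a blocked domain, and the re-check loop
catches anything that raced in between.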



 arch/x86/vmx.c                 |    3 -
 arch/x86/vmx_io.c              |   82 ++++++++++++++++++++++++++++-------------
 arch/x86/vmx_platform.c        |    3 -
 include/asm-x86/vmx_platform.h |    1 
 4 files changed, 59 insertions(+), 30 deletions(-)


diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        2005-05-28 05:03:39 -04:00
+++ b/xen/arch/x86/vmx.c        2005-05-28 05:03:39 -04:00
@@ -465,7 +465,7 @@
     set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
     p->state = STATE_IOREQ_READY;
     evtchn_send(IOPACKET_PORT);
-    do_block();
+    vmx_wait_io();
 }
 
 enum { COPY_IN = 0, COPY_OUT };
@@ -1266,7 +1266,6 @@
     case EXIT_REASON_PENDING_INTERRUPT:
         __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 
               MONITOR_CPU_BASED_EXEC_CONTROLS);
-        vmx_intr_assist(ed);
         break;
     case EXIT_REASON_TASK_SWITCH:
         __vmx_bug(&regs);
diff -Nru a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     2005-05-28 05:03:39 -04:00
+++ b/xen/arch/x86/vmx_io.c     2005-05-28 05:03:39 -04:00
@@ -34,9 +34,6 @@
 #include <asm/vmx_virpit.h>
 
 #ifdef CONFIG_VMX
-
-extern long do_block();
-  
 #if defined (__i386__)
 static void load_cpu_user_regs(struct cpu_user_regs *regs)
 { 
@@ -186,7 +183,6 @@
 {
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    struct domain *d = ed->domain;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     unsigned long old_eax;
     int sign;
@@ -196,12 +192,6 @@
     mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
     inst_decoder_regs = mpci_p->inst_decoder_regs;
 
-    /* clear the pending event */
-    ed->vcpu_info->evtchn_upcall_pending = 0;
-    /* clear the pending bit for port 2 */
-    clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel);
-    clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_pending[0]);
-
     vio = (vcpu_iodata_t *) ed->arch.arch_vmx.vmx_platform.shared_page_va;
     if (vio == 0) {
         VMX_DBG_LOG(DBG_LEVEL_1, 
@@ -217,8 +207,8 @@
     /* clear IO wait VMX flag */
     if (test_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags)) {
         if (p->state != STATE_IORESP_READY) {
-            printk("got a false I/O reponse\n");
-            do_block();
+                /* An interrupt send event raced us */
+                return;
         } else {
             p->state = STATE_INVALID;
         }
@@ -282,6 +272,51 @@
     }
 }
 
+int vmx_clear_pending_io_event(struct exec_domain *ed) 
+{
+    struct domain *d = ed->domain;
+
+    /* evtchn_pending is shared by other event channels in 0-31 range */
+    if (!d->shared_info->evtchn_pending[IOPACKET_PORT>>5])
+        clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel);
+
+    /* Note: VMX domains may need upcalls as well */
+    if (!ed->vcpu_info->evtchn_pending_sel) 
+        ed->vcpu_info->evtchn_upcall_pending = 0;
+
+    /* clear the pending bit for IOPACKET_PORT */
+    return test_and_clear_bit(IOPACKET_PORT, 
+                              &d->shared_info->evtchn_pending[0]);
+}
+
+/* Because we've cleared the pending events first, we need to guarantee that
+ * all events to be handled by xen for VMX domains are taken care of here.
+ *
+ * interrupts are guaranteed to be checked before resuming guest. 
+ * VMX upcalls have been already arranged for if necessary. 
+ */
+void vmx_check_events(struct exec_domain *d) 
+{
+    /* clear the event *before* checking for work. This should avoid 
+       the set-and-check races */
+    if (vmx_clear_pending_io_event(current))
+        vmx_io_assist(d);
+}
+
+/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from 
+   the device model */
+void vmx_wait_io()
+{
+    extern void do_block();
+
+    do {
+        do_block();
+        vmx_check_events(current);
+        if (!test_bit(ARCH_VMX_IO_WAIT, &current->arch.arch_vmx.flags))
+            break;
+    } while(1);
+}
+
 #if defined(__i386__) || defined(__x86_64__)
 static inline int __fls(u32 word)
 {
@@ -440,22 +475,17 @@
     __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
 
     if (event_pending(d)) {
-        if (test_bit(IOPACKET_PORT, &d->domain->shared_info->evtchn_pending[0]))
-            vmx_io_assist(d);
+        vmx_check_events(d);
 
-        else if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
-            printk("got an event while blocked on I/O\n");
-            do_block();
-        }
-                
-        /* Assumption: device model will not inject an interrupt
-         * while an ioreq_t is pending i.e. the response and 
-         * interrupt can come together. But an interrupt without 
-         * a response to ioreq_t is not ok.
-         */
+        if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
+            vmx_wait_io();
     }
-    if (!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
-        vmx_intr_assist(d);
+
+    /* We can't resume the guest if we're waiting on I/O */
+    ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags));
+
+    /* We always check for interrupts before resuming guest */
+    vmx_intr_assist(d);
 }
 
 #endif /* CONFIG_VMX */
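
For context on the bookkeeping that vmx_clear_pending_io_event() manipulates
above: a port's pending state lives in three places (the per-port bit in
shared_info->evtchn_pending[], the per-32-port selector bit in
vcpu_info->evtchn_pending_sel, and the per-vcpu evtchn_upcall_pending flag),
and the outer two are shared with every other port in their range, so they
can only be cleared once nothing beneath them is still pending.  The
self-contained sketch below illustrates that layering with stand-in struct
types; the field names follow the patch, but these are not the real Xen
definitions, and the clears are ordered bottom-up for readability rather
than exactly as in the hunk above.

    /* Illustrative stand-in types; only the field names mirror the patch. */
    #include <stdio.h>

    #define IOPACKET_PORT 2   /* "port 2" per the comment removed above */

    struct shared_info_model { unsigned long evtchn_pending[32]; };
    struct vcpu_info_model   { unsigned long evtchn_pending_sel;
                               unsigned char evtchn_upcall_pending; };

    /* Returns nonzero iff the port bit was set (and is now clear), mirroring
     * the test_and_clear_bit() result the patch hands back to the caller.   */
    static int clear_pending_io_event(struct shared_info_model *s,
                                      struct vcpu_info_model *v)
    {
        unsigned int  word = IOPACKET_PORT >> 5;          /* ports 0-31 -> word 0 */
        unsigned long mask = 1UL << (IOPACKET_PORT & 31);
        int was_set = (s->evtchn_pending[word] & mask) != 0;

        s->evtchn_pending[word] &= ~mask;                 /* per-port bit         */

        /* The selector bit covers all 32 ports of the word, so it may only be
         * cleared once no other port in that word is still pending.           */
        if (s->evtchn_pending[word] == 0)
            v->evtchn_pending_sel &= ~(1UL << word);

        /* The upcall flag in turn covers every selector bit.                  */
        if (v->evtchn_pending_sel == 0)
            v->evtchn_upcall_pending = 0;

        return was_set;
    }

    int main(void)
    {
        /* IOPACKET_PORT and port 4 both pending in word 0 */
        struct shared_info_model s =
            { .evtchn_pending = { (1UL << IOPACKET_PORT) | (1UL << 4) } };
        struct vcpu_info_model   v =
            { .evtchn_pending_sel = 1UL, .evtchn_upcall_pending = 1 };

        int cleared = clear_pending_io_event(&s, &v);

        /* Port 4 is still pending, so the selector and upcall flag stay set. */
        printf("cleared=%d word0=%#lx sel=%#lx upcall=%d\n",
               cleared, s.evtchn_pending[0], v.evtchn_pending_sel,
               (int)v.evtchn_upcall_pending);
        return 0;
    }

vmx_check_events() relies on the return value in the same way: only a
set-and-now-cleared IOPACKET_PORT bit causes vmx_io_assist() to run, so a
wakeup with nothing pending falls straight through.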
diff -Nru a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       2005-05-28 05:03:39 -04:00
+++ b/xen/arch/x86/vmx_platform.c       2005-05-28 05:03:39 -04:00
@@ -470,7 +470,6 @@
     struct mi_per_cpu_info *mpci_p;
     struct cpu_user_regs *inst_decoder_regs;
     extern long evtchn_send(int lport);
-    extern long do_block(void);
 
     mpci_p = &current->arch.arch_vmx.vmx_platform.mpci;
     inst_decoder_regs = mpci_p->inst_decoder_regs;
@@ -520,7 +519,7 @@
 #endif
 
     evtchn_send(IOPACKET_PORT);
-    do_block(); 
+    vmx_wait_io();
 }
 
 void handle_mmio(unsigned long va, unsigned long gpa)
diff -Nru a/xen/include/asm-x86/vmx_platform.h b/xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h        2005-05-28 05:03:39 -04:00
+++ b/xen/include/asm-x86/vmx_platform.h        2005-05-28 05:03:39 -04:00
@@ -85,6 +85,7 @@
 };
 
 extern void handle_mmio(unsigned long, unsigned long);
+extern void vmx_wait_io(void);
 extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *);
 
 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

