[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Merge with xen-ia64-unstable.hg



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1175683777 -3600
# Node ID c278b1c580db2a5d04066b9e2443a49e796e1e73
# Parent  f378c424e0ced4cbc584e5c6125d065f1cc05d0c
# Parent  6cfc491631d59abafe4bba5936f7dc32165f190d
Merge with xen-ia64-unstable.hg
---
 tools/ioemu/target-i386-dm/helper2.c |   63 ++++++++---------------------------
 tools/libxc/xc_hvm_save.c            |   17 ---------
 tools/libxc/xenguest.h               |    2 -
 tools/libxc/xg_private.c             |    5 --
 xen/arch/x86/hvm/hvm.c               |   50 ---------------------------
 xen/arch/x86/hvm/io.c                |    3 +
 xen/arch/x86/hvm/platform.c          |    4 +-
 xen/common/schedule.c                |    4 ++
 xen/include/asm-x86/hvm/io.h         |    2 -
 xen/include/public/foreign/Makefile  |    2 -
 xen/include/public/hvm/hvm_op.h      |    8 ----
 11 files changed, 27 insertions(+), 133 deletions(-)

diff -r f378c424e0ce -r c278b1c580db tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Tue Apr 03 13:04:51 2007 -0600
+++ b/tools/ioemu/target-i386-dm/helper2.c      Wed Apr 04 11:49:37 2007 +0100
@@ -135,9 +135,6 @@ void cpu_reset(CPUX86State *env)
 {
     int xcHandle;
     int sts;
-
-    /* pause domain first, to avoid repeated reboot request*/
-    xc_domain_pause(xc_handle, domid);
 
     xcHandle = xc_interface_open();
     if (xcHandle < 0)
@@ -597,6 +594,7 @@ int main_loop(void)
     extern int suspend_requested;
     CPUState *env = cpu_single_env;
     int evtchn_fd = xc_evtchn_fd(xce_handle);
+    char qemu_file[20];
 
     buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
@@ -604,52 +602,23 @@ int main_loop(void)
 
     qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
 
-    while (1) {
-        if (vm_running) {
-            if (shutdown_requested)
-                break;
-            if (reset_requested) {
-                qemu_system_reset();
-                reset_requested = 0;
-            }
-            if (suspend_requested) {
-                fprintf(logfile, "device model received suspend signal!\n");
-                break;
-            }
-        }
-
+    while (!(vm_running && suspend_requested))
         /* Wait up to 10 msec. */
         main_loop_wait(10);
-    }
-    if (!suspend_requested)
-        destroy_hvm_domain();
-    else {
-        char qemu_file[20];
-        ioreq_t *req;
-        int rc;
-
-        sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
-        xc_domain_pause(xc_handle, domid);
-
-        /* Pull all outstanding ioreqs through the system */
-        handle_buffered_io(env);
-        main_loop_wait(1); /* For the select() on events */
-        
-        /* Stop the IDE thread */
-        ide_stop_dma_thread();
-
-        /* Make sure that all outstanding IO responses are handled too */ 
-        if ( xc_hvm_drain_io(xc_handle, domid) != 0 )
-        {
-            fprintf(stderr, "error clearing ioreq rings (%s)\n", 
-                    strerror(errno));
-            return -1;
-        }
-
-        /* Save the device state */
-        if (qemu_savevm(qemu_file) < 0)
-            fprintf(stderr, "qemu save fail.\n");
-    }
+
+    fprintf(logfile, "device model received suspend signal!\n");
+
+    /* Pull all outstanding ioreqs through the system */
+    handle_buffered_io(env);
+    main_loop_wait(1); /* For the select() on events */
+
+    /* Stop the IDE thread */
+    ide_stop_dma_thread();
+
+    /* Save the device state */
+    sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
+    if (qemu_savevm(qemu_file) < 0)
+        fprintf(stderr, "qemu save fail.\n");
 
     return 0;
 }
diff -r f378c424e0ce -r c278b1c580db tools/libxc/xc_hvm_save.c
--- a/tools/libxc/xc_hvm_save.c Tue Apr 03 13:04:51 2007 -0600
+++ b/tools/libxc/xc_hvm_save.c Wed Apr 04 11:49:37 2007 +0100
@@ -59,23 +59,6 @@ static int qemu_active;
 static int qemu_active;
 static int qemu_non_active;
 
-int xc_hvm_drain_io(int handle, domid_t dom)
-{
-    DECLARE_HYPERCALL;
-    xen_hvm_drain_io_t arg;
-    int rc;
-
-    hypercall.op     = __HYPERVISOR_hvm_op;
-    hypercall.arg[0] = HVMOP_drain_io;
-    hypercall.arg[1] = (unsigned long)&arg;
-    arg.domid = dom;
-    if ( lock_pages(&arg, sizeof(arg)) != 0 )
-        return -1;
-    rc = do_xen_hypercall(handle, &hypercall);
-    unlock_pages(&arg, sizeof(arg));
-    return rc;
-}
-
 /*
 ** During (live) save/migrate, we maintain a number of bitmaps to track
 ** which pages we have to send, to fixup, and to skip.
diff -r f378c424e0ce -r c278b1c580db tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h    Tue Apr 03 13:04:51 2007 -0600
+++ b/tools/libxc/xenguest.h    Wed Apr 04 11:49:37 2007 +0100
@@ -159,8 +159,6 @@ int xc_get_hvm_param(
 int xc_get_hvm_param(
     int handle, domid_t dom, int param, unsigned long *value);
 
-int xc_hvm_drain_io(int handle, domid_t dom);
-
 /* PowerPC specific. */
 int xc_prose_build(int xc_handle,
                    uint32_t domid,
diff -r f378c424e0ce -r c278b1c580db tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Tue Apr 03 13:04:51 2007 -0600
+++ b/tools/libxc/xg_private.c  Wed Apr 04 11:49:37 2007 +0100
@@ -227,11 +227,6 @@ __attribute__((weak)) int xc_get_hvm_par
 
 __attribute__((weak)) int xc_set_hvm_param(
     int handle, domid_t dom, int param, unsigned long value)
-{
-    return -ENOSYS;
-}
-
-__attribute__((weak)) int xc_hvm_drain_io(int handle, domid_t dom)
 {
     return -ENOSYS;
 }
diff -r f378c424e0ce -r c278b1c580db xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/arch/x86/hvm/hvm.c    Wed Apr 04 11:49:37 2007 +0100
@@ -131,7 +131,7 @@ void hvm_do_resume(struct vcpu *v)
         switch ( p->state )
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-            hvm_io_assist(v);
+            hvm_io_assist();
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
@@ -144,48 +144,6 @@ void hvm_do_resume(struct vcpu *v)
             domain_crash_synchronous();
         }
     }
-}
-
-/* Called from the tools when saving a domain to make sure the io
- * request-response ring is entirely empty. */
-static int hvmop_drain_io(
-    XEN_GUEST_HANDLE(xen_hvm_drain_io_t) uop)
-{
-    struct xen_hvm_drain_io op;
-    struct domain *d;
-    struct vcpu *v;
-    ioreq_t *p;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
-    d = rcu_lock_domain_by_id(op.domid);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = -EINVAL;
-    /* Can't do this to yourself, or to a domain without an ioreq ring */
-    if ( d == current->domain || !is_hvm_domain(d) || get_sp(d) == NULL )
-        goto out;
-
-    rc = 0;
-
-    domain_pause(d);  /* It's not safe to do this to running vcpus */
-    for_each_vcpu(d, v)
-    {
-        p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
-        if ( p->state == STATE_IORESP_READY )
-            hvm_io_assist(v);
-    }
-    domain_unpause(d);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
 }
 
 int hvm_domain_initialise(struct domain *d)
@@ -963,12 +921,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
         break;
 
-    case HVMOP_drain_io:
-        rc = hvmop_drain_io(
-            guest_handle_cast(arg, xen_hvm_drain_io_t));
-        break;
-
-
     default:
     {
         gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
diff -r f378c424e0ce -r c278b1c580db xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/arch/x86/hvm/io.c     Wed Apr 04 11:49:37 2007 +0100
@@ -764,13 +764,14 @@ static void hvm_mmio_assist(struct cpu_u
     }
 }
 
-void hvm_io_assist(struct vcpu *v)
+void hvm_io_assist(void)
 {
     vcpu_iodata_t *vio;
     ioreq_t *p;
     struct cpu_user_regs *regs;
     struct hvm_io_op *io_opp;
     unsigned long gmfn;
+    struct vcpu *v = current;
     struct domain *d = v->domain;
 
     io_opp = &v->arch.hvm_vcpu.io_op;
diff -r f378c424e0ce -r c278b1c580db xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/arch/x86/hvm/platform.c       Wed Apr 04 11:49:37 2007 +0100
@@ -865,7 +865,7 @@ void send_pio_req(unsigned long port, un
     if ( hvm_portio_intercept(p) )
     {
         p->state = STATE_IORESP_READY;
-        hvm_io_assist(v);
+        hvm_io_assist();
         return;
     }
 
@@ -914,7 +914,7 @@ static void send_mmio_req(unsigned char 
     if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
     {
         p->state = STATE_IORESP_READY;
-        hvm_io_assist(v);
+        hvm_io_assist();
         return;
     }
 
diff -r f378c424e0ce -r c278b1c580db xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/common/schedule.c     Wed Apr 04 11:49:37 2007 +0100
@@ -461,7 +461,11 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HAN
         if ( d == NULL )
             break;
 
+        /* domain_pause() prevents any further execution in guest context. */
+        domain_pause(d);
         domain_shutdown(d, (u8)sched_remote_shutdown.reason);
+        domain_unpause(d);
+
         rcu_unlock_domain(d);
         ret = 0;
 
diff -r f378c424e0ce -r c278b1c580db xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/include/asm-x86/hvm/io.h      Wed Apr 04 11:49:37 2007 +0100
@@ -149,7 +149,7 @@ void send_timeoffset_req(unsigned long t
 void send_timeoffset_req(unsigned long timeoff);
 extern void handle_mmio(unsigned long gpa);
 extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-extern void hvm_io_assist(struct vcpu *v);
+extern void hvm_io_assist(void);
 
 #endif /* __ASM_X86_HVM_IO_H__ */
 
diff -r f378c424e0ce -r c278b1c580db xen/include/public/foreign/Makefile
--- a/xen/include/public/foreign/Makefile       Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/include/public/foreign/Makefile       Wed Apr 04 11:49:37 2007 +0100
@@ -13,7 +13,7 @@ clean:
        rm -f checker checker.c $(XEN_TARGET_ARCH).size
        rm -f *.pyc *.o *~
 
-ifeq ($(CROSS_COMPILE),)
+ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH))
 check-headers: checker
        ./checker > $(XEN_TARGET_ARCH).size
        diff -u reference.size $(XEN_TARGET_ARCH).size
diff -r f378c424e0ce -r c278b1c580db xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h   Tue Apr 03 13:04:51 2007 -0600
+++ b/xen/include/public/hvm/hvm_op.h   Wed Apr 04 11:49:37 2007 +0100
@@ -70,12 +70,4 @@ typedef struct xen_hvm_set_pci_link_rout
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
-/* Drain all outstanding qemu-dm IO responses from a domain's ioreq ring. */
-#define HVMOP_drain_io            5
-struct xen_hvm_drain_io {
-    domid_t  domid;
-};
-typedef struct xen_hvm_drain_io xen_hvm_drain_io_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_drain_io_t);
-
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.