[Xen-changelog] [xen-unstable] Remove hvm_drain_io() hypercall, simplify qemu main loop and do not pause the domain on reboot



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1175679778 -3600
# Node ID 2d25db73ddc14aeee038479a808005a0cdf55e60
# Parent  7e431ea834a877b1f0c90bdb1e6f1346da4e81cc
Remove hvm_drain_io() hypercall, simplify qemu main loop and do not
pause the domain on reboot (not necessary and defeats new deferred
shutdown logic).
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/ioemu/target-i386-dm/helper2.c |   63 ++++++++---------------------------
 tools/libxc/xc_hvm_save.c            |   17 ---------
 tools/libxc/xenguest.h               |    2 -
 tools/libxc/xg_private.c             |    5 --
 xen/arch/x86/hvm/hvm.c               |   48 --------------------------
 xen/common/schedule.c                |    4 ++
 xen/include/public/hvm/hvm_op.h      |    8 ----
 7 files changed, 20 insertions(+), 127 deletions(-)
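
For quick reference, the "simplify qemu main loop" part of this change leaves main_loop() in tools/ioemu/target-i386-dm/helper2.c reading roughly as follows. This is a condensed sketch assembled from the hunks below, not a literal copy of the resulting file; the small amount of context that falls between the two hunks is elided:

int main_loop(void)
{
    extern int suspend_requested;
    CPUState *env = cpu_single_env;
    int evtchn_fd = xc_evtchn_fd(xce_handle);
    char qemu_file[20];

    buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
    /* ... (context between the two hunks elided) ... */
    qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);

    /* Service events until the running guest requests suspend;
       wait up to 10 msec per iteration. */
    while (!(vm_running && suspend_requested))
        main_loop_wait(10);

    fprintf(logfile, "device model received suspend signal!\n");

    /* Pull all outstanding ioreqs through the system. */
    handle_buffered_io(env);
    main_loop_wait(1); /* For the select() on events */

    /* Stop the IDE thread. */
    ide_stop_dma_thread();

    /* Save the device state; no xc_hvm_drain_io() call any more. */
    sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
    if (qemu_savevm(qemu_file) < 0)
        fprintf(stderr, "qemu save fail.\n");

    return 0;
}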

diff -r 7e431ea834a8 -r 2d25db73ddc1 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Tue Apr 03 13:22:37 2007 +0100
+++ b/tools/ioemu/target-i386-dm/helper2.c      Wed Apr 04 10:42:58 2007 +0100
@@ -135,9 +135,6 @@ void cpu_reset(CPUX86State *env)
 {
     int xcHandle;
     int sts;
-
-    /* pause domain first, to avoid repeated reboot request*/
-    xc_domain_pause(xc_handle, domid);
 
     xcHandle = xc_interface_open();
     if (xcHandle < 0)
@@ -597,6 +594,7 @@ int main_loop(void)
     extern int suspend_requested;
     CPUState *env = cpu_single_env;
     int evtchn_fd = xc_evtchn_fd(xce_handle);
+    char qemu_file[20];
 
     buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
                                       cpu_single_env);
@@ -604,52 +602,23 @@ int main_loop(void)
 
     qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
 
-    while (1) {
-        if (vm_running) {
-            if (shutdown_requested)
-                break;
-            if (reset_requested) {
-                qemu_system_reset();
-                reset_requested = 0;
-            }
-            if (suspend_requested) {
-                fprintf(logfile, "device model received suspend signal!\n");
-                break;
-            }
-        }
-
+    while (!(vm_running && suspend_requested))
         /* Wait up to 10 msec. */
         main_loop_wait(10);
-    }
-    if (!suspend_requested)
-        destroy_hvm_domain();
-    else {
-        char qemu_file[20];
-        ioreq_t *req;
-        int rc;
-
-        sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
-        xc_domain_pause(xc_handle, domid);
-
-        /* Pull all outstanding ioreqs through the system */
-        handle_buffered_io(env);
-        main_loop_wait(1); /* For the select() on events */
-        
-        /* Stop the IDE thread */
-        ide_stop_dma_thread();
-
-        /* Make sure that all outstanding IO responses are handled too */ 
-        if ( xc_hvm_drain_io(xc_handle, domid) != 0 )
-        {
-            fprintf(stderr, "error clearing ioreq rings (%s)\n", 
-                    strerror(errno));
-            return -1;
-        }
-
-        /* Save the device state */
-        if (qemu_savevm(qemu_file) < 0)
-            fprintf(stderr, "qemu save fail.\n");
-    }
+
+    fprintf(logfile, "device model received suspend signal!\n");
+
+    /* Pull all outstanding ioreqs through the system */
+    handle_buffered_io(env);
+    main_loop_wait(1); /* For the select() on events */
+
+    /* Stop the IDE thread */
+    ide_stop_dma_thread();
+
+    /* Save the device state */
+    sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
+    if (qemu_savevm(qemu_file) < 0)
+        fprintf(stderr, "qemu save fail.\n");
 
     return 0;
 }
diff -r 7e431ea834a8 -r 2d25db73ddc1 tools/libxc/xc_hvm_save.c
--- a/tools/libxc/xc_hvm_save.c Tue Apr 03 13:22:37 2007 +0100
+++ b/tools/libxc/xc_hvm_save.c Wed Apr 04 10:42:58 2007 +0100
@@ -59,23 +59,6 @@ static int qemu_active;
 static int qemu_active;
 static int qemu_non_active;
 
-int xc_hvm_drain_io(int handle, domid_t dom)
-{
-    DECLARE_HYPERCALL;
-    xen_hvm_drain_io_t arg;
-    int rc;
-
-    hypercall.op     = __HYPERVISOR_hvm_op;
-    hypercall.arg[0] = HVMOP_drain_io;
-    hypercall.arg[1] = (unsigned long)&arg;
-    arg.domid = dom;
-    if ( lock_pages(&arg, sizeof(arg)) != 0 )
-        return -1;
-    rc = do_xen_hypercall(handle, &hypercall);
-    unlock_pages(&arg, sizeof(arg));
-    return rc;
-}
-
 /*
 ** During (live) save/migrate, we maintain a number of bitmaps to track
 ** which pages we have to send, to fixup, and to skip.
diff -r 7e431ea834a8 -r 2d25db73ddc1 tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h    Tue Apr 03 13:22:37 2007 +0100
+++ b/tools/libxc/xenguest.h    Wed Apr 04 10:42:58 2007 +0100
@@ -159,8 +159,6 @@ int xc_get_hvm_param(
 int xc_get_hvm_param(
     int handle, domid_t dom, int param, unsigned long *value);
 
-int xc_hvm_drain_io(int handle, domid_t dom);
-
 /* PowerPC specific. */
 int xc_prose_build(int xc_handle,
                    uint32_t domid,
diff -r 7e431ea834a8 -r 2d25db73ddc1 tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Tue Apr 03 13:22:37 2007 +0100
+++ b/tools/libxc/xg_private.c  Wed Apr 04 10:42:58 2007 +0100
@@ -227,11 +227,6 @@ __attribute__((weak)) int xc_get_hvm_par
 
 __attribute__((weak)) int xc_set_hvm_param(
     int handle, domid_t dom, int param, unsigned long value)
-{
-    return -ENOSYS;
-}
-
-__attribute__((weak)) int xc_hvm_drain_io(int handle, domid_t dom)
 {
     return -ENOSYS;
 }
diff -r 7e431ea834a8 -r 2d25db73ddc1 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Apr 03 13:22:37 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Apr 04 10:42:58 2007 +0100
@@ -144,48 +144,6 @@ void hvm_do_resume(struct vcpu *v)
             domain_crash_synchronous();
         }
     }
-}
-
-/* Called from the tools when saving a domain to make sure the io
- * request-response ring is entirely empty. */
-static int hvmop_drain_io(
-    XEN_GUEST_HANDLE(xen_hvm_drain_io_t) uop)
-{
-    struct xen_hvm_drain_io op;
-    struct domain *d;
-    struct vcpu *v;
-    ioreq_t *p;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
-    d = rcu_lock_domain_by_id(op.domid);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = -EINVAL;
-    /* Can't do this to yourself, or to a domain without an ioreq ring */
-    if ( d == current->domain || !is_hvm_domain(d) || get_sp(d) == NULL )
-        goto out;
-
-    rc = 0;
-
-    domain_pause(d);  /* It's not safe to do this to running vcpus */
-    for_each_vcpu(d, v)
-    {
-        p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
-        if ( p->state == STATE_IORESP_READY )
-            hvm_io_assist(v);
-    }
-    domain_unpause(d);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
 }
 
 int hvm_domain_initialise(struct domain *d)
@@ -963,12 +921,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
         break;
 
-    case HVMOP_drain_io:
-        rc = hvmop_drain_io(
-            guest_handle_cast(arg, xen_hvm_drain_io_t));
-        break;
-
-
     default:
     {
         gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
diff -r 7e431ea834a8 -r 2d25db73ddc1 xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Apr 03 13:22:37 2007 +0100
+++ b/xen/common/schedule.c     Wed Apr 04 10:42:58 2007 +0100
@@ -461,7 +461,11 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HAN
         if ( d == NULL )
             break;
 
+        /* domain_pause() prevents any further execution in guest context. */
+        domain_pause(d);
         domain_shutdown(d, (u8)sched_remote_shutdown.reason);
+        domain_unpause(d);
+
         rcu_unlock_domain(d);
         ret = 0;
 
diff -r 7e431ea834a8 -r 2d25db73ddc1 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h   Tue Apr 03 13:22:37 2007 +0100
+++ b/xen/include/public/hvm/hvm_op.h   Wed Apr 04 10:42:58 2007 +0100
@@ -70,12 +70,4 @@ typedef struct xen_hvm_set_pci_link_rout
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
-/* Drain all outstanding qemu-dm IO responses from a domain's ioreq ring. */
-#define HVMOP_drain_io            5
-struct xen_hvm_drain_io {
-    domid_t  domid;
-};
-typedef struct xen_hvm_drain_io xen_hvm_drain_io_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_drain_io_t);
-
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

