[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 5 of 7] tools/xc: log pid in xc_save/xc_restore output


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Wed, 13 Feb 2013 16:53:03 +0100
  • Delivery-date: Wed, 13 Feb 2013 15:53:30 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1360770273 -3600
# Node ID 101ba725b5a58323c34a1944d7d24f600aaf48d5
# Parent  0e6a41e1ee36237ba130959dc3d2a3bd44104eb0
tools/xc: log pid in xc_save/xc_restore output

If several migrations log their output to xend.log it's not clear which
line belongs to which guest. Print entry/exit of xc_save and
xc_restore and also request to print pid with each log call.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 0e6a41e1ee36 -r 101ba725b5a5 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -1456,6 +1456,8 @@ int xc_domain_restore(xc_interface *xch,
     struct restore_ctx *ctx = &_ctx;
     struct domain_info_context *dinfo = &ctx->dinfo;
 
+    DPRINTF("%s: starting restore of new domid %u", __func__, dom);
+
     pagebuf_init(&pagebuf);
     memset(&tailbuf, 0, sizeof(tailbuf));
     tailbuf.ishvm = hvm;
@@ -1485,7 +1487,7 @@ int xc_domain_restore(xc_interface *xch,
         PERROR("read: p2m_size");
         goto out;
     }
-    DPRINTF("xc_domain_restore start: p2m_size = %lx\n", dinfo->p2m_size);
+    DPRINTF("%s: p2m_size = %lx\n", __func__, dinfo->p2m_size);
 
     if ( !get_platform_info(xch, dom,
                             &ctx->max_mfn, &ctx->hvirt_start, &ctx->pt_levels, 
&dinfo->guest_width) )
@@ -2300,7 +2302,7 @@ int xc_domain_restore(xc_interface *xch,
 
     fcntl(io_fd, F_SETFL, orig_io_fd_flags);
 
-    DPRINTF("Restore exit with rc=%d\n", rc);
+    DPRINTF("Restore exit of domid %u with rc=%d\n", dom, rc);
 
     return rc;
 }
diff -r 0e6a41e1ee36 -r 101ba725b5a5 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -888,6 +888,8 @@ int xc_domain_save(xc_interface *xch, in
 
     int completed = 0;
 
+    DPRINTF("%s: starting save of domid %u", __func__, dom);
+
     if ( hvm && !callbacks->switch_qemu_logdirty )
     {
         ERROR("No switch_qemu_logdirty callback provided.");
@@ -2121,7 +2123,7 @@ int xc_domain_save(xc_interface *xch, in
     free(pfn_err);
     free(to_fix);
 
-    DPRINTF("Save exit rc=%d\n",rc);
+    DPRINTF("Save exit of domid %u with rc=%d\n", dom, rc);
 
     return !!rc;
 }
diff -r 0e6a41e1ee36 -r 101ba725b5a5 tools/xcutils/xc_restore.c
--- a/tools/xcutils/xc_restore.c
+++ b/tools/xcutils/xc_restore.c
@@ -19,17 +19,22 @@ int
 main(int argc, char **argv)
 {
     unsigned int domid, store_evtchn, console_evtchn;
-    unsigned int hvm, pae, apic;
+    unsigned int hvm, pae, apic, lflags;
     xc_interface *xch;
     int io_fd, ret;
     int superpages;
     unsigned long store_mfn, console_mfn;
+    xentoollog_level lvl;
+    xentoollog_logger *l;
 
     if ( (argc != 8) && (argc != 9) )
         errx(1, "usage: %s iofd domid store_evtchn "
              "console_evtchn hvm pae apic [superpages]", argv[0]);
 
-    xch = xc_interface_open(0,0,0);
+    lvl = XTL_DETAIL;
+    lflags = XTL_STDIOSTREAM_SHOW_PID | XTL_STDIOSTREAM_HIDE_PROGRESS;
+    l = (xentoollog_logger *)xtl_createlogger_stdiostream(stderr, lvl, lflags);
+    xch = xc_interface_open(l, 0, 0);
     if ( !xch )
         errx(1, "failed to open control interface");
 
diff -r 0e6a41e1ee36 -r 101ba725b5a5 tools/xcutils/xc_save.c
--- a/tools/xcutils/xc_save.c
+++ b/tools/xcutils/xc_save.c
@@ -184,7 +184,7 @@ main(int argc, char **argv)
     si.suspend_evtchn = -1;
 
     lvl = si.flags & XCFLAGS_DEBUG ? XTL_DEBUG: XTL_DETAIL;
-    lflags = XTL_STDIOSTREAM_HIDE_PROGRESS;
+    lflags = XTL_STDIOSTREAM_SHOW_PID | XTL_STDIOSTREAM_HIDE_PROGRESS;
     l = (xentoollog_logger *)xtl_createlogger_stdiostream(stderr, lvl, lflags);
     si.xch = xc_interface_open(l, 0, 0);
     if (!si.xch)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.