[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] libxc: Support save/restore of up to 4096 VCPUs (increase from 64 VCPUs).


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Thu, 23 Aug 2012 05:22:09 +0000
  • Delivery-date: Thu, 23 Aug 2012 05:22:23 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1345670442 -3600
# Node ID 03507cc3a1ee28c6f88e5a01a03077082bd99b58
# Parent  a325ee7a463903ac13a6bf38a3452271631b0043
libxc: Support save/restore of up to 4096 VCPUs (increase from 64 VCPUs).

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r a325ee7a4639 -r 03507cc3a1ee tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Wed Aug 22 22:15:36 2012 +0100
+++ b/tools/libxc/xc_domain_restore.c   Wed Aug 22 22:20:42 2012 +0100
@@ -462,7 +462,7 @@ static int dump_qemu(xc_interface *xch, 
 
 static int buffer_tail_hvm(xc_interface *xch, struct restore_ctx *ctx,
                            struct tailbuf_hvm *buf, int fd,
-                           unsigned int max_vcpu_id, uint64_t vcpumap,
+                           unsigned int max_vcpu_id, uint64_t *vcpumap,
                            int ext_vcpucontext,
                            int vcpuextstate, uint32_t vcpuextstate_size)
 {
@@ -530,7 +530,7 @@ static int buffer_tail_hvm(xc_interface 
 
 static int buffer_tail_pv(xc_interface *xch, struct restore_ctx *ctx,
                           struct tailbuf_pv *buf, int fd,
-                          unsigned int max_vcpu_id, uint64_t vcpumap,
+                          unsigned int max_vcpu_id, uint64_t *vcpumap,
                           int ext_vcpucontext,
                           int vcpuextstate,
                           uint32_t vcpuextstate_size)
@@ -563,8 +563,8 @@ static int buffer_tail_pv(xc_interface *
     /* VCPU contexts */
     buf->vcpucount = 0;
     for (i = 0; i <= max_vcpu_id; i++) {
-        // DPRINTF("vcpumap: %llx, cpu: %d, bit: %llu\n", vcpumap, i, (vcpumap % (1ULL << i)));
-        if ( (!(vcpumap & (1ULL << i))) )
+        // DPRINTF("vcpumap: %llx, cpu: %d, bit: %llu\n", vcpumap[i/64], i, (vcpumap[i/64] & (1ULL << (i%64))));
+        if ( (!(vcpumap[i/64] & (1ULL << (i%64)))) )
             continue;
         buf->vcpucount++;
     }
@@ -614,7 +614,7 @@ static int buffer_tail_pv(xc_interface *
 
 static int buffer_tail(xc_interface *xch, struct restore_ctx *ctx,
                        tailbuf_t *buf, int fd, unsigned int max_vcpu_id,
-                       uint64_t vcpumap, int ext_vcpucontext,
+                       uint64_t *vcpumap, int ext_vcpucontext,
                        int vcpuextstate, uint32_t vcpuextstate_size)
 {
     if ( buf->ishvm )
@@ -680,7 +680,7 @@ typedef struct {
 
     int new_ctxt_format;
     int max_vcpu_id;
-    uint64_t vcpumap;
+    uint64_t vcpumap[XC_SR_MAX_VCPUS/64];
     uint64_t identpt;
     uint64_t paging_ring_pfn;
     uint64_t access_ring_pfn;
@@ -745,12 +745,12 @@ static int pagebuf_get_one(xc_interface 
     case XC_SAVE_ID_VCPU_INFO:
         buf->new_ctxt_format = 1;
         if ( RDEXACT(fd, &buf->max_vcpu_id, sizeof(buf->max_vcpu_id)) ||
-             buf->max_vcpu_id >= 64 || RDEXACT(fd, &buf->vcpumap,
-                                               sizeof(uint64_t)) ) {
+             buf->max_vcpu_id >= XC_SR_MAX_VCPUS ||
+             RDEXACT(fd, buf->vcpumap, vcpumap_sz(buf->max_vcpu_id)) ) {
             PERROR("Error when reading max_vcpu_id");
             return -1;
         }
-        // DPRINTF("Max VCPU ID: %d, vcpumap: %llx\n", buf->max_vcpu_id, buf->vcpumap);
+        // DPRINTF("Max VCPU ID: %d, vcpumap: %llx\n", buf->max_vcpu_id, buf->vcpumap[0]);
         return pagebuf_get_one(xch, ctx, buf, fd, dom);
 
     case XC_SAVE_ID_HVM_IDENT_PT:
@@ -1366,7 +1366,7 @@ int xc_domain_restore(xc_interface *xch,
     struct mmuext_op pin[MAX_PIN_BATCH];
     unsigned int nr_pins;
 
-    uint64_t vcpumap = 1ULL;
+    uint64_t vcpumap[XC_SR_MAX_VCPUS/64] = { 1ULL };
     unsigned int max_vcpu_id = 0;
     int new_ctxt_format = 0;
 
@@ -1517,8 +1517,8 @@ int xc_domain_restore(xc_interface *xch,
         if ( j == 0 ) {
             /* catch vcpu updates */
             if (pagebuf.new_ctxt_format) {
-                vcpumap = pagebuf.vcpumap;
                 max_vcpu_id = pagebuf.max_vcpu_id;
+                memcpy(vcpumap, pagebuf.vcpumap, vcpumap_sz(max_vcpu_id));
             }
             /* should this be deferred? does it change? */
             if ( pagebuf.identpt )
@@ -1880,7 +1880,7 @@ int xc_domain_restore(xc_interface *xch,
     vcpup = tailbuf.u.pv.vcpubuf;
     for ( i = 0; i <= max_vcpu_id; i++ )
     {
-        if ( !(vcpumap & (1ULL << i)) )
+        if ( !(vcpumap[i/64] & (1ULL << (i%64))) )
             continue;
 
         memcpy(ctxt, vcpup, ((dinfo->guest_width == 8) ? sizeof(ctxt->x64)
diff -r a325ee7a4639 -r 03507cc3a1ee tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Wed Aug 22 22:15:36 2012 +0100
+++ b/tools/libxc/xc_domain_save.c      Wed Aug 22 22:20:42 2012 +0100
@@ -855,7 +855,7 @@ int xc_domain_save(xc_interface *xch, in
     unsigned long needed_to_fix = 0;
     unsigned long total_sent    = 0;
 
-    uint64_t vcpumap = 1ULL;
+    uint64_t vcpumap[XC_SR_MAX_VCPUS/64] = { 1ULL };
 
     /* HVM: a buffer for holding HVM context */
     uint32_t hvm_buf_size = 0;
@@ -1581,13 +1581,13 @@ int xc_domain_save(xc_interface *xch, in
     }
 
     {
-        struct {
+        struct chunk {
             int id;
             int max_vcpu_id;
-            uint64_t vcpumap;
+            uint64_t vcpumap[XC_SR_MAX_VCPUS/64];
         } chunk = { XC_SAVE_ID_VCPU_INFO, info.max_vcpu_id };
 
-        if ( info.max_vcpu_id >= 64 )
+        if ( info.max_vcpu_id >= XC_SR_MAX_VCPUS )
         {
             ERROR("Too many VCPUS in guest!");
             goto out;
@@ -1598,11 +1598,12 @@ int xc_domain_save(xc_interface *xch, in
             xc_vcpuinfo_t vinfo;
             if ( (xc_vcpu_getinfo(xch, dom, i, &vinfo) == 0) &&
                  vinfo.online )
-                vcpumap |= 1ULL << i;
+                vcpumap[i/64] |= 1ULL << (i%64);
         }
 
-        chunk.vcpumap = vcpumap;
-        if ( wrexact(io_fd, &chunk, sizeof(chunk)) )
+        memcpy(chunk.vcpumap, vcpumap, vcpumap_sz(info.max_vcpu_id));
+        if ( wrexact(io_fd, &chunk, offsetof(struct chunk, vcpumap)
+                     + vcpumap_sz(info.max_vcpu_id)) )
         {
             PERROR("Error when writing to state file");
             goto out;
@@ -1878,7 +1879,7 @@ int xc_domain_save(xc_interface *xch, in
 
     for ( i = 0; i <= info.max_vcpu_id; i++ )
     {
-        if ( !(vcpumap & (1ULL << i)) )
+        if ( !(vcpumap[i/64] & (1ULL << (i%64))) )
             continue;
 
         if ( (i != 0) && xc_vcpu_getcontext(xch, dom, i, &ctxt) )
diff -r a325ee7a4639 -r 03507cc3a1ee tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h     Wed Aug 22 22:15:36 2012 +0100
+++ b/tools/libxc/xg_save_restore.h     Wed Aug 22 22:20:42 2012 +0100
@@ -269,6 +269,9 @@
 /* When pinning page tables at the end of restore, we also use batching. */
 #define MAX_PIN_BATCH  1024
 
+/* Maximum #VCPUs currently supported for save/restore. */
+#define XC_SR_MAX_VCPUS 4096
+#define vcpumap_sz(max_id) (((max_id)/64+1)*sizeof(uint64_t))
 
 
 /*

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.