
[Xen-changelog] [xen-unstable] xl: introduce specific VCPU to PCPU mapping in config file


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 18 May 2012 17:11:08 +0000
  • Delivery-date: Fri, 18 May 2012 17:11:18 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Dario Faggioli <raistlin@xxxxxxxx>
# Date 1337092912 -3600
# Node ID 7fb7341085e4fa08e3d14c0fcf1a501e4e6bbff6
# Parent  edd7c7ad1ad23977b0c39f18a904b2ae0ca14daa
xl: introduce specific VCPU to PCPU mapping in config file

xm supports the following syntax (in the config file) for
specific VCPU to PCPU mapping:

cpus = "0-3,5,^1" # all vcpus run on cpus 0,2,3,5
cpus = ["2", "3"] # VCPU0 runs on CPU2, VCPU1 runs on CPU3

Allow for the same in xl.

This fixes what happened in changeset 54000bca7a6a, which
introduced support for the `cpus=` option within xl, but used
both the list (cpus=[2, 3]) and the string (cpus="2,3") syntax
to achieve the same behaviour (pinning all of the guest's vcpus
to the pcpus in the list/string).
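
For illustration only, here is a minimal, self-contained sketch of
how a string in the "0-3,5,^1" form can be turned into a cpu
bitmask. This is not the parser xl actually ships; the names
parse_cpu_list and MAX_CPUS, and the 64-cpu limit implied by the
plain uint64_t mask, are hypothetical, chosen to keep the sketch
short:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_CPUS 64  /* hypothetical limit, fits the uint64_t mask */

    /* Parse a cpu list string into a bitmask; returns 0 on success. */
    static int parse_cpu_list(const char *s, uint64_t *mask)
    {
        char *copy = strdup(s), *tok, *saveptr;

        *mask = 0;
        for (tok = strtok_r(copy, ",", &saveptr); tok;
             tok = strtok_r(NULL, ",", &saveptr)) {
            int negate = (*tok == '^');   /* "^N" means exclude cpu N */
            int n, lo, hi;

            if (negate)
                tok++;
            n = sscanf(tok, "%d-%d", &lo, &hi);
            if (n < 1)
                goto err;
            if (n == 1)
                hi = lo;                  /* a single cpu, not a range */
            if (lo < 0 || hi >= MAX_CPUS || lo > hi)
                goto err;
            for (int i = lo; i <= hi; i++) {
                if (negate)
                    *mask &= ~(1ULL << i);
                else
                    *mask |= 1ULL << i;
            }
        }
        free(copy);
        return 0;
     err:
        free(copy);
        return -1;
    }

    int main(void)
    {
        uint64_t mask;

        if (parse_cpu_list("0-3,5,^1", &mask) == 0)
            printf("mask = %#" PRIx64 "\n", mask); /* 0x2d: cpus 0,2,3,5 */
        return 0;
    }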

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---


diff -r edd7c7ad1ad2 -r 7fb7341085e4 docs/man/xl.cfg.pod.5
--- a/docs/man/xl.cfg.pod.5     Tue May 15 09:18:33 2012 +0200
+++ b/docs/man/xl.cfg.pod.5     Tue May 15 15:41:52 2012 +0100
@@ -108,9 +108,25 @@ created online and the remainder will be
 =item B<cpus="CPU-LIST">
 
 List of which cpus the guest is allowed to use. Default behavior is
-`all cpus`. A list of cpus may be specified as follows: `cpus="0-3,5,^1"`
-(all vcpus will run on cpus 0,2,3,5), or `cpus=["2", "3"]` (all vcpus
-will run on cpus 2 and 3).
+`all cpus`. A C<CPU-LIST> may be specified as follows:
+
+=over 4
+
+=item "all"
+
+To allow all the vcpus of the guest to run on all the cpus on the host.
+
+=item "0-3,5,^1"
+
+To allow all the vcpus of the guest to run on cpus 0,2,3,5.
+
+=item ["2", "3"] (or [2, 3])
+
+To ask for a specific vcpu mapping. That means (in this example) vcpu #0
+of the guest will run on cpu #2 of the host and vcpu #1 of the guest will
+run on cpu #3 of the host.
+
+=back
 
 =item B<cpu_weight=WEIGHT>
 
@@ -951,10 +967,6 @@ XXX
 
 XXX
 
-=item B<cpus=XXX>
-
-XXX
-
 =item B<maxmem=NUMBER>
 
 XXX
diff -r edd7c7ad1ad2 -r 7fb7341085e4 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Tue May 15 09:18:33 2012 +0200
+++ b/tools/libxl/xl_cmdimpl.c  Tue May 15 15:41:52 2012 +0100
@@ -71,6 +71,8 @@ static uint32_t domid;
 static const char *common_domname;
 static int fd_lock = -1;
 
+/* Stash for specific vcpu to pcpu mapping */
+static int *vcpu_to_pcpu;
 
 static const char savefileheader_magic[32]=
     "Xen saved domain, xl format\n \0 \r";
@@ -630,6 +632,21 @@ static void parse_config_data(const char
             exit(1);
         }
 
+        /* Prepare the array for single vcpu to pcpu mappings */
+        vcpu_to_pcpu = xmalloc(sizeof(int) * b_info->max_vcpus);
+        memset(vcpu_to_pcpu, -1, sizeof(int) * b_info->max_vcpus);
+
+        /*
+         * The idea here is to let libxl think all the domain's vcpus
+         * have cpu affinity with all the pcpus on the list.
+         * It is then up to us, here in xl, to match each single vcpu
+         * to its pcpu (which is why we need to stash that info in
+         * the vcpu_to_pcpu array now) after the domain has been created.
+         * Doing it like this spares us the burden of passing libxl
+         * some big array holding the individual mappings. Also, using
+         * the cpumap derived from the list ensures memory is
+         * allocated on the proper NUMA nodes anyway.
+         */
         libxl_cpumap_set_none(&b_info->cpumap);
         while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
             i = atoi(buf);
@@ -638,6 +655,8 @@ static void parse_config_data(const char
                 exit(1);
             }
             libxl_cpumap_set(&b_info->cpumap, i);
+            if (n_cpus < b_info->max_vcpus)
+                vcpu_to_pcpu[n_cpus] = i;
             n_cpus++;
         }
     }
@@ -1714,6 +1733,31 @@ start:
     if ( ret )
         goto error_out;
 
+    /* If single vcpu to pcpu mapping was requested, honour it */
+    if (vcpu_to_pcpu) {
+        libxl_cpumap vcpu_cpumap;
+
+        libxl_cpumap_alloc(ctx, &vcpu_cpumap);
+        for (i = 0; i < d_config.b_info.max_vcpus; i++) {
+
+            if (vcpu_to_pcpu[i] != -1) {
+                libxl_cpumap_set_none(&vcpu_cpumap);
+                libxl_cpumap_set(&vcpu_cpumap, vcpu_to_pcpu[i]);
+            } else {
+                libxl_cpumap_set_any(&vcpu_cpumap);
+            }
+            if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) {
+                fprintf(stderr, "setting affinity failed on vcpu `%d'.\n", i);
+                libxl_cpumap_dispose(&vcpu_cpumap);
+                free(vcpu_to_pcpu);
+                ret = ERROR_FAIL;
+                goto error_out;
+            }
+        }
+        libxl_cpumap_dispose(&vcpu_cpumap);
+        free(vcpu_to_pcpu); vcpu_to_pcpu = NULL;
+    }
+
     ret = libxl_userdata_store(ctx, domid, "xl",
                                     config_data, config_len);
     if (ret) {
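
A side note on the first xl_cmdimpl.c hunk: the patch initialises the
vcpu_to_pcpu array of ints with memset(..., -1, ...). That works
because -1 converts to the byte 0xff, and an int whose bytes are all
0xff reads back as -1 under two's complement (which the platforms Xen
targets use). A standalone demonstration of the idiom, under that
representation assumption:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        int n = 4;                          /* stand-in for max_vcpus */
        int *v = malloc(sizeof(int) * n);

        memset(v, -1, sizeof(int) * n);     /* same idiom as the patch */
        for (int i = 0; i < n; i++)
            assert(v[i] == -1);             /* every slot reads as -1 */
        free(v);
        return 0;
    }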
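And to distil the second hunk: the per-vcpu pinning step boils down to
building a one-bit cpumap and handing it to libxl_set_vcpuaffinity().
A minimal sketch using the same libxl calls the patch uses, assuming
the libxl headers of this era, an already-initialised ctx and a valid
domid; the helper name pin_one_vcpu is hypothetical:

    #include "libxl.h"

    /* Restrict one vcpu of a domain to exactly one physical cpu. */
    static int pin_one_vcpu(libxl_ctx *ctx, uint32_t domid,
                            int vcpu, int pcpu)
    {
        libxl_cpumap m;
        int rc;

        libxl_cpumap_alloc(ctx, &m);   /* bitmap sized for the host */
        libxl_cpumap_set_none(&m);     /* clear every bit ... */
        libxl_cpumap_set(&m, pcpu);    /* ... then allow only pcpu */
        rc = libxl_set_vcpuaffinity(ctx, domid, vcpu, &m);
        libxl_cpumap_dispose(&m);
        return rc;
    }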
