
[Xen-devel] [PATCH v5 2/7] libxl: In libxl_set_vcpuonline check for maximum number of VCPUs against the cpumap.



There is no point in trying to online (or offline) VCPUs when the number
of CPUs set in the cpumap is greater than the maximum number of VCPUs the
guest can have.

As such, fail the operation if the count of VCPUs to online is greater
than the number the guest started with. The offline case is not checked
(those bits are simply unset in the cpumap) and is let through.
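
For illustration only (and not part of the patch), a minimal caller-side
sketch of how the new check surfaces through libxl_set_vcpuonline(); the
helper name, the domid, and the VCPU count n are hypothetical:

#include <libxl.h>
#include <libxl_utils.h>

/* Online the first n VCPUs of a domain by setting bits 0..n-1. */
int online_first_n_vcpus(libxl_ctx *ctx, uint32_t domid, int n)
{
    libxl_bitmap cpumap;
    int i, rc;

    libxl_bitmap_init(&cpumap);
    rc = libxl_bitmap_alloc(ctx, &cpumap, n);
    if (rc)
        goto out;

    for (i = 0; i < n; i++)
        libxl_bitmap_set(&cpumap, i);

    /* With this patch, if n exceeds the maxvcpus the guest was started
     * with, the call fails with ERROR_FAIL instead of quietly ignoring
     * the extra bits. */
    rc = libxl_set_vcpuonline(ctx, domid, &cpumap);

out:
    libxl_bitmap_dispose(&cpumap);
    return rc;
}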

We also coalesce some of the underlying libxl_set_vcpuonline code that
was duplicated in the QMP and XenStore codepaths.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
[v2: constify the libxl_domain and handle libxl_domain_info error]
[v3: inline libxl__check_max]
---
 tools/libxl/libxl.c | 63 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 33 insertions(+), 30 deletions(-)

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 80c7ff6..c0e9cfe 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -5443,27 +5443,19 @@ int libxl_domain_get_nodeaffinity(libxl_ctx *ctx, uint32_t domid,
 }
 
 static int libxl__set_vcpuonline_xenstore(libxl__gc *gc, uint32_t domid,
-                                         libxl_bitmap *cpumap)
+                                         libxl_bitmap *cpumap,
+                                         const libxl_dominfo *info)
 {
-    libxl_dominfo info;
     char *dompath;
     xs_transaction_t t;
-    int i, rc;
-
-    libxl_dominfo_init(&info);
+    int i, rc = ERROR_FAIL;
 
-    rc = libxl_domain_info(CTX, &info, domid);
-    if (rc < 0) {
-        LOGE(ERROR, "getting domain info list");
-        goto out;
-    }
-    rc = ERROR_FAIL;
     if (!(dompath = libxl__xs_get_dompath(gc, domid)))
         goto out;
 
 retry_transaction:
     t = xs_transaction_start(CTX->xsh);
-    for (i = 0; i <= info.vcpu_max_id; i++)
+    for (i = 0; i <= info->vcpu_max_id; i++)
         libxl__xs_write(gc, t,
                        libxl__sprintf(gc, "%s/cpu/%u/availability", dompath, i),
                        "%s", libxl_bitmap_test(cpumap, i) ? "online" : "offline");
@@ -5473,25 +5465,16 @@ retry_transaction:
     } else
         rc = 0;
 out:
-    libxl_dominfo_dispose(&info);
     return rc;
 }
 
 static int libxl__set_vcpuonline_qmp(libxl__gc *gc, uint32_t domid,
-                                     libxl_bitmap *cpumap)
+                                     libxl_bitmap *cpumap,
+                                     const libxl_dominfo *info)
 {
-    libxl_dominfo info;
-    int i, rc;
-
-    libxl_dominfo_init(&info);
+    int i;
 
-    rc = libxl_domain_info(CTX, &info, domid);
-    if (rc < 0) {
-        LOGE(ERROR, "getting domain info list");
-        libxl_dominfo_dispose(&info);
-        return rc;
-    }
-    for (i = 0; i <= info.vcpu_max_id; i++) {
+    for (i = 0; i <= info->vcpu_max_id; i++) {
         if (libxl_bitmap_test(cpumap, i)) {
             /* Return value is ignore because it does not tell anything useful
              * on the completion of the command.
@@ -5501,33 +5484,53 @@ static int libxl__set_vcpuonline_qmp(libxl__gc *gc, uint32_t domid,
             libxl__qmp_cpu_add(gc, domid, i);
         }
     }
-    libxl_dominfo_dispose(&info);
     return 0;
 }
 
 int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
 {
     GC_INIT(ctx);
-    int rc;
+    int rc, maxcpus;
+    libxl_dominfo info;
+
+    libxl_dominfo_init(&info);
+
+    rc = libxl_domain_info(CTX, &info, domid);
+    if (rc < 0) {
+        LOGE(ERROR, "getting domain info list");
+        goto out;
+    }
+
+    maxcpus = libxl_bitmap_count_set(cpumap);
+    if (maxcpus > info.vcpu_max_id + 1)
+    {
+        LOGE(ERROR, "Requested %d VCPUs, however maxcpus is %d!",
+             maxcpus, info.vcpu_max_id + 1);
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
     switch (libxl__domain_type(gc, domid)) {
     case LIBXL_DOMAIN_TYPE_HVM:
         switch (libxl__device_model_version_running(gc, domid)) {
         case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
-            rc = libxl__set_vcpuonline_xenstore(gc, domid, cpumap);
+            rc = libxl__set_vcpuonline_xenstore(gc, domid, cpumap, &info);
             break;
         case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN:
-            rc = libxl__set_vcpuonline_qmp(gc, domid, cpumap);
+            rc = libxl__set_vcpuonline_qmp(gc, domid, cpumap, &info);
             break;
         default:
             rc = ERROR_INVAL;
         }
         break;
     case LIBXL_DOMAIN_TYPE_PV:
-        rc = libxl__set_vcpuonline_xenstore(gc, domid, cpumap);
+        rc = libxl__set_vcpuonline_xenstore(gc, domid, cpumap, &info);
         break;
     default:
         rc = ERROR_INVAL;
     }
+out:
+    libxl_dominfo_dispose(&info);
     GC_FREE;
     return rc;
 }
-- 
2.1.0




 

