
[PATCH] tools/helpers/init-dom0less: fix vcpu availability



Currently, writing to cpu/<cpu>/availability in xenstore fails for two
reasons: the path carries a trailing slash, and info->cpupool holds the
cpupool id rather than a bitmap of online vCPUs. Fix this by fetching
libxl_vcpuinfo for each dom0less domain and using each vCPU's online
flag instead. Also iterate up to and including vcpu_max_id, since vCPU
ids range from 0 to vcpu_max_id.
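
For illustration, here is a minimal self-contained sketch of the idea
(not the patch itself): query per-vCPU state with libxl_list_vcpu() and
mirror it into xenstore. The write_availability() name is hypothetical,
and the sketch writes the absolute path with xs_write() instead of going
through the helper's do_xs_write_dom() wrapper and transaction.

/* Sketch only: mirror each vCPU's online state into xenstore. */
#include <stdio.h>
#include <string.h>
#include <libxl.h>
#include <xenstore.h>

/* Hypothetical helper, for illustration; not part of the patch. */
static int write_availability(struct xs_handle *xsh, libxl_ctx *ctx,
                              uint32_t domid)
{
    int nr_vcpus = 0, nr_cpus = 0, i;
    libxl_vcpuinfo *vcpuinfo = libxl_list_vcpu(ctx, domid, &nr_vcpus, &nr_cpus);

    if (!vcpuinfo)
        return -1;

    for (i = 0; i < nr_vcpus; i++) {
        char path[64];
        const char *val = vcpuinfo[i].online ? "online" : "offline";

        /* Note: no trailing slash on the path. */
        snprintf(path, sizeof(path),
                 "/local/domain/%u/cpu/%d/availability", domid, i);
        if (!xs_write(xsh, XBT_NULL, path, val, strlen(val)))
            break;
    }

    libxl_vcpuinfo_list_free(vcpuinfo, nr_vcpus);
    return i == nr_vcpus ? 0 : -1;
}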

Signed-off-by: Amneesh Singh <a-singh21@xxxxxx>
---
 tools/helpers/init-dom0less.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/tools/helpers/init-dom0less.c b/tools/helpers/init-dom0less.c
index fee9345..a8cdc6d 100644
--- a/tools/helpers/init-dom0less.c
+++ b/tools/helpers/init-dom0less.c
@@ -99,8 +99,8 @@ static bool do_xs_write_vm(struct xs_handle *xsh, xs_transaction_t t,
  * domain started by xl/libxl.
  */
 static int create_xenstore(struct xs_handle *xsh,
-                           libxl_dominfo *info, libxl_uuid uuid,
-                           evtchn_port_t xenstore_port)
+                           libxl_dominfo *info, libxl_vcpuinfo *vcpuinfo,
+                           libxl_uuid uuid, evtchn_port_t xenstore_port)
 {
     domid_t domid;
     unsigned int i;
@@ -168,13 +168,13 @@ retry_transaction:
     if (!do_xs_write_dom(xsh, t, domid, "vm", vm_val_str)) goto err;
     if (!do_xs_write_dom(xsh, t, domid, "name", dom_name_str)) goto err;
     if (!do_xs_write_dom(xsh, t, domid, "cpu", "")) goto err;
-    for (i = 0; i < info->vcpu_max_id; i++) {
-        rc = snprintf(cpu_str, STR_MAX_LENGTH, "cpu/%u/availability/", i);
+    for (i = 0; i <= info->vcpu_max_id; i++) {
+        rc = snprintf(cpu_str, STR_MAX_LENGTH, "cpu/%u/availability", i);
         if (rc < 0 || rc >= STR_MAX_LENGTH)
             goto err;
         rc = -EIO;
         if (!do_xs_write_dom(xsh, t, domid, cpu_str,
-                             (info->cpupool & (1 << i)) ? "online" : "offline"))
+                             vcpuinfo[i].online ? "online" : "offline"))
             goto err;
     }
 
@@ -225,7 +225,8 @@ err:
 static int init_domain(struct xs_handle *xsh,
                        struct xc_interface_core *xch,
                        xenforeignmemory_handle *xfh,
-                       libxl_dominfo *info)
+                       libxl_dominfo *info,
+                       libxl_vcpuinfo *vcpuinfo)
 {
     libxl_uuid uuid;
     uint64_t xenstore_evtchn, xenstore_pfn;
@@ -278,7 +279,7 @@ static int init_domain(struct xs_handle *xsh,
     if (rc < 0)
         return rc;
 
-    rc = create_xenstore(xsh, info, uuid, xenstore_evtchn);
+    rc = create_xenstore(xsh, info, vcpuinfo, uuid, xenstore_evtchn);
     if (rc)
         err(1, "writing to xenstore");
 
@@ -300,7 +301,7 @@ int main(int argc, char **argv)
 {
     libxl_dominfo *info = NULL;
     libxl_ctx *ctx;
-    int nb_vm = 0, rc = 0, i;
+    int nb_vm = 0, nb_vcpu = 0, nr_cpus = 0, rc = 0, i;
     struct xs_handle *xsh = NULL;
     struct xc_interface_core *xch = NULL;
     xenforeignmemory_handle *xfh = NULL;
@@ -330,14 +331,17 @@ int main(int argc, char **argv)
 
     for (i = 0; i < nb_vm; i++) {
         domid_t domid = info[i].domid;
+        libxl_vcpuinfo *vcpuinfo;
 
         /* Don't need to check for Dom0 */
         if (!domid)
             continue;
 
+        vcpuinfo = libxl_list_vcpu(ctx, domid, &nb_vcpu, &nr_cpus);
+
         printf("Checking domid: %u\n", domid);
         if (!domain_exists(xsh, domid)) {
-            rc = init_domain(xsh, xch, xfh, &info[i]);
+            rc = init_domain(xsh, xch, xfh, &info[i], vcpuinfo);
             if (rc < 0) {
                 fprintf(stderr, "init_domain failed.\n");
                 goto out;
@@ -345,6 +349,8 @@ int main(int argc, char **argv)
         } else {
             printf("Domain %u has already been initialized\n", domid);
         }
+
+        libxl_vcpuinfo_list_free(vcpuinfo, nb_vcpu);
     }
 out:
     libxl_dominfo_list_free(info, nb_vm);
-- 
2.34.1
