[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] libxl/xl: Use libxl_vcpuinfo_destroy



# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1282228376 -3600
# Node ID 477abe597f6d2f6d83343ee99f496d5e7eca7b5a
# Parent  4c9ef5ec91461d10878321c40e47b60dde97cdaf
libxl/xl: Use libxl_vcpuinfo_destroy

Replaces libxl_free_vcpu_list.

The ->cpumap field is now always a unique allocation rather than each
being an offset into the cpumap allocated in the first
libxl_vcpuinfo in the list.

Refactor vcpulist so that the two cases can share more code.

[PATCH 15 of 16 of
 libxl: autogenerate type definitions and destructor functions]

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
 tools/libxl/libxl.c      |   11 ---------
 tools/libxl/libxl.h      |    1 
 tools/libxl/xl_cmdimpl.c |   55 +++++++++++++++++++++++++++--------------------
 3 files changed, 33 insertions(+), 34 deletions(-)

diff -r 4c9ef5ec9146 -r 477abe597f6d tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Thu Aug 19 15:32:28 2010 +0100
+++ b/tools/libxl/libxl.c       Thu Aug 19 15:32:56 2010 +0100
@@ -2902,7 +2902,6 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
     xc_domaininfo_t domaininfo;
     xc_vcpuinfo_t vcpuinfo;
     xc_physinfo_t physinfo = { 0 };
-    uint64_t *cpumaps;
     unsigned num_cpuwords;
 
     if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
@@ -2920,9 +2919,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
     }
 
     num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
-    cpumaps = calloc(num_cpuwords * sizeof(*cpumaps), domaininfo.max_vcpu_id + 1);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        ptr->cpumap = cpumaps + (num_cpuwords * *nb_vcpu);
+        ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
         if (!ptr->cpumap) {
             return NULL;
         }
@@ -2943,13 +2941,6 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
         ptr->vcpu_time = vcpuinfo.cpu_time;
     }
     return ret;
-}
-
-void libxl_free_vcpu_list(libxl_vcpuinfo *vcpu)
-{
-    if ( vcpu )
-        free(vcpu[0].cpumap);
-    free(vcpu);
 }
 
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
diff -r 4c9ef5ec9146 -r 477abe597f6d tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Thu Aug 19 15:32:28 2010 +0100
+++ b/tools/libxl/libxl.h       Thu Aug 19 15:32:56 2010 +0100
@@ -432,7 +432,6 @@ int libxl_get_physinfo(libxl_ctx *ctx, l
 int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo);
 libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
                                        int *nb_vcpu, int *nrcpus);
-void libxl_free_vcpu_list(libxl_vcpuinfo *vcpu);
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
                            uint64_t *cpumap, int nrcpus);
 int libxl_set_vcpucount(libxl_ctx *ctx, uint32_t domid, uint32_t count);
diff -r 4c9ef5ec9146 -r 477abe597f6d tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Thu Aug 19 15:32:28 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Thu Aug 19 15:32:56 2010 +0100
@@ -3301,49 +3301,58 @@ static void print_vcpuinfo(uint32_t tdom
     }
 }
 
+static void print_domain_vcpuinfo(uint32_t domid, uint32_t nr_cpus)
+{
+    libxl_vcpuinfo *vcpuinfo;
+    int i, nb_vcpu, nrcpus;
+
+    vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &nrcpus);
+
+    if (!vcpuinfo) {
+        fprintf(stderr, "libxl_list_vcpu failed.\n");
+        return;
+    }
+
+    for (i = 0; i < nb_vcpu; i++) {
+        print_vcpuinfo(domid, &vcpuinfo[i], nr_cpus);
+        libxl_vcpuinfo_destroy(&vcpuinfo[i]);
+    }
+
+    free(vcpuinfo);
+}
+
 static void vcpulist(int argc, char **argv)
 {
-    libxl_dominfo *dominfo, *domlist;
-    libxl_vcpuinfo *vcpuinfo, *list = NULL;
+    libxl_dominfo *dominfo;
     libxl_physinfo physinfo;
-    int nb_vcpu, nb_domain, nrcpus;
+    int i, nb_domain;
 
     if (libxl_get_physinfo(&ctx, &physinfo) != 0) {
         fprintf(stderr, "libxl_physinfo failed.\n");
         goto vcpulist_out;
     }
+
     printf("%-32s %5s %5s %5s %5s %9s %s\n",
            "Name", "ID", "VCPU", "CPU", "State", "Time(s)", "CPU Affinity");
     if (!argc) {
-        if (!(domlist = dominfo = libxl_list_domain(&ctx, &nb_domain))) {
+        if (!(dominfo = libxl_list_domain(&ctx, &nb_domain))) {
             fprintf(stderr, "libxl_list_domain failed.\n");
             goto vcpulist_out;
         }
-        for (; nb_domain > 0; --nb_domain, ++dominfo) {
-            if (!(list = vcpuinfo = libxl_list_vcpu(&ctx, dominfo->domid, &nb_vcpu,
-                &nrcpus))) {
-                fprintf(stderr, "libxl_list_vcpu failed.\n");
-                goto vcpulist_out;
-            }
-            for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
-                print_vcpuinfo(dominfo->domid, vcpuinfo, physinfo.nr_cpus);
-            }
-            libxl_free_vcpu_list(list);
-        }
-        free(domlist);
+
+        for (i = 0; i<nb_domain; i++)
+            print_domain_vcpuinfo(dominfo[i].domid, physinfo.nr_cpus);
+
+        free(dominfo);
+
     } else {
         for (; argc > 0; ++argv, --argc) {
             if (domain_qualifier_to_domid(*argv, &domid, 0) < 0) {
                 fprintf(stderr, "%s is an invalid domain identifier\n", *argv);
-            }
-            if (!(list = vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &nrcpus))) {
-                fprintf(stderr, "libxl_list_vcpu failed.\n");
                 goto vcpulist_out;
             }
-            for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
-                print_vcpuinfo(domid, vcpuinfo, physinfo.nr_cpus);
-            }
-            libxl_free_vcpu_list(list);
+
+            print_domain_vcpuinfo(domid, physinfo.nr_cpus);
         }
     }
   vcpulist_out:

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.