[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 15 of 16] libxl/xl: Use libxl_vcpuinfo_destroy
# HG changeset patch # User Ian Campbell <ian.campbell@xxxxxxxxxx> # Date 1282143678 -3600 # Node ID 59783ebb23f5107c6984c93c413744d8f47e316a # Parent 9dfdd32ce1282c4f9ca38e8735dd2cea97d39b19 libxl/xl: Use libxl_vcpuinfo_destroy Replaces libxl_free_vcpu_list. The ->cpumap field is now always a unique allocation rather than each being an offset into the cpumap allocated in the first libxl_vcpuinfo in the list. Refactor vcpulist so that the two cases can share more code. Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx> diff -r 9dfdd32ce128 -r 59783ebb23f5 tools/libxl/libxl.c --- a/tools/libxl/libxl.c Wed Aug 18 16:01:18 2010 +0100 +++ b/tools/libxl/libxl.c Wed Aug 18 16:01:18 2010 +0100 @@ -2894,7 +2894,6 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct xc_domaininfo_t domaininfo; xc_vcpuinfo_t vcpuinfo; xc_physinfo_t physinfo = { 0 }; - uint64_t *cpumaps; unsigned num_cpuwords; if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) { @@ -2912,9 +2911,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct } num_cpuwords = ((physinfo.max_cpu_id + 64) / 64); - cpumaps = calloc(num_cpuwords * sizeof(*cpumaps), domaininfo.max_vcpu_id + 1); for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) { - ptr->cpumap = cpumaps + (num_cpuwords * *nb_vcpu); + ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap)); if (!ptr->cpumap) { return NULL; } @@ -2935,13 +2933,6 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct ptr->vcpu_time = vcpuinfo.cpu_time; } return ret; -} - -void libxl_free_vcpu_list(libxl_vcpuinfo *vcpu) -{ - if ( vcpu ) - free(vcpu[0].cpumap); - free(vcpu); } int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, diff -r 9dfdd32ce128 -r 59783ebb23f5 tools/libxl/libxl.h --- a/tools/libxl/libxl.h Wed Aug 18 16:01:18 2010 +0100 +++ b/tools/libxl/libxl.h Wed Aug 18 16:01:18 2010 +0100 @@ -432,7 +432,6 @@ int libxl_get_physinfo(libxl_ctx *ctx, l int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo); libxl_vcpuinfo 
*libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid, int *nb_vcpu, int *nrcpus); -void libxl_free_vcpu_list(libxl_vcpuinfo *vcpu); int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid, uint64_t *cpumap, int nrcpus); int libxl_set_vcpucount(libxl_ctx *ctx, uint32_t domid, uint32_t count); diff -r 9dfdd32ce128 -r 59783ebb23f5 tools/libxl/xl_cmdimpl.c --- a/tools/libxl/xl_cmdimpl.c Wed Aug 18 16:01:18 2010 +0100 +++ b/tools/libxl/xl_cmdimpl.c Wed Aug 18 16:01:18 2010 +0100 @@ -3301,49 +3301,58 @@ static void print_vcpuinfo(uint32_t tdom } } +static void print_domain_vcpuinfo(uint32_t domid, uint32_t nr_cpus) +{ + libxl_vcpuinfo *vcpuinfo; + int i, nb_vcpu, nrcpus; + + vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &nrcpus); + + if (!vcpuinfo) { + fprintf(stderr, "libxl_list_vcpu failed.\n"); + return; + } + + for (i = 0; i < nb_vcpu; i++) { + print_vcpuinfo(domid, &vcpuinfo[i], nr_cpus); + libxl_vcpuinfo_destroy(&vcpuinfo[i]); + } + + free(vcpuinfo); +} + static void vcpulist(int argc, char **argv) { - libxl_dominfo *dominfo, *domlist; - libxl_vcpuinfo *vcpuinfo, *list = NULL; + libxl_dominfo *dominfo; libxl_physinfo physinfo; - int nb_vcpu, nb_domain, nrcpus; + int i, nb_domain; if (libxl_get_physinfo(&ctx, &physinfo) != 0) { fprintf(stderr, "libxl_physinfo failed.\n"); goto vcpulist_out; } + printf("%-32s %5s %5s %5s %5s %9s %s\n", "Name", "ID", "VCPU", "CPU", "State", "Time(s)", "CPU Affinity"); if (!argc) { - if (!(domlist = dominfo = libxl_list_domain(&ctx, &nb_domain))) { + if (!(dominfo = libxl_list_domain(&ctx, &nb_domain))) { fprintf(stderr, "libxl_list_domain failed.\n"); goto vcpulist_out; } - for (; nb_domain > 0; --nb_domain, ++dominfo) { - if (!(list = vcpuinfo = libxl_list_vcpu(&ctx, dominfo->domid, &nb_vcpu, - &nrcpus))) { - fprintf(stderr, "libxl_list_vcpu failed.\n"); - goto vcpulist_out; - } - for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) { - print_vcpuinfo(dominfo->domid, vcpuinfo, physinfo.nr_cpus); - } - 
libxl_free_vcpu_list(list); - } - free(domlist); + + for (i = 0; i<nb_domain; i++) + print_domain_vcpuinfo(dominfo[i].domid, physinfo.nr_cpus); + + free(dominfo); + } else { for (; argc > 0; ++argv, --argc) { if (domain_qualifier_to_domid(*argv, &domid, 0) < 0) { fprintf(stderr, "%s is an invalid domain identifier\n", *argv); - } - if (!(list = vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &nrcpus))) { - fprintf(stderr, "libxl_list_vcpu failed.\n"); goto vcpulist_out; } - for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) { - print_vcpuinfo(domid, vcpuinfo, physinfo.nr_cpus); - } - libxl_free_vcpu_list(list); + + print_domain_vcpuinfo(domid, physinfo.nr_cpus); } } vcpulist_out: _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |