
[Xen-changelog] [xen-unstable] libxl, libxc: introduce libxl_get_numainfo()


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 06 Jul 2012 20:44:07 +0000
  • Delivery-date: Fri, 06 Jul 2012 20:44:20 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Dario Faggioli <raistlin@xxxxxxxx>
# Date 1341577061 -3600
# Node ID e0f04c3978f75192a436736d45c7a11405606ebb
# Parent  e9aa6da372bd76774d0d63ad17c43df34673ce30
libxl,libxc: introduce libxl_get_numainfo()

Make some NUMA node information available to the toolstack. Achieve
this by means of xc_numainfo(), which exposes the total memory size
and the amount of free memory of each node, as well as the distance
of each node from all the others.

Properly exposing the distances requires the IDL to support arrays.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---


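As a rough sketch of the intended use (illustrative only, not part of the
patch; it assumes an initialised libxl_ctx plus the usual libxl.h, stdio.h
and inttypes.h includes), a toolstack caller might do:

    /* Illustrative caller: print each node's memory and its distance
     * to every other node. */
    static void dump_numainfo(libxl_ctx *ctx)
    {
        int i, j, nr = 0;
        libxl_numainfo *info = libxl_get_numainfo(ctx, &nr);

        if (info == NULL)
            return; /* libxl_get_numainfo() logs its own errors */
        for (i = 0; i < nr; i++) {
            printf("node %d: size=%"PRIu64" free=%"PRIu64"\n",
                   i, info[i].size, info[i].free);
            for (j = 0; j < info[i].num_dists; j++)
                printf("  distance to node %d: %"PRIu32"\n",
                       j, info[i].dists[j]);
        }
        libxl_numainfo_list_free(info, nr);
    }
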
diff -r e9aa6da372bd -r e0f04c3978f7 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Fri Jul 06 13:17:40 2012 +0100
+++ b/tools/libxc/xc_misc.c     Fri Jul 06 13:17:41 2012 +0100
@@ -35,6 +35,20 @@ int xc_get_max_cpus(xc_interface *xch)
     return max_cpus;
 }
 
+int xc_get_max_nodes(xc_interface *xch)
+{
+    static int max_nodes = 0;
+    xc_physinfo_t physinfo;
+
+    if ( max_nodes )
+        return max_nodes;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        max_nodes = physinfo.max_node_id + 1;
+
+    return max_nodes;
+}
+
 int xc_get_cpumap_size(xc_interface *xch)
 {
     return (xc_get_max_cpus(xch) + 7) / 8;
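
xc_get_max_nodes() mirrors xc_get_max_cpus() just above: the result is
cached in a function-local static, since the hypervisor's maximum node
count cannot change at runtime. A nodemap-size helper analogous to
xc_get_cpumap_size() would follow the same pattern; a sketch (hypothetical,
not part of this patch):

    /* Hypothetical analogue of xc_get_cpumap_size(); not in this patch. */
    int xc_get_nodemap_size(xc_interface *xch)
    {
        /* one bit per node, rounded up to whole bytes */
        return (xc_get_max_nodes(xch) + 7) / 8;
    }
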
diff -r e9aa6da372bd -r e0f04c3978f7 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri Jul 06 13:17:40 2012 +0100
+++ b/tools/libxc/xenctrl.h     Fri Jul 06 13:17:41 2012 +0100
@@ -329,6 +329,12 @@ int xc_get_cpumap_size(xc_interface *xch
 /* allocate a cpumap */
 xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
 
+/*
+ * NODEMAP handling
+ */
+/* return maximum number of NUMA nodes the hypervisor supports */
+int xc_get_max_nodes(xc_interface *xch);
+
 /*
  * DOMAIN DEBUGGING FUNCTIONS
  */
diff -r e9aa6da372bd -r e0f04c3978f7 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Fri Jul 06 13:17:40 2012 +0100
+++ b/tools/libxl/libxl.c       Fri Jul 06 13:17:41 2012 +0100
@@ -3298,6 +3298,75 @@ fail:
     return ret;
 }
 
+libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
+{
+    GC_INIT(ctx);
+    xc_numainfo_t ninfo;
+    DECLARE_HYPERCALL_BUFFER(xc_node_to_memsize_t, memsize);
+    DECLARE_HYPERCALL_BUFFER(xc_node_to_memfree_t, memfree);
+    DECLARE_HYPERCALL_BUFFER(uint32_t, node_dists);
+    libxl_numainfo *ret = NULL;
+    int i, j, max_nodes;
+
+    max_nodes = libxl_get_max_nodes(ctx);
+    if (max_nodes == 0) {
+        LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of NUMA nodes");
+        goto out;
+    }
+
+    memsize = xc_hypercall_buffer_alloc
+        (ctx->xch, memsize, sizeof(*memsize) * max_nodes);
+    memfree = xc_hypercall_buffer_alloc
+        (ctx->xch, memfree, sizeof(*memfree) * max_nodes);
+    node_dists = xc_hypercall_buffer_alloc
+        (ctx->xch, node_dists, sizeof(*node_dists) * max_nodes * max_nodes);
+    if ((memsize == NULL) || (memfree == NULL) || (node_dists == NULL)) {
+        LIBXL__LOG_ERRNOVAL(ctx, XTL_ERROR, ENOMEM,
+                            "Unable to allocate hypercall arguments");
+        goto fail;
+    }
+
+    set_xen_guest_handle(ninfo.node_to_memsize, memsize);
+    set_xen_guest_handle(ninfo.node_to_memfree, memfree);
+    set_xen_guest_handle(ninfo.node_to_node_distance, node_dists);
+    ninfo.max_node_index = max_nodes - 1;
+    if (xc_numainfo(ctx->xch, &ninfo) != 0) {
+        LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting numainfo");
+        goto fail;
+    }
+
+    if (ninfo.max_node_index < max_nodes - 1)
+        max_nodes = ninfo.max_node_index + 1;
+
+    *nr = max_nodes;
+
+    ret = libxl__zalloc(NOGC, sizeof(libxl_numainfo) * max_nodes);
+    for (i = 0; i < max_nodes; i++)
+        ret[i].dists = libxl__calloc(NOGC, max_nodes, sizeof(*node_dists));
+
+    for (i = 0; i < max_nodes; i++) {
+#define V(mem, i) ((mem)[i] == INVALID_NUMAINFO_ID ? \
+    LIBXL_NUMAINFO_INVALID_ENTRY : (mem)[i])
+        ret[i].size = V(memsize, i);
+        ret[i].free = V(memfree, i);
+        ret[i].num_dists = max_nodes;
+        for (j = 0; j < ret[i].num_dists; j++)
+            ret[i].dists[j] = V(node_dists, i * max_nodes + j);
+#undef V
+    }
+
+ fail:
+    xc_hypercall_buffer_free(ctx->xch, memsize);
+    xc_hypercall_buffer_free(ctx->xch, memfree);
+    xc_hypercall_buffer_free(ctx->xch, node_dists);
+
+ out:
+    GC_FREE;
+    return ret;
+}
+
 const libxl_version_info* libxl_get_version_info(libxl_ctx *ctx)
 {
     union {
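
The hypervisor fills node_dists as a flattened max_nodes x max_nodes matrix
in row-major order, which is why the copy loop indexes it as
node_dists[i * max_nodes + j]; the V macro then translates the hypervisor's
INVALID_NUMAINFO_ID sentinel into the public LIBXL_NUMAINFO_INVALID_ENTRY so
callers never see the raw ABI value. Reading a single distance back out of
the returned array is then just (illustrative helper, not part of the patch):

    /* Illustrative accessor: distance from node a to node b, using the
     * array returned by libxl_get_numainfo(). */
    static uint32_t numainfo_dist(const libxl_numainfo *info, int a, int b)
    {
        return info[a].dists[b]; /* row a, column b of the matrix */
    }
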
diff -r e9aa6da372bd -r e0f04c3978f7 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Fri Jul 06 13:17:40 2012 +0100
+++ b/tools/libxl/libxl.h       Fri Jul 06 13:17:41 2012 +0100
@@ -532,6 +532,9 @@ int libxl_domain_preserve(libxl_ctx *ctx
 /* get max. number of cpus supported by hypervisor */
 int libxl_get_max_cpus(libxl_ctx *ctx);
 
+/* get max. number of NUMA nodes supported by hypervisor */
+int libxl_get_max_nodes(libxl_ctx *ctx);
+
 int libxl_domain_rename(libxl_ctx *ctx, uint32_t domid,
                         const char *old_name, const char *new_name);
 
@@ -604,6 +607,10 @@ void libxl_vminfo_list_free(libxl_vminfo
 libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out);
 void libxl_cputopology_list_free(libxl_cputopology *, int nb_cpu);
 
+#define LIBXL_NUMAINFO_INVALID_ENTRY (~(uint32_t)0)
+libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr);
+void libxl_numainfo_list_free(libxl_numainfo *, int nr);
+
 libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
                                 int *nb_vcpu, int *nr_vcpus_out);
 void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr_vcpus);
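
Entries the hypervisor marks as invalid come back as
LIBXL_NUMAINFO_INVALID_ENTRY, so callers should be prepared to skip them;
an illustrative fragment (not part of the patch):

    for (i = 0; i < nr; i++) {
        if (info[i].size == LIBXL_NUMAINFO_INVALID_ENTRY)
            continue; /* node i reported no data */
        /* ... use info[i].size, info[i].free, info[i].dists ... */
    }
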
diff -r e9aa6da372bd -r e0f04c3978f7 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl       Fri Jul 06 13:17:40 2012 +0100
+++ b/tools/libxl/libxl_types.idl       Fri Jul 06 13:17:41 2012 +0100
@@ -433,6 +433,15 @@ libxl_physinfo = Struct("physinfo", [
     ("cap_hvm_directio", bool),
     ], dir=DIR_OUT)
 
+# NUMA node characteristics: size is the total amount of memory the node
+# has, free is how much of that memory is free, and dists is an array of
+# distances from this node to each of the other nodes.
+libxl_numainfo = Struct("numainfo", [
+    ("size", uint64),
+    ("free", uint64),
+    ("dists", Array(uint32, "num_dists")),
+    ], dir=DIR_OUT)
+
 libxl_cputopology = Struct("cputopology", [
     ("core", uint32),
     ("socket", uint32),
diff -r e9aa6da372bd -r e0f04c3978f7 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Fri Jul 06 13:17:40 2012 +0100
+++ b/tools/libxl/libxl_utils.c Fri Jul 06 13:17:41 2012 +0100
@@ -572,6 +572,11 @@ int libxl_get_max_cpus(libxl_ctx *ctx)
     return xc_get_max_cpus(ctx->xch);
 }
 
+int libxl_get_max_nodes(libxl_ctx *ctx)
+{
+    return xc_get_max_nodes(ctx->xch);
+}
+
 int libxl__enum_from_string(const libxl_enum_string_table *t,
                             const char *s, int *e)
 {
@@ -594,6 +599,14 @@ void libxl_cputopology_list_free(libxl_c
     free(list);
 }
 
+void libxl_numainfo_list_free(libxl_numainfo *list, int nr)
+{
+    int i;
+    for (i = 0; i < nr; i++)
+        libxl_numainfo_dispose(&list[i]);
+    free(list);
+}
+
 void libxl_vcpuinfo_list_free(libxl_vcpuinfo *list, int nr)
 {
     int i;
diff -r e9aa6da372bd -r e0f04c3978f7 xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h       Fri Jul 06 13:17:40 2012 +0100
+++ b/xen/include/public/sysctl.h       Fri Jul 06 13:17:41 2012 +0100
@@ -484,6 +484,7 @@ typedef struct xen_sysctl_topologyinfo x
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t);
 
 /* XEN_SYSCTL_numainfo */
+#define INVALID_NUMAINFO_ID (~0U)
 struct xen_sysctl_numainfo {
     /*
      * IN: maximum addressable entry in the caller-provided arrays.
