
[Xen-changelog] [xen-unstable] libxl: introduce some node map helpers


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 06 Jul 2012 20:44:09 +0000
  • Delivery-date: Fri, 06 Jul 2012 20:44:20 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Dario Faggioli <raistlin@xxxxxxxx>
# Date 1341577064 -3600
# Node ID 03a60d7e56fdd9c0ba2d05234b972aa63019680f
# Parent  61742ab1a675618c963e0f4d10950bf87cc5a57a
libxl: introduce some node map helpers

This allows for allocating a node-specific libxl_bitmap, as is
already done for cpu numbers and maps. Helper functions to
convert a node map to its corresponding cpu map and vice versa
are also implemented.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Committed-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
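
For reference, here is a minimal usage sketch of the new helpers (not
part of the patch): it expands a node number into the set of cpus
spanned by that node. It assumes an already-initialised libxl_ctx and
keeps error handling to a minimum; the function name and the 'node'
argument are purely illustrative.

    #include <libxl.h>
    #include <libxl_utils.h>

    /* Sketch only: expand 'node' into the cpus it spans, using the
     * helpers introduced by this patch. */
    static int node_to_cpus_example(libxl_ctx *ctx, int node)
    {
        libxl_bitmap nodemap = { 0 }, cpumap = { 0 };
        int rc;

        /* 0 asks libxl to size the maps from the host topology */
        rc = libxl_node_bitmap_alloc(ctx, &nodemap, 0);
        if (rc) goto out;
        rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0);
        if (rc) goto out;

        libxl_bitmap_set(&nodemap, node);

        /* on success, cpumap holds every cpu belonging to 'node' */
        rc = libxl_nodemap_to_cpumap(ctx, &nodemap, &cpumap);

     out:
        libxl_bitmap_dispose(&cpumap);
        libxl_bitmap_dispose(&nodemap);
        return rc;
    }
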


diff -r 61742ab1a675 -r 03a60d7e56fd tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Fri Jul 06 13:17:43 2012 +0100
+++ b/tools/libxl/libxl_utils.c Fri Jul 06 13:17:44 2012 +0100
@@ -588,6 +588,50 @@ char *libxl_bitmap_to_hex_string(libxl_c
     return q;
 }
 
+int libxl_nodemap_to_cpumap(libxl_ctx *ctx,
+                            const libxl_bitmap *nodemap,
+                            libxl_bitmap *cpumap)
+{
+    libxl_cputopology *tinfo = NULL;
+    int nr_cpus, i, rc = 0;
+
+    tinfo = libxl_get_cpu_topology(ctx, &nr_cpus);
+    if (tinfo == NULL) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    libxl_bitmap_set_none(cpumap);
+    for (i = 0; i < nr_cpus; i++) {
+        if (libxl_bitmap_test(nodemap, tinfo[i].node))
+            libxl_bitmap_set(cpumap, i);
+    }
+ out:
+    libxl_cputopology_list_free(tinfo, nr_cpus);
+    return rc;
+}
+
+int libxl_cpumap_to_nodemap(libxl_ctx *ctx,
+                            const libxl_bitmap *cpumap,
+                            libxl_bitmap *nodemap)
+{
+    libxl_cputopology *tinfo = NULL;
+    int nr_cpus, i, rc = 0;
+
+    tinfo = libxl_get_cpu_topology(ctx, &nr_cpus);
+    if (tinfo == NULL) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    libxl_bitmap_set_none(nodemap);
+    libxl_for_each_set_bit(i, *cpumap)
+        libxl_bitmap_set(nodemap, tinfo[i].node);
+ out:
+    libxl_cputopology_list_free(tinfo, nr_cpus);
+    return rc;
+}
+
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
     return xc_get_max_cpus(ctx->xch);
diff -r 61742ab1a675 -r 03a60d7e56fd tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h Fri Jul 06 13:17:43 2012 +0100
+++ b/tools/libxl/libxl_utils.h Fri Jul 06 13:17:44 2012 +0100
@@ -104,6 +104,29 @@ static inline int libxl_cpu_bitmap_alloc
     return libxl_bitmap_alloc(ctx, cpumap, max_cpus);
 }
 
+static inline int libxl_node_bitmap_alloc(libxl_ctx *ctx,
+                                          libxl_bitmap *nodemap,
+                                          int max_nodes)
+{
+    if (max_nodes < 0)
+        return ERROR_INVAL;
+    if (max_nodes == 0)
+        max_nodes = libxl_get_max_nodes(ctx);
+    if (max_nodes == 0)
+        return ERROR_FAIL;
+
+    return libxl_bitmap_alloc(ctx, nodemap, max_nodes);
+}
+
+/* Populate cpumap with the cpus spanned by the nodes in nodemap */
+int libxl_nodemap_to_cpumap(libxl_ctx *ctx,
+                            const libxl_bitmap *nodemap,
+                            libxl_bitmap *cpumap);
+/* Populate nodemap with the nodes of the cpus in cpumap */
+int libxl_cpumap_to_nodemap(libxl_ctx *ctx,
+                            const libxl_bitmap *cpumap,
+                            libxl_bitmap *nodemap);
+
  static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
     return (s + 1023) / 1024;
 }
