
Re: [Xen-devel] [PATCH 1 of 5 V2] libxl: add internal function to get a domain's scheduler



On 29/05/12 14:56, Ian Campbell wrote:
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1338299619 -3600
# Node ID 980a25d6ad12ba0f10fa616255b9382cc14ce69e
# Parent  12537c670e9ea9e7f73747e203ca318107b82cd9
libxl: add internal function to get a domain's scheduler.

This takes into account cpupools.

Add a helper to get the info for a single cpu pool and refactor
libxl_list_cpupool to use this. While there, fix the leaks due to not
disposing the partial list on realloc failure in that function.

Fix the failure of sched_domain_output to free the poolinfo list.
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

As an aside, I would prefer that the clean-up happen in separate patches; having a single patch do several orthogonal things makes it hard for me to tell what goes with what, both for review, and in case someone in the future needs to do archaeology and figure out what's going on. Not really worth a respin just for that, though.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
v2: add libxl_cpupoolinfo_list_free, use it in the libxl_list_cpupool error
     path. Also use it in libxl_cpupool_cpuremove_node (which fixes a leak)
     and in libxl_name_to_cpupoolid, sched_domain_output and main_cpupoollist
     (which also fixes a leak).

     Also in libxl_cpupool_cpuremove_node use libxl_cputopology_list_free
     instead of open-coding it.
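
(For anyone following along: once this is applied, a caller would use the
new public entry points roughly as below. This is a minimal, untested
sketch of my own, not part of the patch -- it assumes the post-patch
libxl.h, a stdio logger, and that pool id 0 exists; error handling is
abbreviated.)

/* Illustrative sketch only -- not from the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <libxl.h>

int main(void)
{
    xentoollog_logger_stdiostream *lg;
    libxl_ctx *ctx = NULL;
    libxl_cpupoolinfo info, *pools;
    int n_pools = 0, p;

    lg = xtl_createlogger_stdiostream(stderr, XTL_ERROR, 0);
    if (!lg || libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0,
                               (xentoollog_logger *)lg))
        return EXIT_FAILURE;

    /* New in this patch: query exactly one pool by id. */
    if (!libxl_cpupool_info(ctx, &info, 0)) {
        printf("pool %u: sched %d, %u domain(s)\n",
               info.poolid, info.sched, info.n_dom);
        libxl_cpupoolinfo_dispose(&info);
    }

    /* Also new: dispose a whole list with one call. */
    pools = libxl_list_cpupool(ctx, &n_pools);
    if (pools) {
        for (p = 0; p < n_pools; p++)
            printf("pool %u uses scheduler %d\n",
                   pools[p].poolid, pools[p].sched);
        libxl_cpupoolinfo_list_free(pools, n_pools);
    }

    libxl_ctx_free(ctx);
    xtl_logger_destroy((xentoollog_logger *)lg);
    return EXIT_SUCCESS;
}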

diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/libxl.c       Tue May 29 14:53:39 2012 +0100
@@ -552,41 +552,70 @@ int libxl_domain_info(libxl_ctx *ctx, li
      return 0;
  }

+static int cpupool_info(libxl__gc *gc,
+                        libxl_cpupoolinfo *info,
+                        uint32_t poolid,
+                        bool exact /* exactly poolid or >= poolid */)
+{
+    xc_cpupoolinfo_t *xcinfo;
+    int rc = ERROR_FAIL;
+
+    xcinfo = xc_cpupool_getinfo(CTX->xch, poolid);
+    if (xcinfo == NULL)
+        return ERROR_FAIL;
+
+    if (exact && xcinfo->cpupool_id != poolid)
+        goto out;
+
+    info->poolid = xcinfo->cpupool_id;
+    info->sched = xcinfo->sched_id;
+    info->n_dom = xcinfo->n_dom;
+    if (libxl_cpumap_alloc(CTX, &info->cpumap))
+        goto out;
+    memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
+
+    rc = 0;
+out:
+    xc_cpupool_infofree(CTX->xch, xcinfo);
+    return rc;
+}
+
+int libxl_cpupool_info(libxl_ctx *ctx,
+                       libxl_cpupoolinfo *info, uint32_t poolid)
+{
+    GC_INIT(ctx);
+    int rc = cpupool_info(gc, info, poolid, true);
+    GC_FREE;
+    return rc;
+}
+
  libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool)
  {
-    libxl_cpupoolinfo *ptr, *tmp;
+    GC_INIT(ctx);
+    libxl_cpupoolinfo info, *ptr, *tmp;
      int i;
-    xc_cpupoolinfo_t *info;
      uint32_t poolid;

      ptr = NULL;

      poolid = 0;
      for (i = 0;; i++) {
-        info = xc_cpupool_getinfo(ctx->xch, poolid);
-        if (info == NULL)
+        if (cpupool_info(gc, &info, poolid, false))
              break;
          tmp = realloc(ptr, (i + 1) * sizeof(libxl_cpupoolinfo));
          if (!tmp) {
            LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
-            free(ptr);
-            xc_cpupool_infofree(ctx->xch, info);
-            return NULL;
+            libxl_cpupoolinfo_list_free(ptr, i);
+            goto out;
          }
          ptr = tmp;
-        ptr[i].poolid = info->cpupool_id;
-        ptr[i].sched = info->sched_id;
-        ptr[i].n_dom = info->n_dom;
-        if (libxl_cpumap_alloc(ctx, &ptr[i].cpumap)) {
-            xc_cpupool_infofree(ctx->xch, info);
-            break;
-        }
-        memcpy(ptr[i].cpumap.map, info->cpumap, ptr[i].cpumap.size);
-        poolid = info->cpupool_id + 1;
-        xc_cpupool_infofree(ctx->xch, info);
+        ptr[i] = info;
+        poolid = info.poolid + 1;
      }

      *nb_pool = i;
+out:
+    GC_FREE;
      return ptr;
  }

@@ -3932,14 +3961,10 @@ int libxl_cpupool_cpuremove_node(libxl_c
          }
      }

-    for (cpu = 0; cpu < nr_cpus; cpu++)
-        libxl_cputopology_dispose(&topology[cpu]);
-    free(topology);
+    libxl_cputopology_list_free(topology, nr_cpus);

  out:
-    for (p = 0; p < n_pools; p++) {
-        libxl_cpupoolinfo_dispose(poolinfo + p);
-    }
+    libxl_cpupoolinfo_list_free(poolinfo, n_pools);

      return ret;
  }
diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/libxl.h       Tue May 29 14:53:39 2012 +0100
@@ -576,6 +576,7 @@ int libxl_domain_info(libxl_ctx*, libxl_
  libxl_dominfo * libxl_list_domain(libxl_ctx*, int *nb_domain);
  void libxl_dominfo_list_free(libxl_dominfo *list, int nr);
  libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx*, int *nb_pool);
+void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nr);
  libxl_vminfo * libxl_list_vm(libxl_ctx *ctx, int *nb_vm);
  void libxl_vminfo_list_free(libxl_vminfo *list, int nr);

@@ -829,6 +830,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
  int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
  int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
  int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);

  int libxl_domid_valid_guest(uint32_t domid);

diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c   Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/libxl_dom.c   Tue May 29 14:53:39 2012 +0100
@@ -93,6 +93,41 @@ int libxl__domain_shutdown_reason(libxl_
      return (info.flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
  }

+int libxl__domain_cpupool(libxl__gc *gc, uint32_t domid)
+{
+    xc_domaininfo_t info;
+    int ret;
+
+    ret = xc_domain_getinfolist(CTX->xch, domid, 1, &info);
+    if (ret != 1)
+        return ERROR_FAIL;
+    if (info.domain != domid)
+        return ERROR_FAIL;
+
+    return info.cpupool;
+}
+
+libxl_scheduler libxl__domain_scheduler(libxl__gc *gc, uint32_t domid)
+{
+    int cpupool = libxl__domain_cpupool(gc, domid);
+    libxl_cpupoolinfo poolinfo;
+    libxl_scheduler sched = LIBXL_SCHEDULER_UNKNOWN;
+    int rc;
+
+    if (cpupool < 0)
+        return sched;
+
+    rc = libxl_cpupool_info(CTX, &poolinfo, cpupool);
+    if (rc < 0)
+        goto out;
+
+    sched = poolinfo.sched;
+
+out:
+    libxl_cpupoolinfo_dispose(&poolinfo);
+    return sched;
+}
+
  int libxl__build_pre(libxl__gc *gc, uint32_t domid,
                libxl_domain_build_info *info, libxl__domain_build_state *state)
  {
diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h      Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/libxl_internal.h      Tue May 29 14:53:39 2012 +0100
@@ -738,6 +738,8 @@ _hidden int libxl__file_reference_unmap(
  /* from xl_dom */
  _hidden libxl_domain_type libxl__domain_type(libxl__gc *gc, uint32_t domid);
  _hidden int libxl__domain_shutdown_reason(libxl__gc *gc, uint32_t domid);
+_hidden int libxl__domain_cpupool(libxl__gc *gc, uint32_t domid);
+_hidden libxl_scheduler libxl__domain_scheduler(libxl__gc *gc, uint32_t domid);
  _hidden int libxl__sched_set_params(libxl__gc *gc, uint32_t domid, libxl_sched_params *scparams);
  #define LIBXL__DOMAIN_IS_TYPE(gc, domid, type) \
      libxl__domain_type((gc), (domid)) == LIBXL_DOMAIN_TYPE_##type
diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl       Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/libxl_types.idl       Tue May 29 14:53:39 2012 +0100
@@ -107,7 +107,9 @@ libxl_bios_type = Enumeration("bios_type
      ])

  # Consistent with values defined in domctl.h
+# Except unknown which we have made up
  libxl_scheduler = Enumeration("scheduler", [
+    (0, "unknown"),
      (4, "sedf"),
      (5, "credit"),
      (6, "credit2"),
diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/libxl_utils.c Tue May 29 14:53:39 2012 +0100
@@ -133,9 +133,8 @@ int libxl_name_to_cpupoolid(libxl_ctx *c
              }
              free(poolname);
          }
-        libxl_cpupoolinfo_dispose(poolinfo + i);
      }
-    free(poolinfo);
+    libxl_cpupoolinfo_list_free(poolinfo, nb_pools);
      return ret;
  }

@@ -686,6 +685,14 @@ void libxl_vminfo_list_free(libxl_vminfo
      free(list);
  }

+void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nr)
+{
+    int i;
+    for (i = 0; i < nr; i++)
+        libxl_cpupoolinfo_dispose(&list[i]);
+    free(list);
+}
+
  int libxl_domid_valid_guest(uint32_t domid)
  {
      /* returns 1 if the value _could_ be a valid guest domid, 0 otherwise
diff -r 12537c670e9e -r 980a25d6ad12 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Tue May 29 12:56:57 2012 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Tue May 29 14:53:39 2012 +0100
@@ -4608,11 +4608,8 @@ static int sched_domain_output(libxl_sch
                  break;
          }
      }
-    if (poolinfo) {
-        for (p = 0; p < n_pools; p++) {
-            libxl_cpupoolinfo_dispose(poolinfo + p);
-        }
-    }
+    if (poolinfo)
+        libxl_cpupoolinfo_list_free(poolinfo, n_pools);
      return 0;
  }

@@ -6119,8 +6116,9 @@ int main_cpupoollist(int argc, char **ar
                  printf("\n");
              }
          }
-        libxl_cpupoolinfo_dispose(poolinfo + p);
-    }
+    }
+
+    libxl_cpupoolinfo_list_free(poolinfo, n_pools);

      return ret;
  }
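
As an illustration of the intended internal call pattern (a hypothetical
caller of my own, not from the patch; it assumes the usual
libxl_internal.h environment, i.e. GC_INIT/GC_FREE and the generated
LIBXL_SCHEDULER_* constants):

/* Hypothetical internal caller, for illustration only. */
static int example_check_sched(libxl_ctx *ctx, uint32_t domid)
{
    GC_INIT(ctx);
    int rc = 0;
    libxl_scheduler sched = libxl__domain_scheduler(gc, domid);

    switch (sched) {
    case LIBXL_SCHEDULER_SEDF:
    case LIBXL_SCHEDULER_CREDIT:
    case LIBXL_SCHEDULER_CREDIT2:
        /* scheduler-specific work would go here */
        break;
    case LIBXL_SCHEDULER_UNKNOWN:
    default:
        /* the domain's cpupool (and hence scheduler) could not be resolved */
        rc = ERROR_INVAL;
        break;
    }

    GC_FREE;
    return rc;
}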

