
Re: [Xen-devel] [PATCH 04 of 10 v3] libxl: rename libxl_cpumap to libxl_bitmap



On Wed, 2012-07-04 at 17:18 +0100, Dario Faggioli wrote:
> # HG changeset patch
> # User Dario Faggioli <raistlin@xxxxxxxx>
> # Date 1341416323 -7200
> # Node ID cfdd6d53f3dd3c6aa325fe6d8a17e4089daafae5
> # Parent  f1227d5a82e56d10e302aec4c3717d281718a349
> libxl: rename libxl_cpumap to libxl_bitmap
> 
> And leave to the caller the burden of knowing and remembering what kind
> of bitmap each instance of libxl_bitmap is.
> 
> This is basically just some s/libxl_cpumap/libxl_bitmap/ (and some other
> related interface name substitutions, e.g., libxl_for_each_cpu) in a bunch
> of files, with no real functional change involved.
> 
> A specific allocation helper, libxl_cpu_bitmap_alloc(), is introduced
> besides libxl_bitmap_alloc(). It is meant to replace the old
> libxl_cpumap_alloc(), and is simply easier to use in cases where one
> wants to allocate a libxl_bitmap that is going to serve as a cpu map.
> 
> This is because we want to be able to deal with both cpu and NUMA node
> maps, but we don't want to duplicate all the various helpers and wrappers.
> 
> While at it, add the usual initialization function, common to all libxl
> data structures.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxxxxx>

Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
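
For readers following the rename, here is a minimal sketch of how the
reworked API is meant to be used from an application, based only on the
helpers this patch introduces (libxl_bitmap_init(), libxl_cpu_bitmap_alloc(),
libxl_bitmap_set(), libxl_for_each_set_bit(), libxl_bitmap_dispose()). The
function name, the domid and the cpu numbers are made up for illustration,
and error handling is trimmed:

    #include <stdio.h>
    #include "libxl.h"
    #include "libxl_utils.h"

    /* Sketch: pin vcpu 0 of a guest to host cpus 2 and 3 using the
     * renamed bitmap helpers.  Assumes a valid libxl_ctx. */
    static int pin_vcpu0_to_cpus_2_and_3(libxl_ctx *ctx, uint32_t domid)
    {
        libxl_bitmap cpumap;
        int i, rc;

        libxl_bitmap_init(&cpumap);                   /* new init helper */
        rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0); /* 0 == size for all host cpus */
        if (rc) return rc;

        libxl_bitmap_set_none(&cpumap);
        libxl_bitmap_set(&cpumap, 2);
        libxl_bitmap_set(&cpumap, 3);

        libxl_for_each_set_bit(i, cpumap)             /* renamed iterator */
            printf("cpu %d is in the map\n", i);

        rc = libxl_set_vcpuaffinity(ctx, domid, 0, &cpumap);

        libxl_bitmap_dispose(&cpumap);                /* caller frees the map */
        return rc;
    }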

> 
> ---
> Changes from v2:
>  * rebased on top of 51d2daabd428 (libxl: allow setting more than 31 vcpus).
>  * Fixed one missing rename of cpumap into bitmap.
>  * Added libxl_bitmap_init().
> 
> Changes from v1:
>  * this patch replaces "libxl: abstract libxl_cpumap to just libxl_map"
>    as it directly changes the name of the old type instead of adding one
>    more abstraction layer.
> 
> diff --git a/tools/libxl/gentest.py b/tools/libxl/gentest.py
> --- a/tools/libxl/gentest.py
> +++ b/tools/libxl/gentest.py
> @@ -20,7 +20,7 @@ def randomize_case(s):
>  def randomize_enum(e):
>      return random.choice([v.name for v in e.values])
> 
> -handcoded = ["libxl_cpumap", "libxl_key_value_list",
> +handcoded = ["libxl_bitmap", "libxl_key_value_list",
>               "libxl_cpuid_policy_list", "libxl_string_list"]
> 
>  def gen_rand_init(ty, v, indent = "    ", parent = None):
> @@ -117,16 +117,16 @@ static void rand_bytes(uint8_t *p, size_
>          p[i] = rand() % 256;
>  }
> 
> -static void libxl_cpumap_rand_init(libxl_cpumap *cpumap)
> +static void libxl_bitmap_rand_init(libxl_bitmap *bitmap)
>  {
>      int i;
> -    cpumap->size = rand() % 16;
> -    cpumap->map = calloc(cpumap->size, sizeof(*cpumap->map));
> -    libxl_for_each_cpu(i, *cpumap) {
> +    bitmap->size = rand() % 16;
> +    bitmap->map = calloc(bitmap->size, sizeof(*bitmap->map));
> +    libxl_for_each_bit(i, *bitmap) {
>          if (rand() % 2)
> -            libxl_cpumap_set(cpumap, i);
> +            libxl_bitmap_set(bitmap, i);
>          else
> -            libxl_cpumap_reset(cpumap, i);
> +            libxl_bitmap_reset(bitmap, i);
>      }
>  }
> 
> diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> --- a/tools/libxl/libxl.c
> +++ b/tools/libxl/libxl.c
> @@ -586,7 +586,7 @@ static int cpupool_info(libxl__gc *gc,
>      info->poolid = xcinfo->cpupool_id;
>      info->sched = xcinfo->sched_id;
>      info->n_dom = xcinfo->n_dom;
> -    rc = libxl_cpumap_alloc(CTX, &info->cpumap, 0);
> +    rc = libxl_cpu_bitmap_alloc(CTX, &info->cpumap, 0);
>      if (rc)
>      {
>          LOG(ERROR, "unable to allocate cpumap %d\n", rc);
> @@ -3431,7 +3431,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
>      }
> 
>      for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
> -        if (libxl_cpumap_alloc(ctx, &ptr->cpumap, 0)) {
> +        if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap, 0)) {
>              LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap");
>              return NULL;
>          }
> @@ -3454,7 +3454,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
>  }
> 
>  int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
> -                           libxl_cpumap *cpumap)
> +                           libxl_bitmap *cpumap)
>  {
>      if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
>          LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
> @@ -3464,7 +3464,7 @@ int libxl_set_vcpuaffinity(libxl_ctx *ct
>  }
> 
>  int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
> -                               unsigned int max_vcpus, libxl_cpumap *cpumap)
> +                               unsigned int max_vcpus, libxl_bitmap *cpumap)
>  {
>      int i, rc = 0;
> 
> @@ -3478,7 +3478,7 @@ int libxl_set_vcpuaffinity_all(libxl_ctx
>      return rc;
>  }
> 
> -int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_cpumap *cpumap)
> +int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
>  {
>      GC_INIT(ctx);
>      libxl_dominfo info;
> @@ -3498,7 +3498,7 @@ retry_transaction:
>      for (i = 0; i <= info.vcpu_max_id; i++)
>          libxl__xs_write(gc, t,
>                         libxl__sprintf(gc, "%s/cpu/%u/availability", dompath, i),
> -                       "%s", libxl_cpumap_test(cpumap, i) ? "online" : "offline");
> +                       "%s", libxl_bitmap_test(cpumap, i) ? "online" : "offline");
>      if (!xs_transaction_end(ctx->xsh, t, 0)) {
>          if (errno == EAGAIN)
>              goto retry_transaction;
> @@ -4094,7 +4094,7 @@ int libxl_tmem_freeable(libxl_ctx *ctx)
>      return rc;
>  }
> 
> -int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap)
> +int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap)
>  {
>      int ncpus;
> 
> @@ -4113,7 +4113,7 @@ int libxl_get_freecpus(libxl_ctx *ctx, l
> 
>  int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
>                           libxl_scheduler sched,
> -                         libxl_cpumap cpumap, libxl_uuid *uuid,
> +                         libxl_bitmap cpumap, libxl_uuid *uuid,
>                           uint32_t *poolid)
>  {
>      GC_INIT(ctx);
> @@ -4136,8 +4136,8 @@ int libxl_cpupool_create(libxl_ctx *ctx,
>          return ERROR_FAIL;
>      }
> 
> -    libxl_for_each_cpu(i, cpumap)
> -        if (libxl_cpumap_test(&cpumap, i)) {
> +    libxl_for_each_bit(i, cpumap)
> +        if (libxl_bitmap_test(&cpumap, i)) {
>              rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
>              if (rc) {
>                  LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
> @@ -4172,7 +4172,7 @@ int libxl_cpupool_destroy(libxl_ctx *ctx
>      int rc, i;
>      xc_cpupoolinfo_t *info;
>      xs_transaction_t t;
> -    libxl_cpumap cpumap;
> +    libxl_bitmap cpumap;
> 
>      info = xc_cpupool_getinfo(ctx->xch, poolid);
>      if (info == NULL) {
> @@ -4184,13 +4184,13 @@ int libxl_cpupool_destroy(libxl_ctx *ctx
>      if ((info->cpupool_id != poolid) || (info->n_dom))
>          goto out;
> 
> -    rc = libxl_cpumap_alloc(ctx, &cpumap, 0);
> +    rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0);
>      if (rc)
>          goto out;
> 
>      memcpy(cpumap.map, info->cpumap, cpumap.size);
> -    libxl_for_each_cpu(i, cpumap)
> -        if (libxl_cpumap_test(&cpumap, i)) {
> +    libxl_for_each_bit(i, cpumap)
> +        if (libxl_bitmap_test(&cpumap, i)) {
>              rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
>              if (rc) {
>                  LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
> @@ -4219,7 +4219,7 @@ int libxl_cpupool_destroy(libxl_ctx *ctx
>      rc = 0;
> 
>  out1:
> -    libxl_cpumap_dispose(&cpumap);
> +    libxl_bitmap_dispose(&cpumap);
>  out:
>      xc_cpupool_infofree(ctx->xch, info);
>      GC_FREE;
> @@ -4287,7 +4287,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
>  {
>      int rc = 0;
>      int cpu, nr;
> -    libxl_cpumap freemap;
> +    libxl_bitmap freemap;
>      libxl_cputopology *topology;
> 
>      if (libxl_get_freecpus(ctx, &freemap)) {
> @@ -4302,7 +4302,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
> 
>      *cpus = 0;
>      for (cpu = 0; cpu < nr; cpu++) {
> -        if (libxl_cpumap_test(&freemap, cpu) && (topology[cpu].node == node) &&
> +        if (libxl_bitmap_test(&freemap, cpu) && (topology[cpu].node == node) &&
>              !libxl_cpupool_cpuadd(ctx, poolid, cpu)) {
>                  (*cpus)++;
>          }
> @@ -4311,7 +4311,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
> 
>      free(topology);
>  out:
> -    libxl_cpumap_dispose(&freemap);
> +    libxl_bitmap_dispose(&freemap);
>      return rc;
>  }
> 
> @@ -4353,7 +4353,7 @@ int libxl_cpupool_cpuremove_node(libxl_c
>          if (poolinfo[p].poolid == poolid) {
>              for (cpu = 0; cpu < nr_cpus; cpu++) {
>                  if ((topology[cpu].node == node) &&
> -                    libxl_cpumap_test(&poolinfo[p].cpumap, cpu) &&
> +                    libxl_bitmap_test(&poolinfo[p].cpumap, cpu) &&
>                      !libxl_cpupool_cpuremove(ctx, poolid, cpu)) {
>                          (*cpus)++;
>                  }
> diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
> --- a/tools/libxl/libxl.h
> +++ b/tools/libxl/libxl.h
> @@ -285,8 +285,9 @@ typedef uint64_t libxl_ev_user;
>  typedef struct {
>      uint32_t size;          /* number of bytes in map */
>      uint8_t *map;
> -} libxl_cpumap;
> -void libxl_cpumap_dispose(libxl_cpumap *map);
> +} libxl_bitmap;
> +void libxl_bitmap_init(libxl_bitmap *map);
> +void libxl_bitmap_dispose(libxl_bitmap *map);
> 
>  /* libxl_cpuid_policy_list is a dynamic array storing CPUID policies
>   * for multiple leafs. It is terminated with an entry holding
> @@ -790,10 +791,10 @@ int libxl_userdata_retrieve(libxl_ctx *c
> 
>  int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo);
>  int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
> -                           libxl_cpumap *cpumap);
> +                           libxl_bitmap *cpumap);
>  int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
> -                               unsigned int max_vcpus, libxl_cpumap *cpumap);
> -int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_cpumap *cpumap);
> +                               unsigned int max_vcpus, libxl_bitmap *cpumap);
> +int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap);
> 
>  libxl_scheduler libxl_get_scheduler(libxl_ctx *ctx);
> 
> @@ -843,10 +844,10 @@ int libxl_tmem_shared_auth(libxl_ctx *ct
>                             int auth);
>  int libxl_tmem_freeable(libxl_ctx *ctx);
> 
> -int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap);
> +int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap);
>  int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
>                           libxl_scheduler sched,
> -                         libxl_cpumap cpumap, libxl_uuid *uuid,
> +                         libxl_bitmap cpumap, libxl_uuid *uuid,
>                           uint32_t *poolid);
>  int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
>  int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
> diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
> --- a/tools/libxl/libxl_create.c
> +++ b/tools/libxl/libxl_create.c
> @@ -203,16 +203,16 @@ int libxl__domain_build_info_setdefault(
>      if (!b_info->max_vcpus)
>          b_info->max_vcpus = 1;
>      if (!b_info->avail_vcpus.size) {
> -        if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1))
> +        if (libxl_cpu_bitmap_alloc(CTX, &b_info->avail_vcpus, 1))
>              return ERROR_FAIL;
> -        libxl_cpumap_set(&b_info->avail_vcpus, 0);
> +        libxl_bitmap_set(&b_info->avail_vcpus, 0);
>      } else if (b_info->avail_vcpus.size > HVM_MAX_VCPUS)
>          return ERROR_FAIL;
> 
>      if (!b_info->cpumap.size) {
> -        if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0))
> +        if (libxl_cpu_bitmap_alloc(CTX, &b_info->cpumap, 0))
>              return ERROR_FAIL;
> -        libxl_cpumap_set_any(&b_info->cpumap);
> +        libxl_bitmap_set_any(&b_info->cpumap);
>      }
> 
>      if (b_info->max_memkb == LIBXL_MEMKB_DEFAULT)
> diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
> --- a/tools/libxl/libxl_dm.c
> +++ b/tools/libxl/libxl_dm.c
> @@ -208,8 +208,8 @@ static char ** libxl__build_device_model
>                                NULL);
>          }
> 
> -        nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
> -        s = libxl_cpumap_to_hex_string(CTX, &b_info->avail_vcpus);
> +        nr_set_cpus = libxl_bitmap_count_set(&b_info->avail_vcpus);
> +        s = libxl_bitmap_to_hex_string(CTX, &b_info->avail_vcpus);
>          flexarray_vappend(dm_args, "-vcpu_avail",
>                                libxl__sprintf(gc, "%s", s), NULL);
>          free(s);
> @@ -459,7 +459,7 @@ static char ** libxl__build_device_model
>              flexarray_append(dm_args, "-smp");
>              if (b_info->avail_vcpus.size) {
>                  int nr_set_cpus = 0;
> -                nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
> +                nr_set_cpus = libxl_bitmap_count_set(&b_info->avail_vcpus);
> 
>                  flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d",
>                                                           b_info->max_vcpus,
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -202,7 +202,7 @@ int libxl__build_post(libxl__gc *gc, uin
>      ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn);
>      for (i = 0; i < info->max_vcpus; i++) {
>          ents[12+(i*2)]   = libxl__sprintf(gc, "cpu/%d/availability", i);
> -        ents[12+(i*2)+1] = libxl_cpumap_test(&info->avail_vcpus, i)
> +        ents[12+(i*2)+1] = libxl_bitmap_test(&info->avail_vcpus, i)
>                              ? "online" : "offline";
>      }
> 
> diff --git a/tools/libxl/libxl_json.c b/tools/libxl/libxl_json.c
> --- a/tools/libxl/libxl_json.c
> +++ b/tools/libxl/libxl_json.c
> @@ -99,8 +99,8 @@ yajl_gen_status libxl_uuid_gen_json(yajl
>      return yajl_gen_string(hand, (const unsigned char *)buf, LIBXL_UUID_FMTLEN);
>  }
> 
> -yajl_gen_status libxl_cpumap_gen_json(yajl_gen hand,
> -                                      libxl_cpumap *cpumap)
> +yajl_gen_status libxl_bitmap_gen_json(yajl_gen hand,
> +                                      libxl_bitmap *bitmap)
>  {
>      yajl_gen_status s;
>      int i;
> @@ -108,8 +108,8 @@ yajl_gen_status libxl_cpumap_gen_json(ya
>      s = yajl_gen_array_open(hand);
>      if (s != yajl_gen_status_ok) goto out;
> 
> -    libxl_for_each_cpu(i, *cpumap) {
> -        if (libxl_cpumap_test(cpumap, i)) {
> +    libxl_for_each_bit(i, *bitmap) {
> +        if (libxl_bitmap_test(bitmap, i)) {
>              s = yajl_gen_integer(hand, i);
>              if (s != yajl_gen_status_ok) goto out;
>          }
> diff --git a/tools/libxl/libxl_json.h b/tools/libxl/libxl_json.h
> --- a/tools/libxl/libxl_json.h
> +++ b/tools/libxl/libxl_json.h
> @@ -26,7 +26,7 @@ yajl_gen_status libxl_defbool_gen_json(y
>  yajl_gen_status libxl_domid_gen_json(yajl_gen hand, libxl_domid *p);
>  yajl_gen_status libxl_uuid_gen_json(yajl_gen hand, libxl_uuid *p);
>  yajl_gen_status libxl_mac_gen_json(yajl_gen hand, libxl_mac *p);
> -yajl_gen_status libxl_cpumap_gen_json(yajl_gen hand, libxl_cpumap *p);
> +yajl_gen_status libxl_bitmap_gen_json(yajl_gen hand, libxl_bitmap *p);
>  yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
>                                                   libxl_cpuid_policy_list *p);
>  yajl_gen_status libxl_string_list_gen_json(yajl_gen hand, libxl_string_list *p);
> diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
> --- a/tools/libxl/libxl_types.idl
> +++ b/tools/libxl/libxl_types.idl
> @@ -10,7 +10,7 @@ libxl_defbool = Builtin("defbool", passb
>  libxl_domid = Builtin("domid", json_fn = "yajl_gen_integer", autogenerate_json = False)
>  libxl_uuid = Builtin("uuid", passby=PASS_BY_REFERENCE)
>  libxl_mac = Builtin("mac", passby=PASS_BY_REFERENCE)
> -libxl_cpumap = Builtin("cpumap", dispose_fn="libxl_cpumap_dispose", passby=PASS_BY_REFERENCE)
> +libxl_bitmap = Builtin("bitmap", dispose_fn="libxl_bitmap_dispose", passby=PASS_BY_REFERENCE)
>  libxl_cpuid_policy_list = Builtin("cpuid_policy_list", dispose_fn="libxl_cpuid_dispose", passby=PASS_BY_REFERENCE)
> 
>  libxl_string_list = Builtin("string_list", dispose_fn="libxl_string_list_dispose", passby=PASS_BY_REFERENCE)
> @@ -198,7 +198,7 @@ libxl_cpupoolinfo = Struct("cpupoolinfo"
>      ("poolid",      uint32),
>      ("sched",       libxl_scheduler),
>      ("n_dom",       uint32),
> -    ("cpumap",      libxl_cpumap)
> +    ("cpumap",      libxl_bitmap)
>      ], dir=DIR_OUT)
> 
>  libxl_vminfo = Struct("vminfo", [
> @@ -247,8 +247,8 @@ libxl_domain_sched_params = Struct("doma
> 
>  libxl_domain_build_info = Struct("domain_build_info",[
>      ("max_vcpus",       integer),
> -    ("avail_vcpus",     libxl_cpumap),
> -    ("cpumap",          libxl_cpumap),
> +    ("avail_vcpus",     libxl_bitmap),
> +    ("cpumap",          libxl_bitmap),
>      ("tsc_mode",        libxl_tsc_mode),
>      ("max_memkb",       MemKB),
>      ("target_memkb",    MemKB),
> @@ -409,7 +409,7 @@ libxl_vcpuinfo = Struct("vcpuinfo", [
>      ("blocked", bool),
>      ("running", bool),
>      ("vcpu_time", uint64), # total vcpu time ran (ns)
> -    ("cpumap", libxl_cpumap), # current cpu's affinities
> +    ("cpumap", libxl_bitmap), # current cpu's affinities
>      ], dir=DIR_OUT)
> 
>  libxl_physinfo = Struct("physinfo", [
> diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
> --- a/tools/libxl/libxl_utils.c
> +++ b/tools/libxl/libxl_utils.c
> @@ -487,79 +487,70 @@ int libxl_mac_to_device_nic(libxl_ctx *c
>      return rc;
>  }
> 
> -int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus)
> +int libxl_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *bitmap, int n_bits)
>  {
>      GC_INIT(ctx);
>      int sz;
> -    int rc;
> 
> -    if (max_cpus < 0) {
> -        rc = ERROR_INVAL;
> -        goto out;
> -    }
> -    if (max_cpus == 0)
> -        max_cpus = libxl_get_max_cpus(ctx);
> -    if (max_cpus == 0) {
> -        rc = ERROR_FAIL;
> -        goto out;
> -    }
> +    sz = (n_bits + 7) / 8;
> +    bitmap->map = libxl__calloc(NOGC, sizeof(*bitmap->map), sz);
> +    bitmap->size = sz;
> 
> -    sz = (max_cpus + 7) / 8;
> -    cpumap->map = libxl__calloc(NOGC, sizeof(*cpumap->map), sz);
> -    cpumap->size = sz;
> -
> -    rc = 0;
> - out:
>      GC_FREE;
> -    return rc;
> +    return 0;
>  }
> 
> -void libxl_cpumap_dispose(libxl_cpumap *map)
> +void libxl_bitmap_init(libxl_bitmap *map)
> +{
> +    memset(map, '\0', sizeof(*map));
> +}
> +
> +void libxl_bitmap_dispose(libxl_bitmap *map)
>  {
>      free(map->map);
>  }
> 
> -int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu)
> +int libxl_bitmap_test(const libxl_bitmap *bitmap, int bit)
>  {
> -    if (cpu >= cpumap->size * 8)
> +    if (bit >= bitmap->size * 8)
>          return 0;
> -    return (cpumap->map[cpu / 8] & (1 << (cpu & 7))) ? 1 : 0;
> +    return (bitmap->map[bit / 8] & (1 << (bit & 7))) ? 1 : 0;
>  }
> 
> -void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
> +void libxl_bitmap_set(libxl_bitmap *bitmap, int bit)
>  {
> -    if (cpu >= cpumap->size * 8)
> +    if (bit >= bitmap->size * 8)
>          return;
> -    cpumap->map[cpu / 8] |= 1 << (cpu & 7);
> +    bitmap->map[bit / 8] |= 1 << (bit & 7);
>  }
> 
> -void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
> +void libxl_bitmap_reset(libxl_bitmap *bitmap, int bit)
>  {
> -    if (cpu >= cpumap->size * 8)
> +    if (bit >= bitmap->size * 8)
>          return;
> -    cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
> +    bitmap->map[bit / 8] &= ~(1 << (bit & 7));
>  }
> 
> -int libxl_cpumap_count_set(const libxl_cpumap *cpumap)
> +int libxl_bitmap_count_set(const libxl_bitmap *bitmap)
>  {
> -    int i, nr_set_cpus = 0;
> -    libxl_for_each_set_cpu(i, *cpumap)
> -        nr_set_cpus++;
> +    int i, nr_set_bits = 0;
> +    libxl_for_each_set_bit(i, *bitmap)
> +        nr_set_bits++;
> 
> -    return nr_set_cpus;
> +    return nr_set_bits;
>  }
> 
>  /* NB. caller is responsible for freeing the memory */
> -char *libxl_cpumap_to_hex_string(libxl_ctx *ctx, const libxl_cpumap *cpumap)
> +char *libxl_bitmap_to_hex_string(libxl_ctx *ctx, const libxl_bitmap *bitmap)
>  {
>      GC_INIT(ctx);
> -    int i = cpumap->size;
> -    char *p = libxl__zalloc(NOGC, cpumap->size * 2 + 3);
> +    int i = bitmap->size;
> +    char *p = libxl__zalloc(NOGC, bitmap->size * 2 + 3);
>      char *q = p;
>      strncpy(p, "0x", 2);
>      p += 2;
>      while(--i >= 0) {
> -        sprintf(p, "%02x", cpumap->map[i]);
> +        sprintf(p, "%02x", bitmap->map[i]);
>          p += 2;
>      }
>      *p = '\0';
> diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h
> --- a/tools/libxl/libxl_utils.h
> +++ b/tools/libxl/libxl_utils.h
> @@ -63,29 +63,44 @@ int libxl_devid_to_device_nic(libxl_ctx
>  int libxl_vdev_to_device_disk(libxl_ctx *ctx, uint32_t domid, const char *vdev,
>                                 libxl_device_disk *disk);
> 
> -int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus);
> -int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu);
> -void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
> -void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
> -int libxl_cpumap_count_set(const libxl_cpumap *cpumap);
> -char *libxl_cpumap_to_hex_string(libxl_ctx *ctx, const libxl_cpumap *cpumap);
> -static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
> +int libxl_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *bitmap, int n_bits);
> +    /* Allocated bimap is from malloc, libxl_bitmap_dispose() to be
> +     * called by the application when done. */
> +int libxl_bitmap_test(const libxl_bitmap *bitmap, int bit);
> +void libxl_bitmap_set(libxl_bitmap *bitmap, int bit);
> +void libxl_bitmap_reset(libxl_bitmap *bitmap, int bit);
> +int libxl_bitmap_count_set(const libxl_bitmap *cpumap);
> +char *libxl_bitmap_to_hex_string(libxl_ctx *ctx, const libxl_bitmap *cpumap);
> +static inline void libxl_bitmap_set_any(libxl_bitmap *bitmap)
>  {
> -    memset(cpumap->map, -1, cpumap->size);
> +    memset(bitmap->map, -1, bitmap->size);
>  }
> -static inline void libxl_cpumap_set_none(libxl_cpumap *cpumap)
> +static inline void libxl_bitmap_set_none(libxl_bitmap *bitmap)
>  {
> -    memset(cpumap->map, 0, cpumap->size);
> +    memset(bitmap->map, 0, bitmap->size);
>  }
> -static inline int libxl_cpumap_cpu_valid(libxl_cpumap *cpumap, int cpu)
> +static inline int libxl_bitmap_cpu_valid(libxl_bitmap *bitmap, int bit)
>  {
> -    return cpu >= 0 && cpu < (cpumap->size * 8);
> +    return bit >= 0 && bit < (bitmap->size * 8);
>  }
> -#define libxl_for_each_cpu(var, map) for (var = 0; var < (map).size * 8; var++)
> -#define libxl_for_each_set_cpu(v, m) for (v = 0; v < (m).size * 8; v++) \
> -                                             if (libxl_cpumap_test(&(m), v))
> +#define libxl_for_each_bit(var, map) for (var = 0; var < (map).size * 8; var++)
> +#define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \
> +                                             if (libxl_bitmap_test(&(m), v))
> 
> -static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
> +static inline int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap,
> +                                         int max_cpus)
> +{
> +    if (max_cpus < 0)
> +        return ERROR_INVAL;
> +    if (max_cpus == 0)
> +        max_cpus = libxl_get_max_cpus(ctx);
> +    if (max_cpus == 0)
> +        return ERROR_FAIL;
> +
> +    return libxl_bitmap_alloc(ctx, cpumap, max_cpus);
> +}
> +
> + static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
>      return (s + 1023) / 1024;
>  }
> 
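
One observation on the allocator split above, illustrated with a hypothetical
sketch: because libxl_bitmap_alloc() now just takes a bit count and only
libxl_cpu_bitmap_alloc() defaults to the host cpu count, the same generic
allocator can size a NUMA node map too, which is the motivation given in the
changelog. Nothing below is new libxl API; build_node_map() and nr_nodes are
made up for illustration, and the usual libxl.h / libxl_utils.h includes are
assumed:

    /* Hypothetical sketch: size a bitmap by an arbitrary bit count
     * (here, a node count supplied by the caller) rather than by cpus. */
    static int build_node_map(libxl_ctx *ctx, int nr_nodes, libxl_bitmap *nodemap)
    {
        int rc;

        libxl_bitmap_init(nodemap);
        rc = libxl_bitmap_alloc(ctx, nodemap, nr_nodes); /* bits, not cpus */
        if (rc) return rc;

        libxl_bitmap_set_none(nodemap);
        libxl_bitmap_set(nodemap, 0);   /* e.g. restrict to node 0 */
        return 0;
    }
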
> diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
> --- a/tools/libxl/xl_cmdimpl.c
> +++ b/tools/libxl/xl_cmdimpl.c
> @@ -492,19 +492,19 @@ static void split_string_into_string_lis
>      free(s);
>  }
> 
> -static int vcpupin_parse(char *cpu, libxl_cpumap *cpumap)
> -{
> -    libxl_cpumap exclude_cpumap;
> +static int vcpupin_parse(char *cpu, libxl_bitmap *cpumap)
> +{
> +    libxl_bitmap exclude_cpumap;
>      uint32_t cpuida, cpuidb;
>      char *endptr, *toka, *tokb, *saveptr = NULL;
>      int i, rc = 0, rmcpu;
> 
>      if (!strcmp(cpu, "all")) {
> -        libxl_cpumap_set_any(cpumap);
> +        libxl_bitmap_set_any(cpumap);
>          return 0;
>      }
> 
> -    if (libxl_cpumap_alloc(ctx, &exclude_cpumap, 0)) {
> +    if (libxl_cpu_bitmap_alloc(ctx, &exclude_cpumap, 0)) {
>          fprintf(stderr, "Error: Failed to allocate cpumap.\n");
>          return ENOMEM;
>      }
> @@ -534,19 +534,19 @@ static int vcpupin_parse(char *cpu, libx
>              }
>          }
>          while (cpuida <= cpuidb) {
> -            rmcpu == 0 ? libxl_cpumap_set(cpumap, cpuida) :
> -                         libxl_cpumap_set(&exclude_cpumap, cpuida);
> +            rmcpu == 0 ? libxl_bitmap_set(cpumap, cpuida) :
> +                         libxl_bitmap_set(&exclude_cpumap, cpuida);
>              cpuida++;
>          }
>      }
> 
>      /* Clear all the cpus from the removal list */
> -    libxl_for_each_set_cpu(i, exclude_cpumap) {
> -        libxl_cpumap_reset(cpumap, i);
> +    libxl_for_each_set_bit(i, exclude_cpumap) {
> +        libxl_bitmap_reset(cpumap, i);
>      }
> 
>  vcpp_out:
> -    libxl_cpumap_dispose(&exclude_cpumap);
> +    libxl_bitmap_dispose(&exclude_cpumap);
> 
>      return rc;
>  }
> @@ -649,13 +649,13 @@ static void parse_config_data(const char
>      if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
>          b_info->max_vcpus = l;
> 
> -        if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) {
> +        if (libxl_cpu_bitmap_alloc(ctx, &b_info->avail_vcpus, l)) {
>              fprintf(stderr, "Unable to allocate cpumap\n");
>              exit(1);
>          }
> -        libxl_cpumap_set_none(&b_info->avail_vcpus);
> +        libxl_bitmap_set_none(&b_info->avail_vcpus);
>          while (l-- > 0)
> -            libxl_cpumap_set((&b_info->avail_vcpus), l);
> +            libxl_bitmap_set((&b_info->avail_vcpus), l);
>      }
> 
>      if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))
> @@ -664,7 +664,7 @@ static void parse_config_data(const char
>      if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1)) {
>          int i, n_cpus = 0;
> 
> -        if (libxl_cpumap_alloc(ctx, &b_info->cpumap, 0)) {
> +        if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) {
>              fprintf(stderr, "Unable to allocate cpumap\n");
>              exit(1);
>          }
> @@ -684,14 +684,14 @@ static void parse_config_data(const char
>           * the cpumap derived from the list ensures memory is being
>           * allocated on the proper nodes anyway.
>           */
> -        libxl_cpumap_set_none(&b_info->cpumap);
> +        libxl_bitmap_set_none(&b_info->cpumap);
>          while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
>              i = atoi(buf);
> -            if (!libxl_cpumap_cpu_valid(&b_info->cpumap, i)) {
> +            if (!libxl_bitmap_cpu_valid(&b_info->cpumap, i)) {
>                  fprintf(stderr, "cpu %d illegal\n", i);
>                  exit(1);
>              }
> -            libxl_cpumap_set(&b_info->cpumap, i);
> +            libxl_bitmap_set(&b_info->cpumap, i);
>              if (n_cpus < b_info->max_vcpus)
>                  vcpu_to_pcpu[n_cpus] = i;
>              n_cpus++;
> @@ -700,12 +700,12 @@ static void parse_config_data(const char
>      else if (!xlu_cfg_get_string (config, "cpus", &buf, 0)) {
>          char *buf2 = strdup(buf);
> 
> -        if (libxl_cpumap_alloc(ctx, &b_info->cpumap, 0)) {
> +        if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap, 0)) {
>              fprintf(stderr, "Unable to allocate cpumap\n");
>              exit(1);
>          }
> 
> -        libxl_cpumap_set_none(&b_info->cpumap);
> +        libxl_bitmap_set_none(&b_info->cpumap);
>          if (vcpupin_parse(buf2, &b_info->cpumap))
>              exit(1);
>          free(buf2);
> @@ -1800,28 +1800,28 @@ start:
> 
>      /* If single vcpu to pcpu mapping was requested, honour it */
>      if (vcpu_to_pcpu) {
> -        libxl_cpumap vcpu_cpumap;
> -
> -        ret = libxl_cpumap_alloc(ctx, &vcpu_cpumap, 0);
> +        libxl_bitmap vcpu_cpumap;
> +
> +        ret = libxl_cpu_bitmap_alloc(ctx, &vcpu_cpumap, 0);
>          if (ret)
>              goto error_out;
>          for (i = 0; i < d_config.b_info.max_vcpus; i++) {
> 
>              if (vcpu_to_pcpu[i] != -1) {
> -                libxl_cpumap_set_none(&vcpu_cpumap);
> -                libxl_cpumap_set(&vcpu_cpumap, vcpu_to_pcpu[i]);
> +                libxl_bitmap_set_none(&vcpu_cpumap);
> +                libxl_bitmap_set(&vcpu_cpumap, vcpu_to_pcpu[i]);
>              } else {
> -                libxl_cpumap_set_any(&vcpu_cpumap);
> +                libxl_bitmap_set_any(&vcpu_cpumap);
>              }
>              if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) {
>                  fprintf(stderr, "setting affinity failed on vcpu `%d'.\n", i);
> -                libxl_cpumap_dispose(&vcpu_cpumap);
> +                libxl_bitmap_dispose(&vcpu_cpumap);
>                  free(vcpu_to_pcpu);
>                  ret = ERROR_FAIL;
>                  goto error_out;
>              }
>          }
> -        libxl_cpumap_dispose(&vcpu_cpumap);
> +        libxl_bitmap_dispose(&vcpu_cpumap);
>          free(vcpu_to_pcpu); vcpu_to_pcpu = NULL;
>      }
> 
> @@ -4058,7 +4058,7 @@ int main_vcpulist(int argc, char **argv)
>  static void vcpupin(const char *d, const char *vcpu, char *cpu)
>  {
>      libxl_vcpuinfo *vcpuinfo;
> -    libxl_cpumap cpumap;
> +    libxl_bitmap cpumap;
> 
>      uint32_t vcpuid;
>      char *endptr;
> @@ -4075,7 +4075,7 @@ static void vcpupin(const char *d, const
> 
>      find_domain(d);
> 
> -    if (libxl_cpumap_alloc(ctx, &cpumap, 0)) {
> +    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
>          goto vcpupin_out;
>      }
> 
> @@ -4102,7 +4102,7 @@ static void vcpupin(const char *d, const
>          libxl_vcpuinfo_list_free(vcpuinfo, nb_vcpu);
>      }
>    vcpupin_out1:
> -    libxl_cpumap_dispose(&cpumap);
> +    libxl_bitmap_dispose(&cpumap);
>    vcpupin_out:
>      ;
>  }
> @@ -4122,7 +4122,7 @@ static void vcpuset(const char *d, const
>  {
>      char *endptr;
>      unsigned int max_vcpus, i;
> -    libxl_cpumap cpumap;
> +    libxl_bitmap cpumap;
> 
>      max_vcpus = strtoul(nr_vcpus, &endptr, 10);
>      if (nr_vcpus == endptr) {
> @@ -4132,17 +4132,17 @@ static void vcpuset(const char *d, const
> 
>      find_domain(d);
> 
> -    if (libxl_cpumap_alloc(ctx, &cpumap, 0)) {
> -        fprintf(stderr, "libxl_cpumap_alloc failed\n");
> +    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
> +        fprintf(stderr, "libxl_cpu_bitmap_alloc failed\n");
>          return;
>      }
>      for (i = 0; i < max_vcpus; i++)
> -        libxl_cpumap_set(&cpumap, i);
> +        libxl_bitmap_set(&cpumap, i);
> 
>      if (libxl_set_vcpuonline(ctx, domid, &cpumap) < 0)
>          fprintf(stderr, "libxl_set_vcpuonline failed domid=%d max_vcpus=%d\n", domid, max_vcpus);
> 
> -    libxl_cpumap_dispose(&cpumap);
> +    libxl_bitmap_dispose(&cpumap);
>  }
> 
>  int main_vcpuset(int argc, char **argv)
> @@ -4206,7 +4206,7 @@ static void output_physinfo(void)
>      libxl_physinfo info;
>      const libxl_version_info *vinfo;
>      unsigned int i;
> -    libxl_cpumap cpumap;
> +    libxl_bitmap cpumap;
>      int n = 0;
> 
>      if (libxl_get_physinfo(ctx, &info) != 0) {
> @@ -4238,8 +4238,8 @@ static void output_physinfo(void)
>          printf("sharing_used_memory    : %"PRIu64"\n", info.sharing_used_frames / i);
>      }
>      if (!libxl_get_freecpus(ctx, &cpumap)) {
> -        libxl_for_each_cpu(i, cpumap)
> -            if (libxl_cpumap_test(&cpumap, i))
> +        libxl_for_each_bit(i, cpumap)
> +            if (libxl_bitmap_test(&cpumap, i))
>                  n++;
>          printf("free_cpus              : %d\n", n);
>          free(cpumap.map);
> @@ -5861,8 +5861,8 @@ int main_cpupoolcreate(int argc, char **
>      XLU_ConfigList *cpus;
>      XLU_ConfigList *nodes;
>      int n_cpus, n_nodes, i, n;
> -    libxl_cpumap freemap;
> -    libxl_cpumap cpumap;
> +    libxl_bitmap freemap;
> +    libxl_bitmap cpumap;
>      libxl_uuid uuid;
>      libxl_cputopology *topology;
>      int rc = -ERROR_FAIL;
> @@ -5975,7 +5975,7 @@ int main_cpupoolcreate(int argc, char **
>          fprintf(stderr, "libxl_get_freecpus failed\n");
>          goto out_cfg;
>      }
> -    if (libxl_cpumap_alloc(ctx, &cpumap, 0)) {
> +    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
>          fprintf(stderr, "Failed to allocate cpumap\n");
>          goto out_cfg;
>      }
> @@ -5992,8 +5992,8 @@ int main_cpupoolcreate(int argc, char **
>              n = atoi(buf);
>              for (i = 0; i < nr; i++) {
>                  if ((topology[i].node == n) &&
> -                    libxl_cpumap_test(&freemap, i)) {
> -                    libxl_cpumap_set(&cpumap, i);
> +                    libxl_bitmap_test(&freemap, i)) {
> +                    libxl_bitmap_set(&cpumap, i);
>                      n_cpus++;
>                  }
>              }
> @@ -6011,11 +6011,11 @@ int main_cpupoolcreate(int argc, char **
>          while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
>              i = atoi(buf);
>              if ((i < 0) || (i >= freemap.size * 8) ||
> -                !libxl_cpumap_test(&freemap, i)) {
> +                !libxl_bitmap_test(&freemap, i)) {
>                  fprintf(stderr, "cpu %d illegal or not free\n", i);
>                  goto out_cfg;
>              }
> -            libxl_cpumap_set(&cpumap, i);
> +            libxl_bitmap_set(&cpumap, i);
>              n_cpus++;
>          }
>      } else
> @@ -6113,8 +6113,8 @@ int main_cpupoollist(int argc, char **ar
>                  printf("%-19s", name);
>                  free(name);
>                  n = 0;
> -                libxl_for_each_cpu(c, poolinfo[p].cpumap)
> -                    if (libxl_cpumap_test(&poolinfo[p].cpumap, c)) {
> +                libxl_for_each_bit(c, poolinfo[p].cpumap)
> +                    if (libxl_bitmap_test(&poolinfo[p].cpumap, c)) {
>                          if (n && opt_cpus) printf(",");
>                          if (opt_cpus) printf("%d", c);
>                          n++;
> @@ -6313,7 +6313,7 @@ int main_cpupoolnumasplit(int argc, char
>      int n_cpus;
>      char name[16];
>      libxl_uuid uuid;
> -    libxl_cpumap cpumap;
> +    libxl_bitmap cpumap;
>      libxl_cpupoolinfo *poolinfo;
>      libxl_cputopology *topology;
>      libxl_dominfo info;
> @@ -6343,7 +6343,7 @@ int main_cpupoolnumasplit(int argc, char
>          return -ERROR_FAIL;
>      }
> 
> -    if (libxl_cpumap_alloc(ctx, &cpumap, 0)) {
> +    if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
>          fprintf(stderr, "Failed to allocate cpumap\n");
>          libxl_cputopology_list_free(topology, n_cpus);
>          return -ERROR_FAIL;
> @@ -6369,7 +6369,7 @@ int main_cpupoolnumasplit(int argc, char
>      for (c = 0; c < n_cpus; c++) {
>          if (topology[c].node == node) {
>              topology[c].node = LIBXL_CPUTOPOLOGY_INVALID_ENTRY;
> -            libxl_cpumap_set(&cpumap, n);
> +            libxl_bitmap_set(&cpumap, n);
>              n++;
>          }
>      }
> @@ -6391,7 +6391,7 @@ int main_cpupoolnumasplit(int argc, char
>          fprintf(stderr, "failed to offline vcpus\n");
>          goto out;
>      }
> -    libxl_cpumap_set_none(&cpumap);
> +    libxl_bitmap_set_none(&cpumap);
> 
>      for (c = 0; c < n_cpus; c++) {
>          if (topology[c].node == LIBXL_CPUTOPOLOGY_INVALID_ENTRY) {
> @@ -6429,7 +6429,7 @@ int main_cpupoolnumasplit(int argc, char
> 
>  out:
>      libxl_cputopology_list_free(topology, n_cpus);
> -    libxl_cpumap_dispose(&cpumap);
> +    libxl_bitmap_dispose(&cpumap);
> 
>      return ret;
>  }
> diff --git a/tools/python/xen/lowlevel/xl/xl.c b/tools/python/xen/lowlevel/xl/xl.c
> --- a/tools/python/xen/lowlevel/xl/xl.c
> +++ b/tools/python/xen/lowlevel/xl/xl.c
> @@ -231,14 +231,14 @@ int attrib__libxl_cpuid_policy_list_set(
>      return -1;
>  }
> 
> -int attrib__libxl_cpumap_set(PyObject *v, libxl_cpumap *pptr)
> +int attrib__libxl_bitmap_set(PyObject *v, libxl_bitmap *pptr)
>  {
>      int i;
>      long cpu;
> 
>      for (i = 0; i < PyList_Size(v); i++) {
>          cpu = PyInt_AsLong(PyList_GetItem(v, i));
> -        libxl_cpumap_set(pptr, cpu);
> +        libxl_bitmap_set(pptr, cpu);
>      }
>      return 0;
>  }
> @@ -293,14 +293,14 @@ PyObject *attrib__libxl_cpuid_policy_lis
>      return NULL;
>  }
> 
> -PyObject *attrib__libxl_cpumap_get(libxl_cpumap *pptr)
> +PyObject *attrib__libxl_bitmap_get(libxl_bitmap *pptr)
>  {
>      PyObject *cpulist = NULL;
>      int i;
> 
>      cpulist = PyList_New(0);
> -    libxl_for_each_cpu(i, *pptr) {
> -        if ( libxl_cpumap_test(pptr, i) ) {
> +    libxl_for_each_bit(i, *pptr) {
> +        if ( libxl_bitmap_test(pptr, i) ) {
>              PyObject* pyint = PyInt_FromLong(i);
> 
>              PyList_Append(cpulist, pyint);





 

