[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH RFC v2 5/7] libxl/vNUMA: VM config parsing functions



vNUMA VM config parsing functions.
Parses VM vNUMA related config, verifies and
sets default values for erroneous parameters.
If the sum of all vnode memory sizes does not match the
domain memory, the domain memory is adjusted accordingly.

Required configuration options:
- vnodes - number of vNUMA nodes;
- vnumamem - vnodes memory ranges list;
optional:
- vnuma_distance - array of distances;
default values: 10 for same node, 20 for the rest;
- vcpu_to_vnode - maps vcpu to vnode;
should be specified for each node, otherwise default
interleaved initialization used;

Examples:
a)
vnodes = 2
vnumamem = "512m, 512m"
b)
memory = 2048
vcpus = 6
vnodes = 2
vnumamem = "1g, 1g"
vnuma_distance = "10 20, 10 20"
vcpu_to_vnode ="1, 0, 0, 0, 1, 1"

Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>

---
Changes since v1:
* defined default initializers for config parameters;
* added domain memory adjustment in case vNUMA nodes
sizes do not match it;

TODO:
* have maxmem parsed as list and in conjunction with
nr_nodes use as domain initial memory and vNUMA nodes
sizes;
* use standard xl list parsing functions;
---
 tools/libxl/xl_cmdimpl.c |  205 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 205 insertions(+)

diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 884f050..1520140 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -540,6 +540,143 @@ vcpp_out:
     return rc;
 }
 
+static void vdistance_default(unsigned int *vdistance, unsigned int nr_vnodes)
+{
+    int i, j;
+    for (i = 0; i < nr_vnodes; i++)
+        for (j = 0; j < nr_vnodes; j++)
+            *(vdistance + j * nr_vnodes + i) = i == j ? 10 : 20;
+}
+
+static void vcputovnode_default(unsigned int *vcpu_to_vnode, unsigned int 
nr_vnodes, unsigned int max_vcpus)
+{
+    int i;
+    if (vcpu_to_vnode == NULL)
+        return;
+    for(i = 0; i < max_vcpus; i++)
+        vcpu_to_vnode[i] = i % nr_vnodes;
+}
+
+static int vdistance_parse(char *vdistcfg, unsigned int *vdistance, unsigned 
int nr_vnodes)
+{
+    char *endptr, *toka, *tokb, *saveptra = NULL, *saveptrb = NULL;
+    unsigned int *vdist_tmp = NULL;
+    int rc;
+    int i, j, dist, parsed = 0;        
+    rc = -EINVAL;
+
+    if(vdistance == NULL)
+        return rc;
+    vdist_tmp = (unsigned int *)malloc(nr_vnodes * nr_vnodes * 
sizeof(*vdistance));
+    if (vdist_tmp == NULL)
+        return rc;
+    i =0; j = 0;
+    for (toka = strtok_r(vdistcfg, ",", &saveptra); toka;
+        toka = strtok_r(NULL, ",", &saveptra)) {
+        if ( i >= nr_vnodes ) 
+            goto vdist_parse_err;
+        for (tokb = strtok_r(toka, " ", &saveptrb); tokb;
+            tokb = strtok_r(NULL, " ", &saveptrb)) {
+            if (j >= nr_vnodes) 
+                goto vdist_parse_err;
+            dist = strtol(tokb, &endptr, 10);
+            if (tokb == endptr)
+                goto vdist_parse_err;
+            *(vdist_tmp + j*nr_vnodes + i) = dist;
+            parsed++;
+            j++;
+        }
+        i++;
+        j = 0;
+    }
+    if (parsed == nr_vnodes * nr_vnodes) {
+        memcpy(vdistance, vdist_tmp, nr_vnodes * nr_vnodes * 
sizeof(*vdistance));
+        rc = 0;
+    }
+vdist_parse_err:
+    if (vdist_tmp !=NULL ) free(vdist_tmp);
+    return rc;
+}
+
+static int vcputovnode_parse(char *cfg, unsigned int *vmap, unsigned int 
nr_vnodes, unsigned int nr_vcpus)
+{
+    char *toka, *endptr, *saveptra = NULL;
+    unsigned int *vmap_tmp = NULL, nodemap = 0, smask;
+    
+    int rc = 0;
+    int i;
+    rc = -EINVAL;
+    i = 0;
+    smask = ~(~0 << nr_vnodes);
+    if(vmap == NULL)
+        return rc;
+    vmap_tmp = (unsigned int *)malloc(sizeof(*vmap) * nr_vcpus);
+    memset(vmap_tmp, 0, sizeof(*vmap) * nr_vcpus);
+    for (toka = strtok_r(cfg, " ", &saveptra); toka;
+        toka = strtok_r(NULL, " ", &saveptra)) {
+        if (i >= nr_vcpus) goto vmap_parse_out;
+            vmap_tmp[i] = strtoul(toka, &endptr, 10);
+            nodemap |= (1 << vmap_tmp[i]);
+            if( endptr == toka) 
+                goto vmap_parse_out;
+        i++;
+    }
+    memcpy(vmap, vmap_tmp, sizeof(*vmap) * nr_vcpus);
+    if( ((nodemap & smask) + 1) == (1 << nr_vnodes) )
+        rc = i;
+    else 
+        /* Not all nodes have vcpus, will use default map */
+        rc = -EINVAL;
+vmap_parse_out:
+    if (vmap_tmp != NULL) free(vmap_tmp);
+    return rc;
+}
+
+static int vnumamem_parse(char *vmemsizes, uint64_t *vmemregions, int 
nr_vnodes)
+{
+    uint64_t memsize;
+    char *endptr, *toka, *saveptr = NULL;
+    int rc;
+    int j;
+    
+    rc = -EINVAL;
+    memsize = j = 0;
+    if(vmemregions == NULL)
+        goto vmem_parse_out;
+    for (toka = strtok_r(vmemsizes, ",", &saveptr); toka;
+        toka = strtok_r(NULL, ",", &saveptr)) {
+        if ( j >= nr_vnodes ) 
+            goto vmem_parse_out;
+        memsize = strtoul(toka, &endptr, 10);
+        if (endptr == toka) 
+            goto vmem_parse_out;
+        switch (*endptr) {
+            case 'G':
+            case 'g':
+                memsize = memsize * 1024 * 1024 * 1024;
+                break;
+            case 'M':
+            case 'm':
+                memsize = memsize * 1024 * 1024;
+                break;
+            case 'K':
+            case 'k':
+                memsize = memsize * 1024 ;
+                break;
+            default:
+                continue;
+                break;
+        }
+        if (memsize > 0) {
+            vmemregions[j] = memsize;
+            j++;
+        }
+    }
+    rc = j;
+vmem_parse_out:   
+    return rc;
+}
+
 static void parse_config_data(const char *config_source,
                               const char *config_data,
                               int config_len,
@@ -871,6 +1008,11 @@ static void parse_config_data(const char *config_source,
     {
         char *cmdline = NULL;
         const char *root = NULL, *extra = "";
+        const char *vnumamemcfg = NULL;
+        int nr_vnuma_regions;
+        long unsigned int vnuma_memparsed = 0;
+        const char *vmapcfg  = NULL;
+        const char *vdistcfg = NULL;
 
         xlu_cfg_replace_string (config, "kernel", &b_info->u.pv.kernel, 0);
 
@@ -889,6 +1031,69 @@ static void parse_config_data(const char *config_source,
             exit(1);
         }
 
+        if (!xlu_cfg_get_long (config, "vnodes", &l, 0)) {
+                b_info->nr_vnodes = l;
+                if(b_info->nr_vnodes != 0 && 
+                  !xlu_cfg_get_string (config, "vnumamem", &vnumamemcfg, 0)) 
+                {
+                    b_info->vnuma_memszs = calloc(b_info->nr_vnodes,
+                                                sizeof(*b_info->vnuma_memszs));
+                    if (b_info->vnuma_memszs == NULL) 
+                        exit(1);
+                    char *buf2 = strdup(vnumamemcfg);
+                    nr_vnuma_regions = vnumamem_parse(buf2, 
b_info->vnuma_memszs,
+                                                            b_info->nr_vnodes);
+                    if(nr_vnuma_regions != b_info->nr_vnodes ) {
+                        fprintf(stderr, "WARNING: Incorrect vNUMA memory 
config\n");
+                        if(buf2) free(buf2);
+                        exit(1);
+                    }
+                    for(i = 0; i < b_info->nr_vnodes; i++)
+                        vnuma_memparsed = vnuma_memparsed + 
(b_info->vnuma_memszs[i] >> 10);
+                    if(vnuma_memparsed != b_info->max_memkb)
+                    /* setting up new memory limit */
+                    {
+                        fprintf(stderr, "WARNING: vNUMA memory is not equal to 
max_mem, adjusting.\n");
+                        b_info->max_memkb = vnuma_memparsed;
+                        b_info->target_memkb = vnuma_memparsed;
+                    }
+                    if (buf2) free(buf2);
+                    b_info->vdistance = (unsigned int 
*)calloc(b_info->nr_vnodes * b_info->nr_vnodes, 
+                                                            
sizeof(*b_info->vdistance));
+                        if (b_info->vdistance == NULL) 
+                           exit(1);
+
+                    if(!xlu_cfg_get_string(config, "vnuma_distance", 
&vdistcfg, 0)) {
+                                                buf2 = strdup(vdistcfg);
+                        if(vdistance_parse(buf2, b_info->vdistance, 
b_info->nr_vnodes) < 0)
+                        {
+                            vdistance_default(b_info->vdistance, 
b_info->nr_vnodes);
+                        } 
+                        if(buf2) free(buf2);
+                    }
+                    else
+                        vdistance_default(b_info->vdistance, 
b_info->nr_vnodes);
+                    
+                    b_info->vcpu_to_vnode = (unsigned int 
*)calloc(b_info->max_vcpus,
+                                                                        
sizeof(*b_info->vcpu_to_vnode));
+                    if (b_info->vcpu_to_vnode == NULL) 
+                        exit(1);
+                    if(!xlu_cfg_get_string(config, "vcpu_to_vnode", &vmapcfg, 
0))
+                    {
+                        buf2 = strdup(vmapcfg);
+                        if (vcputovnode_parse(buf2, b_info->vcpu_to_vnode,
+                                                b_info->nr_vnodes, 
b_info->max_vcpus) < 0) {
+                            vcputovnode_default(b_info->vcpu_to_vnode, 
b_info->nr_vnodes, b_info->max_vcpus);
+                        }
+                        if(buf2) free(buf2);
+                    }
+                    else
+                        vcputovnode_default(b_info->vcpu_to_vnode, 
b_info->nr_vnodes, b_info->max_vcpus);
+                }
+                else 
+                    b_info->nr_vnodes=0;
+        }
+        
         xlu_cfg_replace_string (config, "bootloader", 
&b_info->u.pv.bootloader, 0);
         switch (xlu_cfg_get_list_as_string_list(config, "bootloader_args",
                                       &b_info->u.pv.bootloader_args, 1))
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.