[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] xenoprof: dynamic buffer array allocation



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1262765735 0
# Node ID 2b2d888e5a804c81dc5a1dcd0de4cf5e83106bf4
# Parent  f6017e7c6615687365ae7e66dd673898285d884d
xenoprof: dynamic buffer array allocation

The recent change to locally define MAX_VIRT_CPUS wasn't really
appropriate - with there not being a hard limit on the number of
vCPU-s anymore, these arrays should be allocated dynamically.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 drivers/xen/xenoprof/xenoprofile.c |   57 +++++++++++++++++++++++++++++++------
 1 files changed, 49 insertions(+), 8 deletions(-)

diff -r f6017e7c6615 -r 2b2d888e5a80 drivers/xen/xenoprof/xenoprofile.c
--- a/drivers/xen/xenoprof/xenoprofile.c        Wed Jan 06 08:14:10 2010 +0000
+++ b/drivers/xen/xenoprof/xenoprofile.c        Wed Jan 06 08:15:35 2010 +0000
@@ -33,15 +33,14 @@
 #include "../../../drivers/oprofile/event_buffer.h"
 
 #define MAX_XENOPROF_SAMPLES 16
-#define MAX_VIRT_CPUS 128
 
 /* sample buffers shared with Xen */
-static xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
+static xenoprof_buf_t **__read_mostly xenoprof_buf;
 /* Shared buffer area */
 static struct xenoprof_shared_buffer shared_buffer;
 
 /* Passive sample buffers shared with Xen */
-static xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
+static xenoprof_buf_t **__read_mostly p_xenoprof_buf[MAX_OPROF_DOMAINS];
 /* Passive shared buffer area */
 static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
 
@@ -253,11 +252,32 @@ static int bind_virq(void)
 }
 
 
+static xenoprof_buf_t **get_buffer_array(unsigned int nbuf)
+{
+       size_t size = nbuf * sizeof(xenoprof_buf_t *);
+
+       if (size <= PAGE_SIZE)
+               return kmalloc(size, GFP_KERNEL);
+       return vmalloc(size);
+}
+
+static void release_buffer_array(xenoprof_buf_t **buf, unsigned int nbuf)
+{
+       if (nbuf * sizeof(xenoprof_buf_t *) <= PAGE_SIZE)
+               kfree(buf);
+       else
+               vfree(buf);
+}
+
+
 static void unmap_passive_list(void)
 {
        int i;
-       for (i = 0; i < pdomains; i++)
+       for (i = 0; i < pdomains; i++) {
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
+               release_buffer_array(p_xenoprof_buf[i],
+                                    passive_domains[i].nbuf);
+       }
        pdomains = 0;
 }
 
@@ -277,10 +297,16 @@ static int map_xenoprof_buffer(int max_s
                return ret;
        nbuf = get_buffer.nbuf;
 
+       xenoprof_buf = get_buffer_array(nbuf);
+       if (!xenoprof_buf) {
+               xenoprof_arch_unmap_shared_buffer(&shared_buffer);
+               return -ENOMEM;
+       }
+
        for (i=0; i< nbuf; i++) {
                buf = (struct xenoprof_buf*) 
                        &shared_buffer.buffer[i * get_buffer.bufsize];
-               BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+               BUG_ON(buf->vcpu_id >= nbuf);
                xenoprof_buf[buf->vcpu_id] = buf;
        }
 
@@ -295,8 +321,10 @@ static int xenoprof_setup(void)
        if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
                return ret;
 
-       if ( (ret = bind_virq()) )
+       if ( (ret = bind_virq()) ) {
+               release_buffer_array(xenoprof_buf, nbuf);
                return ret;
+       }
 
        if (xenoprof_is_primary) {
                /* Define dom0 as an active domain if not done yet */
@@ -339,6 +367,7 @@ static int xenoprof_setup(void)
        return 0;
  err:
        unbind_virq();
+       release_buffer_array(xenoprof_buf, nbuf);
        return ret;
 }
 
@@ -360,6 +389,7 @@ static void xenoprof_shutdown(void)
        xenoprof_arch_unmap_shared_buffer(&shared_buffer);
        if (xenoprof_is_primary)
                unmap_passive_list();
+       release_buffer_array(xenoprof_buf, nbuf);
 }
 
 
@@ -452,11 +482,19 @@ static int xenoprof_set_passive(int * p_
                                                &p_shared_buffer[i]);
                if (ret)
                        goto out;
+
+               p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf);
+               if (!p_xenoprof_buf[i]) {
+                       ++i;
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
                for (j = 0; j < passive_domains[i].nbuf; j++) {
                        buf = (struct xenoprof_buf *)
                                &p_shared_buffer[i].buffer[
                                j * passive_domains[i].bufsize];
-                       BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+                       BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf);
                        p_xenoprof_buf[i][buf->vcpu_id] = buf;
                }
        }
@@ -465,8 +503,11 @@ static int xenoprof_set_passive(int * p_
        return 0;
 
 out:
-       for (j = 0; j < i; j++)
+       for (j = 0; j < i; j++) {
                xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
+               release_buffer_array(p_xenoprof_buf[j],
+                                    passive_domains[j].nbuf);
+       }
 
        return ret;
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.