[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] Add new operation XENOPROF_get_buffer in xenoprof hypercall. Also



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1ece34466781ec55f41fd29d53f6dafd208ba2fa
# Parent  98fcd017c5f357d25e1fe648784eb3d42d0c115d
Add new operation XENOPROF_get_buffer in xenoprof hypercall. Also
remove buffer related arguments from XENOPROF_init operation. This
is the first step to allow dynamic mapping/unmapping of xenoprof
buffers to
enable profiling a domain in passive mode and then switch to active
mode (or vice-versa). Currently a domain can only be profiled in a
single mode. Also, passive domains cannot have oprofile built into the
kernel, or passive profiling will fail.

Signed-off-by: Jose Renato Santos <jsantos@xxxxxxxxxx>
---
 linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c |   91 ++++++++++++---------
 xen/arch/x86/oprofile/xenoprof.c                   |   75 +++++++++--------
 xen/include/public/xenoprof.h                      |   15 ++-
 3 files changed, 105 insertions(+), 76 deletions(-)

diff -r 98fcd017c5f3 -r 1ece34466781 
linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c
--- a/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c        Wed Sep 13 
14:01:23 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/i386/oprofile/xenoprof.c        Wed Sep 13 
14:05:33 2006 +0100
@@ -32,6 +32,8 @@
 #include <../../../drivers/oprofile/cpu_buffer.h>
 #include <../../../drivers/oprofile/event_buffer.h>
 
+#define MAX_XENOPROF_SAMPLES 16
+
 static int xenoprof_start(void);
 static void xenoprof_stop(void);
 
@@ -43,7 +45,7 @@ static int active_defined;
 /* sample buffers shared with Xen */
 xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
 /* Shared buffer area */
-char * shared_buffer;
+char * shared_buffer = NULL;
 /* Number of buffers in shared area (one per VCPU) */
 int nbuf;
 /* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
@@ -233,13 +235,57 @@ static int bind_virq(void)
 }
 
 
+static int map_xenoprof_buffer(int max_samples)
+{
+       struct xenoprof_get_buffer get_buffer;
+       struct xenoprof_buf *buf;
+       int npages, ret, i;
+       struct vm_struct *area;
+
+       if ( shared_buffer )
+               return 0;
+
+       get_buffer.max_samples = max_samples;
+
+       if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, &get_buffer)) )
+               return ret;
+
+       nbuf = get_buffer.nbuf;
+       npages = (get_buffer.bufsize * nbuf - 1) / PAGE_SIZE + 1;
+
+       area = alloc_vm_area(npages * PAGE_SIZE);
+       if (area == NULL)
+               return -ENOMEM;
+
+       if ( (ret = direct_kernel_remap_pfn_range(
+                     (unsigned long)area->addr,
+                     get_buffer.buf_maddr >> PAGE_SHIFT,
+                     npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE), DOMID_SELF)) 
) {
+               vunmap(area->addr);
+               return ret;
+       }
+
+       shared_buffer = area->addr;
+       for (i=0; i< nbuf; i++) {
+               buf = (struct xenoprof_buf*) 
+                       &shared_buffer[i * get_buffer.bufsize];
+               BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
+               xenoprof_buf[buf->vcpu_id] = buf;
+       }
+
+       return 0;
+}
+
+
 static int xenoprof_setup(void)
 {
        int ret;
        int i;
 
-       ret = bind_virq();
-       if (ret)
+       if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
+               return ret;
+
+       if ( (ret = bind_virq()) )
                return ret;
 
        if (is_primary) {
@@ -482,50 +528,18 @@ int __init oprofile_arch_init(struct opr
 int __init oprofile_arch_init(struct oprofile_operations * ops)
 {
        struct xenoprof_init init;
-       struct xenoprof_buf *buf;
-       int npages, ret, i;
-       struct vm_struct *area;
-
-       init.max_samples = 16;
+       int ret, i;
+
        ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
 
        if (!ret) {
-               pgprot_t prot = __pgprot(_KERNPG_TABLE);
-
                num_events = init.num_events;
                is_primary = init.is_primary;
-               nbuf = init.nbuf;
 
                /* just in case - make sure we do not overflow event list 
-                   (i.e. counter_config list) */
+                  (i.e. counter_config list) */
                if (num_events > OP_MAX_COUNTER)
                        num_events = OP_MAX_COUNTER;
-
-               npages = (init.bufsize * nbuf - 1) / PAGE_SIZE + 1;
-
-               area = alloc_vm_area(npages * PAGE_SIZE);
-               if (area == NULL) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               ret = direct_kernel_remap_pfn_range(
-                       (unsigned long)area->addr,
-                       init.buf_maddr >> PAGE_SHIFT,
-                       npages * PAGE_SIZE, prot, DOMID_SELF);
-               if (ret) {
-                       vunmap(area->addr);
-                       goto out;
-               }
-
-               shared_buffer = area->addr;
-
-               for (i=0; i< nbuf; i++) {
-                       buf = (struct xenoprof_buf*) 
-                               &shared_buffer[i * init.bufsize];
-                       BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
-                       xenoprof_buf[buf->vcpu_id] = buf;
-               }
 
                /*  cpu_type is detected by Xen */
                cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
@@ -541,7 +555,6 @@ int __init oprofile_arch_init(struct opr
 
                active_defined = 0;
        }
- out:
        printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
               "is_primary %d\n", ret, num_events, is_primary);
        return ret;
diff -r 98fcd017c5f3 -r 1ece34466781 xen/arch/x86/oprofile/xenoprof.c
--- a/xen/arch/x86/oprofile/xenoprof.c  Wed Sep 13 14:01:23 2006 +0100
+++ b/xen/arch/x86/oprofile/xenoprof.c  Wed Sep 13 14:05:33 2006 +0100
@@ -437,54 +437,59 @@ int xenoprof_op_init(XEN_GUEST_HANDLE(vo
 int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
 {
     struct xenoprof_init xenoprof_init;
-    int is_primary, num_events;
+    int ret;
+
+    if ( copy_from_guest(&xenoprof_init, arg, 1) )
+        return -EFAULT;
+
+    if ( (ret = nmi_init(&xenoprof_init.num_events, 
+                         &xenoprof_init.is_primary, 
+                         xenoprof_init.cpu_type)) )
+        return ret;
+
+    if ( copy_to_guest(arg, &xenoprof_init, 1) )
+        return -EFAULT;
+
+    if ( xenoprof_init.is_primary )
+        primary_profiler = current->domain;
+
+    return 0;
+}
+
+int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
+{
+    struct xenoprof_get_buffer xenoprof_get_buffer;
     struct domain *d = current->domain;
     int ret;
 
-    if ( copy_from_guest(&xenoprof_init, arg, 1) )
+    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
         return -EFAULT;
 
-    ret = nmi_init(&num_events, 
-                   &is_primary, 
-                   xenoprof_init.cpu_type);
-    if ( ret < 0 )
-        goto err;
-
-    if ( is_primary )
-        primary_profiler = current->domain;
-
     /*
-     * We allocate xenoprof struct and buffers only at first time xenoprof_init
+     * We allocate xenoprof struct and buffers only at first time 
xenoprof_get_buffer
      * is called. Memory is then kept until domain is destroyed.
      */
     if ( (d->xenoprof == NULL) &&
-         ((ret = alloc_xenoprof_struct(d, xenoprof_init.max_samples, 0)) < 0) )
-        goto err;
+         ((ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0)) 
< 0) )
+        return ret;
 
     xenoprof_reset_buf(d);
 
     d->xenoprof->domain_type  = XENOPROF_DOMAIN_IGNORED;
     d->xenoprof->domain_ready = 0;
-    d->xenoprof->is_primary = is_primary;
-
-    xenoprof_init.is_primary = is_primary;
-    xenoprof_init.num_events = num_events;
-    xenoprof_init.nbuf = d->xenoprof->nbuf;
-    xenoprof_init.bufsize = d->xenoprof->bufsize;
-    xenoprof_init.buf_maddr = __pa(d->xenoprof->rawbuf);
-
-    if ( copy_to_guest(arg, &xenoprof_init, 1) )
-    {
-        ret = -EFAULT;
-        goto err;
-    }
-
-    return ret;
-
- err:
     if ( primary_profiler == current->domain )
-        primary_profiler = NULL;
-    return ret;
+        d->xenoprof->is_primary = 1;
+    else
+        d->xenoprof->is_primary = 0;
+        
+    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
+    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
+    xenoprof_get_buffer.buf_maddr = __pa(d->xenoprof->rawbuf);
+
+    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
+        return -EFAULT;
+
+    return 0;
 }
 
 #define PRIV_OP(op) ( (op == XENOPROF_set_active)       \
@@ -510,6 +515,10 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
     {
     case XENOPROF_init:
         ret = xenoprof_op_init(arg);
+        break;
+
+    case XENOPROF_get_buffer:
+        ret = xenoprof_op_get_buffer(arg);
         break;
 
     case XENOPROF_reset_active_list:
diff -r 98fcd017c5f3 -r 1ece34466781 xen/include/public/xenoprof.h
--- a/xen/include/public/xenoprof.h     Wed Sep 13 14:01:23 2006 +0100
+++ b/xen/include/public/xenoprof.h     Wed Sep 13 14:05:33 2006 +0100
@@ -28,6 +28,8 @@
 #define XENOPROF_disable_virq       11
 #define XENOPROF_release_counters   12
 #define XENOPROF_shutdown           13
+#define XENOPROF_get_buffer         14
+#define XENOPROF_last_op            14
 
 #define MAX_OPROF_EVENTS    32
 #define MAX_OPROF_DOMAINS   25 
@@ -56,16 +58,21 @@ DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
 DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
 
 struct xenoprof_init {
-    int32_t  max_samples;
     int32_t  num_events;
     int32_t  is_primary;
-    int32_t  nbuf;
-    int32_t  bufsize;
-    uint64_t buf_maddr;
     char cpu_type[XENOPROF_CPU_TYPE_SIZE];
 };
 typedef struct xenoprof_init xenoprof_init_t;
 DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
+
+struct xenoprof_get_buffer {
+    int32_t  max_samples;
+    int32_t  nbuf;
+    int32_t  bufsize;
+    uint64_t buf_maddr;
+};
+typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
+DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
 
 struct xenoprof_counter {
     uint32_t ind;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.