
[Xen-ia64-devel] [PATCH 3/3] Sample implementation of Xenoprof for ia64



This is a patch for oprofile-0.9.1 to which oprofile-0.9.1-xen.patch has
already been applied.

Signed-off-by: SUZUKI Kazuhiro <kaz@xxxxxxxxxxxxxx>
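
Before the hunks, a note on what the new "domain" flag ends up doing: in
opd_perfmon.c it sets one more bit in the per-counter PMC value, next to the
existing kernel and user bits. Below is a minimal standalone sketch of that
bit composition only, using the PMC_* values from the patch; reading bit 2 as
"count at the privilege level used by Xen/ia64 guest kernels" is my
interpretation, not something the patch itself states.

#include <stdio.h>

/* Bit positions copied from the opd_perfmon.c hunk below. The meaning of
 * PMC_DOMAIN (privilege level 2, assumed to be where a Xen/ia64 guest
 * kernel runs) is an assumption for illustration only. */
#define PMC_MANDATORY (1UL << 23)
#define PMC_USER      (1UL << 3)
#define PMC_DOMAIN    (1UL << 2)
#define PMC_KERNEL    (1UL << 0)

/* Compose the privilege-mask bits of a PMC value from per-event flags,
 * mirroring what the patched loop in opd_perfmon.c does. */
static unsigned long pmc_priv_mask(int kernel, int user, int domain)
{
        unsigned long v = PMC_MANDATORY;

        if (kernel)
                v |= PMC_KERNEL;
        if (user)
                v |= PMC_USER;
        if (domain)
                v |= PMC_DOMAIN;
        return v;
}

int main(void)
{
        /* Example: profile user, kernel and the Xen domain. */
        printf("reg_value bits: 0x%lx\n", pmc_priv_mask(1, 1, 1));
        return 0;
}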

diff -Nur oprofile-0.9.1-xen/daemon/opd_events.c oprofile-0.9.1-xenoprof/daemon/opd_events.c
--- oprofile-0.9.1-xen/daemon/opd_events.c      2005-05-03 00:06:57.000000000 +0900
+++ oprofile-0.9.1-xenoprof/daemon/opd_events.c 2006-07-03 14:37:28.000000000 +0900
@@ -88,6 +88,7 @@
                        = event->count = event->um = 0;
                event->kernel = 1;
                event->user = 1;
+               event->domain = 1;
                return;
        }
 
@@ -111,6 +112,7 @@
                event->um = copy_ulong(&c, ':');
                event->kernel = copy_ulong(&c, ':');
                event->user = copy_ulong(&c, ',');
+               event->domain = xenimage != NULL;
                ++cur;
        }
 
diff -Nur oprofile-0.9.1-xen/daemon/opd_events.h oprofile-0.9.1-xenoprof/daemon/opd_events.h
--- oprofile-0.9.1-xen/daemon/opd_events.h      2005-05-03 00:06:58.000000000 +0900
+++ oprofile-0.9.1-xenoprof/daemon/opd_events.h 2006-06-07 13:32:38.000000000 +0900
@@ -24,6 +24,7 @@
        unsigned long um;
        unsigned long kernel;
        unsigned long user;
+       unsigned long domain;
 };
 
 /* needed for opd_perfmon.c */
diff -Nur oprofile-0.9.1-xen/daemon/opd_perfmon.c oprofile-0.9.1-xenoprof/daemon/opd_perfmon.c
--- oprofile-0.9.1-xen/daemon/opd_perfmon.c     2004-12-13 08:26:35.000000000 +0900
+++ oprofile-0.9.1-xenoprof/daemon/opd_perfmon.c        2006-07-03 14:10:55.000000000 +0900
@@ -33,6 +33,7 @@
 #ifdef HAVE_SCHED_SETAFFINITY
 #include <sched.h>
 #endif
+#include <mntent.h>
 
 extern op_cpu cpu_type;
 
@@ -241,6 +242,7 @@
  */
 #define PMC_MANDATORY (1UL << 23)
 #define PMC_USER (1UL << 3)
+#define PMC_DOMAIN (1UL << 2)
 #define PMC_KERNEL (1UL << 0)
        for (i = 0; i < op_nr_counters && opd_events[i].name; ++i) {
                struct opd_event * event = &opd_events[i];
@@ -252,6 +254,8 @@
                              : (pc[i].reg_value &= ~PMC_USER);
                (event->kernel) ? (pc[i].reg_value |= PMC_KERNEL)
                                : (pc[i].reg_value &= ~PMC_KERNEL);
+               (event->domain) ? (pc[i].reg_value |= PMC_DOMAIN)
+                               : (pc[i].reg_value &= ~PMC_DOMAIN);
                pc[i].reg_value &= ~(0xff << 8);
                pc[i].reg_value |= ((event->value & 0xff) << 8);
                pc[i].reg_value &= ~(0xf << 16);
@@ -381,6 +385,101 @@
 }
 
 
+/*
+ * We get the number of online CPUs from /proc/cpuinfo,
+ * because patches/linux-2.6.16.13/xen-hotplug.patch does
+ * not work correctly.
+ */
+# define GET_NPROCS_PARSER(FP, BUFFER, RESULT)                         \
+  do                                                                   \
+    {                                                                  \
+      (RESULT) = 0;                                                    \
+      /* Read all lines and count the lines starting with the string   \
+        "processor".  We don't have to fear extremely long lines since \
+        the kernel will not generate them.  8192 bytes are really      \
+        enough.  */                                                    \
+      while (fgets_unlocked (BUFFER, sizeof (BUFFER), FP) != 0)        \
+       if (strncmp (BUFFER, "processor", 9) == 0)                      \
+         ++(RESULT);                                                   \
+    }                                                                  \
+  while (0)
+
+static const char path_proc[] = "/proc";
+
+static const char *
+get_proc_path (char *buffer, size_t bufsize)
+{
+  struct mntent mount_point;
+  struct mntent *entry;
+  char *result = NULL;
+  char *copy_result;
+  FILE *fp;
+
+  /* First find the mount point of the proc filesystem.  */
+  fp = setmntent (_PATH_MOUNTED, "r");
+  if (fp == NULL)
+    fp = setmntent (_PATH_MNTTAB, "r");
+  if (fp != NULL)
+    {
+      /* We don't need locking.  */
+      while ((entry = getmntent_r (fp, &mount_point, buffer, bufsize))
+            != NULL)
+       if (strcmp (mount_point.mnt_type, "proc") == 0)
+         {
+           result = mount_point.mnt_dir;
+           break;
+         }
+      endmntent (fp);
+    }
+
+  /* If we haven't found anything this is generally a bad sign but we
+     handle it gracefully.  We return what is hopefully the right
+     answer (/proc) but we don't remember this.  This will enable
+     programs which started before the system is fully running to
+     adjust themselves.  */
+  if (result == NULL)
+    return path_proc;
+
+  /* Make a copy we can keep around.  */
+  copy_result = strdup (result);
+  if (copy_result == NULL)
+    return result;
+
+  return copy_result;
+}
+
+int get_nprocs (void)
+{
+  FILE *fp;
+  char buffer[8192];
+  const char *proc_path;
+  int result = 1;
+
+  /* XXX Here will come a test for the new system call.  */
+
+  /* Get mount point of proc filesystem.  */
+  proc_path = get_proc_path (buffer, sizeof buffer);
+
+  /* If we haven't found an appropriate entry return 1.  */
+  if (proc_path != NULL)
+    {
+      char *proc_fname = alloca (strlen (proc_path) + sizeof ("/cpuinfo"));
+
+      __stpcpy (__stpcpy (proc_fname, proc_path), "/cpuinfo");
+
+      fp = fopen (proc_fname, "rc");
+      if (fp != NULL)
+       {
+         /* No threads use this stream.  */
+         GET_NPROCS_PARSER (fp, buffer, result);
+         fclose (fp);
+       }
+    }
+
+  return result;
+}
+
+
 void perfmon_init(void)
 {
        size_t i;
@@ -389,11 +488,19 @@
        if (cpu_type == CPU_TIMER_INT)
                return;
 
+       /*
+        * patches/linux-2.6.16.13/xen-hotplug.patch does not work correctly,
+        * so we get the number of online CPUs from /proc/cpuinfo instead.
+        */
+#if 0
        nr = sysconf(_SC_NPROCESSORS_ONLN);
        if (nr == -1) {
                fprintf(stderr, "Couldn't determine number of CPUs.\n");
                exit(EXIT_FAILURE);
        }
+#else
+       nr = get_nprocs();
+#endif
 
        nr_cpus = nr;
 
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel