
[Xen-changelog] [xen-unstable] Deliver cpufreq actual average freq to libxc interface



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1237538937 0
# Node ID 0fc0de02856acae11dcea4b38a2fde9ece03a02a
# Parent  532e25fda238b3ddba22d5c93e4b1bb36e2b4cb0
Deliver cpufreq actual average freq to libxc interface

Calculate the CPU's actual average frequency from the APERF and MPERF
MSRs and deliver it to the libxc interface.

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
---
 tools/libxc/xc_pm.c         |   18 ++++++++++++++++++
 tools/libxc/xenctrl.h       |    1 +
 xen/drivers/acpi/pmstat.c   |   16 ++++++++++++++++
 xen/include/public/sysctl.h |    2 ++
 4 files changed, 37 insertions(+)
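
For context, the value returned by GET_CPUFREQ_AVGFREQ is derived from the
APERF/MPERF MSR pair: over a sampling interval, the ratio of the APERF delta
to the MPERF delta scales the maximum (P0) frequency down to the frequency
actually delivered. The sketch below is only an illustration of that
arithmetic under the usual cpufreq convention of kHz; the helper name, the
example numbers, and the use of plain integers in place of real MSR reads are
assumptions for illustration, not the hypervisor's implementation.

/*
 * Illustrative sketch (not the Xen code): deriving an "actual average
 * frequency" from APERF/MPERF deltas.
 *
 *   avg_freq = max_freq * (APERF_now - APERF_prev) / (MPERF_now - MPERF_prev)
 *
 * The MSR reads themselves are omitted; the deltas are passed in as plain
 * integers so the arithmetic stands on its own.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t avg_freq_khz(uint64_t aperf_delta, uint64_t mperf_delta,
                             uint64_t max_freq_khz)
{
    if ( mperf_delta == 0 )      /* no reference cycles elapsed */
        return 0;
    return max_freq_khz * aperf_delta / mperf_delta;
}

int main(void)
{
    /* Example numbers only: a 2.4 GHz part running at ~75% of P0. */
    uint64_t avg = avg_freq_khz(750000, 1000000, 2400000);
    printf("estimated average frequency: %" PRIu64 " kHz\n", avg);
    return 0;
}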

diff -r 532e25fda238 -r 0fc0de02856a tools/libxc/xc_pm.c
--- a/tools/libxc/xc_pm.c       Fri Mar 20 08:48:17 2009 +0000
+++ b/tools/libxc/xc_pm.c       Fri Mar 20 08:48:57 2009 +0000
@@ -307,6 +307,24 @@ int xc_set_cpufreq_para(int xc_handle, i
     return xc_sysctl(xc_handle, &sysctl);
 }
 
+int xc_get_cpufreq_avgfreq(int xc_handle, int cpuid, int *avg_freq)
+{
+    int ret = 0;
+    DECLARE_SYSCTL;
+
+    if ( (xc_handle < 0) || (!avg_freq) )
+        return -EINVAL;
+
+    sysctl.cmd = XEN_SYSCTL_pm_op;
+    sysctl.u.pm_op.cmd = GET_CPUFREQ_AVGFREQ;
+    sysctl.u.pm_op.cpuid = cpuid;
+    ret = xc_sysctl(xc_handle, &sysctl);
+
+    *avg_freq = sysctl.u.pm_op.get_avgfreq;
+
+    return ret;
+}
+
 int xc_get_cputopo(int xc_handle, struct xc_get_cputopo *info)
 {
     int rc;
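
A minimal caller sketch for the new libxc entry point follows. It assumes the
libxc API of this changeset's era, in which xc_interface_open() returns a
plain int handle; the CPU number, the kHz interpretation of the result, and
the error handling are illustrative assumptions only.

/* Hypothetical toolstack snippet exercising xc_get_cpufreq_avgfreq().
 * Assumes the int-handle libxc API contemporary with this changeset. */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    int xc_handle, avg_freq = 0;

    xc_handle = xc_interface_open();
    if ( xc_handle < 0 )
        return 1;

    /* Query the measured average frequency of CPU 0. */
    if ( xc_get_cpufreq_avgfreq(xc_handle, 0, &avg_freq) == 0 )
        printf("cpu0 average frequency: %d kHz\n", avg_freq);

    xc_interface_close(xc_handle);
    return 0;
}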
diff -r 532e25fda238 -r 0fc0de02856a tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri Mar 20 08:48:17 2009 +0000
+++ b/tools/libxc/xenctrl.h     Fri Mar 20 08:48:57 2009 +0000
@@ -1242,6 +1242,7 @@ int xc_set_cpufreq_gov(int xc_handle, in
 int xc_set_cpufreq_gov(int xc_handle, int cpuid, char *govname);
 int xc_set_cpufreq_para(int xc_handle, int cpuid,
                         int ctrl_type, int ctrl_value);
+int xc_get_cpufreq_avgfreq(int xc_handle, int cpuid, int *avg_freq);
 
 struct xc_get_cputopo {
      /* IN: maximum addressable entry in
diff -r 532e25fda238 -r 0fc0de02856a xen/drivers/acpi/pmstat.c
--- a/xen/drivers/acpi/pmstat.c Fri Mar 20 08:48:17 2009 +0000
+++ b/xen/drivers/acpi/pmstat.c Fri Mar 20 08:48:57 2009 +0000
@@ -409,6 +409,16 @@ static int set_cpufreq_para(struct xen_s
     return ret;
 }
 
+static int get_cpufreq_avgfreq(struct xen_sysctl_pm_op *op)
+{
+    if ( !op || !cpu_online(op->cpuid) )
+        return -EINVAL;
+
+    op->get_avgfreq = cpufreq_driver_getavg(op->cpuid, USR_GETAVG);
+
+    return 0;
+}
+
 static int get_cputopo (struct xen_sysctl_pm_op *op)
 {
     uint32_t i, nr_cpus;
@@ -494,6 +504,12 @@ int do_pm_op(struct xen_sysctl_pm_op *op
         break;
     }
 
+    case GET_CPUFREQ_AVGFREQ:
+    {
+        ret = get_cpufreq_avgfreq(op);
+        break;
+    }
+
     case XEN_SYSCTL_pm_op_get_cputopo:
     {
         ret = get_cputopo(op);
diff -r 532e25fda238 -r 0fc0de02856a xen/include/public/sysctl.h
--- a/xen/include/public/sysctl.h       Fri Mar 20 08:48:17 2009 +0000
+++ b/xen/include/public/sysctl.h       Fri Mar 20 08:48:57 2009 +0000
@@ -376,6 +376,7 @@ struct xen_sysctl_pm_op {
     #define GET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x01)
     #define SET_CPUFREQ_GOV            (CPUFREQ_PARA | 0x02)
     #define SET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x03)
+    #define GET_CPUFREQ_AVGFREQ        (CPUFREQ_PARA | 0x04)
 
     /* get CPU topology */
     #define XEN_SYSCTL_pm_op_get_cputopo  0x20
@@ -386,6 +387,7 @@ struct xen_sysctl_pm_op {
         struct xen_get_cpufreq_para get_para;
         struct xen_set_cpufreq_gov  set_gov;
         struct xen_set_cpufreq_para set_para;
+        uint64_t get_avgfreq;
         struct xen_get_cputopo      get_topo;
     };
 };
