
[Xen-changelog] [xen-unstable] x86: fix powernow



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1222095372 -3600
# Node ID 415c3da25b26296335a3c6cf0511957f48dfd601
# Parent  81483e49c74c314ae3ed098c1373dfc3f2d3f31e
x86: fix powernow

... by allocating the necessary cpufreq_policy structures.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/acpi/cpufreq/powernow.c |   24 +++++++++++++++++++++---
 1 files changed, 21 insertions(+), 3 deletions(-)

diff -r 81483e49c74c -r 415c3da25b26 xen/arch/x86/acpi/cpufreq/powernow.c
--- a/xen/arch/x86/acpi/cpufreq/powernow.c      Mon Sep 22 15:50:59 2008 +0100
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c      Mon Sep 22 15:56:12 2008 +0100
@@ -283,9 +283,27 @@ int powernow_cpufreq_init(void)
 
     /* setup cpufreq infrastructure */
     for_each_online_cpu(i) {
-        cpufreq_cpu_policy[i]->cpu = i;
-
-        ret = powernow_cpufreq_cpu_init(cpufreq_cpu_policy[i]);
+        struct cpufreq_policy *policy = cpufreq_cpu_policy[i];
+
+        if (!policy) {
+            unsigned int firstcpu;
+
+            firstcpu = first_cpu(processor_pminfo[i]->perf.shared_cpu_map);
+            if (i == firstcpu) {
+                policy = xmalloc(struct cpufreq_policy);
+                if (!policy) {
+                    ret = -ENOMEM;
+                    goto cpufreq_init_out;
+                }
+                memset(policy, 0, sizeof(struct cpufreq_policy));
+                policy->cpu = i;
+            } else
+                policy = cpufreq_cpu_policy[firstcpu];
+            cpu_set(i, policy->cpus);
+            cpufreq_cpu_policy[i] = policy;
+        }
+
+        ret = powernow_cpufreq_cpu_init(policy);
         if (ret)
             goto cpufreq_init_out;
     }
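
For readers outside the Xen tree, the allocation pattern the hunk introduces can be modelled in isolation: CPUs that share a P-state domain (per shared_cpu_map) share a single cpufreq_policy, which the first CPU of the domain allocates and owns, and which later CPUs of the same domain simply reference. The following is a minimal user-space sketch, not Xen code: the 8-CPU/two-domain topology, the shared_cpu_map() helper, the plain unsigned long bitmask standing in for cpumask_t, and calloc() standing in for xmalloc()+memset() are all illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

/* Toy stand-in for Xen's struct cpufreq_policy (illustrative only). */
struct cpufreq_policy {
    unsigned int cpu;     /* first (owning) CPU of the shared domain */
    unsigned long cpus;   /* bitmask of CPUs sharing this policy */
};

static struct cpufreq_policy *cpufreq_cpu_policy[NR_CPUS];

/* Hypothetical topology: CPUs 0-3 and 4-7 each form one P-state domain,
 * playing the role of processor_pminfo[i]->perf.shared_cpu_map. */
static unsigned long shared_cpu_map(unsigned int i)
{
    return (i < 4) ? 0x0fUL : 0xf0UL;
}

/* Lowest set bit; mask must be non-zero (mirrors Xen's first_cpu()). */
static unsigned int first_cpu(unsigned long mask)
{
    unsigned int c = 0;
    while (!(mask & (1UL << c)))
        c++;
    return c;
}

int main(void)
{
    for (unsigned int i = 0; i < NR_CPUS; i++) {
        struct cpufreq_policy *policy = cpufreq_cpu_policy[i];

        if (!policy) {
            unsigned int firstcpu = first_cpu(shared_cpu_map(i));

            if (i == firstcpu) {
                /* First CPU of the domain allocates and owns the policy. */
                policy = calloc(1, sizeof(*policy));
                if (!policy) {
                    fprintf(stderr, "out of memory\n");
                    return 1;
                }
                policy->cpu = i;
            } else {
                /* Later CPUs of the domain reuse the owner's policy. */
                policy = cpufreq_cpu_policy[firstcpu];
            }
            policy->cpus |= 1UL << i;   /* cpu_set(i, policy->cpus) */
            cpufreq_cpu_policy[i] = policy;
        }

        printf("CPU%u -> policy of CPU%u (cpus mask 0x%02lx)\n",
               i, policy->cpu, policy->cpus);
    }
    return 0;
}

Running the sketch maps CPUs 0-3 to the policy owned by CPU 0 and CPUs 4-7 to the one owned by CPU 4, mirroring how the patch guarantees cpufreq_cpu_policy[i] is non-NULL before powernow_cpufreq_cpu_init() dereferences it (the removed lines dereferenced it without any allocation having taken place).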
