
[Xen-changelog] [xen-unstable] cpufreq: fix statistic lock problem



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1269593353 0
# Node ID 415c0bf4ac7aac7dd31da15d055fe961f29522f9
# Parent  19cc497863a442c8d23e77fb2f1b201651687aa2
cpufreq: fix statistic lock problem

cpufreq_statistic_lock must protect not only the statistic memory
pointed to by cpufreq_statistic_data[cpu], but also the pointer
cpufreq_statistic_data[cpu] itself. So move the read of
cpufreq_statistic_data[cpu] to after spin_lock(cpufreq_statistic_lock).

Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
---
 xen/drivers/cpufreq/utility.c |   12 ++++++++----
 1 files changed, 8 insertions(+), 4 deletions(-)

diff -r 19cc497863a4 -r 415c0bf4ac7a xen/drivers/cpufreq/utility.c
--- a/xen/drivers/cpufreq/utility.c     Fri Mar 26 08:48:08 2010 +0000
+++ b/xen/drivers/cpufreq/utility.c     Fri Mar 26 08:49:13 2010 +0000
@@ -63,13 +63,14 @@ void cpufreq_residency_update(unsigned i
 
 void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
 {
-    struct pm_px *pxpt = cpufreq_statistic_data[cpu];
+    struct pm_px *pxpt;
     struct processor_pminfo *pmpt = processor_pminfo[cpu];
     spinlock_t *cpufreq_statistic_lock = 
                &per_cpu(cpufreq_statistic_lock, cpu);
 
     spin_lock(cpufreq_statistic_lock);
 
+    pxpt = cpufreq_statistic_data[cpu];
     if ( !pxpt || !pmpt ) {
         spin_unlock(cpufreq_statistic_lock);
         return;
@@ -89,7 +90,7 @@ int cpufreq_statistic_init(unsigned int 
 int cpufreq_statistic_init(unsigned int cpuid)
 {
     uint32_t i, count;
-    struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
+    struct pm_px *pxpt;
     const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
     spinlock_t *cpufreq_statistic_lock = 
                           &per_cpu(cpufreq_statistic_lock, cpuid);
@@ -99,6 +100,7 @@ int cpufreq_statistic_init(unsigned int 
 
     spin_lock(cpufreq_statistic_lock);
 
+    pxpt = cpufreq_statistic_data[cpuid];
     if ( pxpt ) {
         spin_unlock(cpufreq_statistic_lock);
         return 0;
@@ -148,12 +150,13 @@ int cpufreq_statistic_init(unsigned int 
 
 void cpufreq_statistic_exit(unsigned int cpuid)
 {
-    struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
+    struct pm_px *pxpt;
     spinlock_t *cpufreq_statistic_lock = 
                &per_cpu(cpufreq_statistic_lock, cpuid);
 
     spin_lock(cpufreq_statistic_lock);
 
+    pxpt = cpufreq_statistic_data[cpuid];
     if (!pxpt) {
         spin_unlock(cpufreq_statistic_lock);
         return;
@@ -170,13 +173,14 @@ void cpufreq_statistic_reset(unsigned in
 void cpufreq_statistic_reset(unsigned int cpuid)
 {
     uint32_t i, j, count;
-    struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
+    struct pm_px *pxpt;
     const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
     spinlock_t *cpufreq_statistic_lock = 
                &per_cpu(cpufreq_statistic_lock, cpuid);
 
     spin_lock(cpufreq_statistic_lock);
 
+    pxpt = cpufreq_statistic_data[cpuid];
     if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
         spin_unlock(cpufreq_statistic_lock);
         return;
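
For context, here is a minimal standalone sketch of the pattern the
patch enforces: the per-CPU pointer must be read only after the lock
protecting it is held, otherwise a concurrent teardown can free the
object between the unlocked read and the locked use. The sketch uses
pthreads and illustrative names (stat_data, stat_lock, stat_update,
stat_exit), not the actual Xen per_cpu/spinlock infrastructure.

/* Hedged sketch of the locking pattern; names are hypothetical. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct pm_stat {
    uint64_t transitions;
};

static struct pm_stat *stat_data[NR_CPUS];
static pthread_mutex_t stat_lock[NR_CPUS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Buggy shape: the pointer is read before the lock is taken, so a
 * concurrent stat_exit() can free the object and leave this caller
 * dereferencing stale memory. */
void stat_update_buggy(unsigned int cpu)
{
    struct pm_stat *p = stat_data[cpu];       /* racy read */

    pthread_mutex_lock(&stat_lock[cpu]);
    if (p)
        p->transitions++;                     /* possible use-after-free */
    pthread_mutex_unlock(&stat_lock[cpu]);
}

/* Fixed shape, matching the patch: read the pointer only while the
 * lock is held, so it cannot change underneath us. */
void stat_update(unsigned int cpu)
{
    struct pm_stat *p;

    pthread_mutex_lock(&stat_lock[cpu]);
    p = stat_data[cpu];                       /* read under the lock */
    if (p)
        p->transitions++;
    pthread_mutex_unlock(&stat_lock[cpu]);
}

/* Teardown clears the pointer under the same lock, so readers that
 * follow the fixed pattern never see the freed object. */
void stat_exit(unsigned int cpu)
{
    struct pm_stat *p;

    pthread_mutex_lock(&stat_lock[cpu]);
    p = stat_data[cpu];
    stat_data[cpu] = NULL;
    pthread_mutex_unlock(&stat_lock[cpu]);
    free(p);
}

int main(void)
{
    stat_data[0] = calloc(1, sizeof(struct pm_stat));
    stat_update(0);
    printf("transitions on cpu0: %llu\n",
           (unsigned long long)stat_data[0]->transitions);
    stat_exit(0);
    return 0;
}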
