[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] cpufreq: stats_lock doesn't appear to need to be irq-safe.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1231327310 0
# Node ID 6d96d3c02e94152f52529a7b36f884228164e1e5
# Parent  b0beee2685433da53ba3943880282c3e38e7a257
cpufreq: stats_lock doesn't appear to need to be irq-safe.

Making the lock non-IRQ-safe fixes some bug checks (xmalloc() being called with IRQs disabled).

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/drivers/acpi/pmstat.c     |   10 +++++-----
 xen/drivers/cpufreq/utility.c |   30 +++++++++++++++---------------
 2 files changed, 20 insertions(+), 20 deletions(-)

diff -r b0beee268543 -r 6d96d3c02e94 xen/drivers/acpi/pmstat.c
--- a/xen/drivers/acpi/pmstat.c Wed Jan 07 14:28:33 2009 +0900
+++ b/xen/drivers/acpi/pmstat.c Wed Jan 07 11:21:50 2009 +0000
@@ -92,11 +92,11 @@ int do_get_pm_info(struct xen_sysctl_get
         spinlock_t *cpufreq_statistic_lock = 
                    &per_cpu(cpufreq_statistic_lock, op->cpuid);
 
-        spin_lock_irq(cpufreq_statistic_lock);
+        spin_lock(cpufreq_statistic_lock);
 
         if ( !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
         {
-            spin_unlock_irq(cpufreq_statistic_lock);
+            spin_unlock(cpufreq_statistic_lock);
             return -ENODATA;
         }
 
@@ -107,14 +107,14 @@ int do_get_pm_info(struct xen_sysctl_get
         ct = pmpt->perf.state_count;
         if ( copy_to_guest(op->u.getpx.trans_pt, pxpt->u.trans_pt, ct*ct) )
         {
-            spin_unlock_irq(cpufreq_statistic_lock);
+            spin_unlock(cpufreq_statistic_lock);
             ret = -EFAULT;
             break;
         }
 
         if ( copy_to_guest(op->u.getpx.pt, pxpt->u.pt, ct) )
         {
-            spin_unlock_irq(cpufreq_statistic_lock);
+            spin_unlock(cpufreq_statistic_lock);
             ret = -EFAULT;
             break;
         }
@@ -124,7 +124,7 @@ int do_get_pm_info(struct xen_sysctl_get
         op->u.getpx.last = pxpt->u.last;
         op->u.getpx.cur = pxpt->u.cur;
 
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
 
         break;
     }
diff -r b0beee268543 -r 6d96d3c02e94 xen/drivers/cpufreq/utility.c
--- a/xen/drivers/cpufreq/utility.c     Wed Jan 07 14:28:33 2009 +0900
+++ b/xen/drivers/cpufreq/utility.c     Wed Jan 07 11:21:50 2009 +0000
@@ -68,10 +68,10 @@ void cpufreq_statistic_update(unsigned i
     spinlock_t *cpufreq_statistic_lock = 
                &per_cpu(cpufreq_statistic_lock, cpu);
 
-    spin_lock_irq(cpufreq_statistic_lock);
+    spin_lock(cpufreq_statistic_lock);
 
     if ( !pxpt || !pmpt ) {
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return;
     }
 
@@ -83,7 +83,7 @@ void cpufreq_statistic_update(unsigned i
 
     (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
 
-    spin_unlock_irq(cpufreq_statistic_lock);
+    spin_unlock(cpufreq_statistic_lock);
 }
 
 int cpufreq_statistic_init(unsigned int cpuid)
@@ -97,10 +97,10 @@ int cpufreq_statistic_init(unsigned int 
     if ( !pmpt )
         return -EINVAL;
 
-    spin_lock_irq(cpufreq_statistic_lock);
+    spin_lock(cpufreq_statistic_lock);
 
     if ( pxpt ) {
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return 0;
     }
 
@@ -108,7 +108,7 @@ int cpufreq_statistic_init(unsigned int 
 
     pxpt = xmalloc(struct pm_px);
     if ( !pxpt ) {
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return -ENOMEM;
     }
     memset(pxpt, 0, sizeof(*pxpt));
@@ -117,7 +117,7 @@ int cpufreq_statistic_init(unsigned int 
     pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
     if (!pxpt->u.trans_pt) {
         xfree(pxpt);
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return -ENOMEM;
     }
 
@@ -125,7 +125,7 @@ int cpufreq_statistic_init(unsigned int 
     if (!pxpt->u.pt) {
         xfree(pxpt->u.trans_pt);
         xfree(pxpt);
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return -ENOMEM;
     }
 
@@ -141,7 +141,7 @@ int cpufreq_statistic_init(unsigned int 
     pxpt->prev_state_wall = NOW();
     pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);
 
-    spin_unlock_irq(cpufreq_statistic_lock);
+    spin_unlock(cpufreq_statistic_lock);
 
     return 0;
 }
@@ -152,10 +152,10 @@ void cpufreq_statistic_exit(unsigned int
     spinlock_t *cpufreq_statistic_lock = 
                &per_cpu(cpufreq_statistic_lock, cpuid);
 
-    spin_lock_irq(cpufreq_statistic_lock);
+    spin_lock(cpufreq_statistic_lock);
 
     if (!pxpt) {
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return;
     }
 
@@ -164,7 +164,7 @@ void cpufreq_statistic_exit(unsigned int
     xfree(pxpt);
     cpufreq_statistic_data[cpuid] = NULL;
 
-    spin_unlock_irq(cpufreq_statistic_lock);
+    spin_unlock(cpufreq_statistic_lock);
 }
 
 void cpufreq_statistic_reset(unsigned int cpuid)
@@ -175,10 +175,10 @@ void cpufreq_statistic_reset(unsigned in
     spinlock_t *cpufreq_statistic_lock = 
                &per_cpu(cpufreq_statistic_lock, cpuid);
 
-    spin_lock_irq(cpufreq_statistic_lock);
+    spin_lock(cpufreq_statistic_lock);
 
     if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
-        spin_unlock_irq(cpufreq_statistic_lock);
+        spin_unlock(cpufreq_statistic_lock);
         return;
     }
 
@@ -195,7 +195,7 @@ void cpufreq_statistic_reset(unsigned in
     pxpt->prev_state_wall = NOW();
     pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);
 
-    spin_unlock_irq(cpufreq_statistic_lock);
+    spin_unlock(cpufreq_statistic_lock);
 }
 
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.