
[Xen-changelog] Fix local_irq_save() and irqs_disabled() to be preemption-safe.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 6cf6f878328241adf7e07aa58b01b9d5a53574c0
# Parent  475e2a8493b803916b63a68712a4e49abebe1026
Fix local_irq_save() and irqs_disabled() to be preemption-safe.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
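
For context: with CONFIG_PREEMPT a task can be migrated to another CPU
between computing smp_processor_id() and touching that VCPU's fields in
the shared info page, so an unbracketed access can read or write another
VCPU's evtchn_upcall_mask. A minimal sketch of the broken vs. fixed
pattern (the function names here are illustrative only; vcpu_info_t,
HYPERVISOR_shared_info and evtchn_upcall_mask are as in the patch):

    /* Broken under preemption (the old __save_flags() shape): the task
     * can migrate right after smp_processor_id() is evaluated, so the
     * mask fetched may belong to a different VCPU. */
    static inline unsigned long unsafe_save_flags(void)
    {
            vcpu_info_t *vcpu =
                    &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];
            return vcpu->evtchn_upcall_mask;
    }

    /* Fixed: pin the task to this CPU for the duration of the access. */
    static inline unsigned long safe_save_flags(void)
    {
            vcpu_info_t *vcpu;
            unsigned long flags;

            preempt_disable();
            vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];
            flags = vcpu->evtchn_upcall_mask;
            preempt_enable();
            return flags;
    }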

diff -r 475e2a8493b8 -r 6cf6f8783282 linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h    Sat Oct 15 16:19:43 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h    Sun Oct 16 10:45:51 2005
@@ -497,22 +497,11 @@
  * includes these barriers, for example.
  */
 
-/*
- * Don't use smp_processor_id() in preemptible code: debug builds will barf.
- * It's okay in these cases as we only read the upcall mask in preemptible
- * regions, which is always safe.
- */
-#ifdef CONFIG_SMP
-#define __this_cpu()   __smp_processor_id()
-#else
-#define __this_cpu()   0
-#endif
-
 #define __cli()                                                                \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
@@ -523,7 +512,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
@@ -534,8 +523,10 @@
 #define __save_flags(x)                                                        \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
+       preempt_enable();                                               \
 } while (0)
 
 #define __restore_flags(x)                                             \
@@ -543,7 +534,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
@@ -559,7 +550,7 @@
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
@@ -572,8 +563,15 @@
 #define local_irq_disable()    __cli()
 #define local_irq_enable()     __sti()
 
+/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
 #define irqs_disabled()                                                        \
-       HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
+({     int ___x;                                                       \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       ___x = (_vcpu->evtchn_upcall_mask != 0);                        \
+       preempt_enable_no_resched();                                    \
+       ___x; })
 
 /*
  * disable hlt during certain critical i/o operations
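
For reference, the new irqs_disabled() is a GCC statement expression, so
it can bracket the read with preempt_disable()/preempt_enable_no_resched()
and still evaluate to a value. Written out as an equivalent inline
function (a sketch for clarity, not part of the patch):

    static inline int xen_irqs_disabled(void)
    {
            int disabled;
            vcpu_info_t *vcpu;

            preempt_disable();
            vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];
            disabled = (vcpu->evtchn_upcall_mask != 0);
            /* preempt_enable() may call preempt_schedule(), which itself
             * tests irqs_disabled() -- the no-resched variant breaks
             * that recursion. */
            preempt_enable_no_resched();
            return disabled;
    }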
diff -r 475e2a8493b8 -r 6cf6f8783282 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h  Sat Oct 15 16:19:43 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h  Sun Oct 16 10:45:51 2005
@@ -321,22 +321,11 @@
  * includes these barriers, for example.
  */
 
-/*
- * Don't use smp_processor_id() in preemptible code: debug builds will barf.
- * It's okay in these cases as we only read the upcall mask in preemptible
- * regions, which is always safe.
- */
-#ifdef CONFIG_SMP
-#define __this_cpu()   __smp_processor_id()
-#else
-#define __this_cpu()   0
-#endif
-
 #define __cli()                                                                \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
@@ -347,7 +336,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
@@ -358,8 +347,10 @@
 #define __save_flags(x)                                                        \
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
+       preempt_enable();                                               \
 } while (0)
 
 #define __restore_flags(x)                                             \
@@ -367,7 +358,7 @@
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
@@ -383,7 +374,7 @@
 do {                                                                   \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];       \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
@@ -398,8 +389,15 @@
 #define local_irq_disable()    __cli()
 #define local_irq_enable()     __sti()
 
+/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
 #define irqs_disabled()                                                        \
-       HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
+({     int ___x;                                                       \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       ___x = (_vcpu->evtchn_upcall_mask != 0);                        \
+       preempt_enable_no_resched();                                    \
+       ___x; })
 
 /*
  * disable hlt during certain critical i/o operations
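
With both headers fixed, the usual save/restore idiom is safe from
preemptible context, e.g. (usage sketch; local_irq_save() and
local_irq_restore() are built from the __save_and_cli()/__restore_flags()
macros patched above):

    unsigned long flags;

    local_irq_save(flags);      /* mask event upcalls, preemption-safe */
    /* ... manipulate event-channel or per-CPU state ... */
    local_irq_restore(flags);   /* may deliver upcalls left pending */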

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

