
[Xen-changelog] [xen-4.0-testing] C6 state with EOI issue fix for some Intel processors



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1284537730 -3600
# Node ID aced003668226f1007305092086611d12eaff396
# Parent  081ba5a13718f12f0d306d8f094c1b73a7f7d173
C6 state with EOI issue fix for some Intel processors

There is an erratum in some Intel processors.

AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6
During an Interrupt Service Routine

If core C6 is entered after the start of an interrupt service routine
but before a write to the APIC EOI register, the core may not send an
EOI transaction (if needed) and further interrupts from the same
priority level or lower may be blocked.

This patch fixes the issue by checking whether an APIC EOI is still
pending before entering a deep Cx state. If one is pending, it uses
power->safe_state instead of the deep Cx state, preventing the issue
described above.
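
In outline, the change amounts to the gate sketched below. This is only a
simplified rendering of the cpu_idle.c hunk in the patch, reusing the
identifiers the patch itself introduces (errata_c6_eoi_workaround(),
power->safe_state); the committed code follows in the diff.

    /* Sketch only -- see the actual hunks below for the committed code. */
    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
    {
        /*
         * An APIC EOI is still pending on an affected CPU: demote the
         * requested C3-class state to the shallower safe_state so the
         * core never enters C6 in the middle of an interrupt service
         * routine.
         */
        cx = power->safe_state;
    }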

Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   22160:1087f9a03ab6
xen-unstable date:        Wed Sep 15 09:00:35 2010 +0100
---
 xen/arch/x86/acpi/cpu_idle.c |   28 ++++++++++++++++++++++++++++
 xen/arch/x86/irq.c           |    5 +++++
 xen/include/asm-x86/irq.h    |    2 ++
 3 files changed, 35 insertions(+)

diff -r 081ba5a13718 -r aced00366822 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Wed Sep 15 08:22:44 2010 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c      Wed Sep 15 09:02:10 2010 +0100
@@ -226,6 +226,31 @@ static int sched_has_urgent_vcpu(void)
     return atomic_read(&this_cpu(schedule_data).urgent_count);
 }
 
+/*
+ * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During 
+ * an Interrupt Service Routine"
+ * 
+ * There was an errata with some Core i7 processors that an EOI transaction 
+ * may not be sent if software enters core C6 during an interrupt service 
+ * routine. So we don't enter deep Cx state if there is an EOI pending.
+ */
+bool_t errata_c6_eoi_workaround(void)
+{
+    static bool_t fix_needed = -1;
+
+    if ( unlikely(fix_needed == -1) )
+    {
+        int model = boot_cpu_data.x86_model;
+        fix_needed = (cpu_has_apic && !directed_eoi_enabled &&
+                      (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+                      (boot_cpu_data.x86 == 6) &&
+                      ((model == 0x1a) || (model == 0x1e) || (model == 0x1f) ||
+                       (model == 0x25) || (model == 0x2c) || (model == 0x2f)));
+    }
+
+    return (fix_needed && cpu_has_pending_apic_eoi());
+}
+
 static void acpi_processor_idle(void)
 {
     struct acpi_processor_power *power = processor_powers[smp_processor_id()];
@@ -276,6 +301,9 @@ static void acpi_processor_idle(void)
         cpufreq_dbs_timer_resume();
         return;
     }
+
+    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
+        cx = power->safe_state;
 
     power->last_state = cx;
 
diff -r 081ba5a13718 -r aced00366822 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Wed Sep 15 08:22:44 2010 +0100
+++ b/xen/arch/x86/irq.c        Wed Sep 15 09:02:10 2010 +0100
@@ -756,6 +756,11 @@ static DEFINE_PER_CPU(struct pending_eoi
 static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_DYNAMIC_VECTORS]);
 #define pending_eoi_sp(p) ((p)[NR_DYNAMIC_VECTORS-1].vector)
 
+bool_t cpu_has_pending_apic_eoi(void)
+{
+    return (pending_eoi_sp(this_cpu(pending_eoi)) != 0);
+}
+
 static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
 {
     if ( d->arch.pirq_eoi_map )
diff -r 081ba5a13718 -r aced00366822 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Wed Sep 15 08:22:44 2010 +0100
+++ b/xen/include/asm-x86/irq.h Wed Sep 15 09:02:10 2010 +0100
@@ -150,4 +150,6 @@ void irq_set_affinity(int irq, cpumask_t
 #define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
 #define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
 
+bool_t cpu_has_pending_apic_eoi(void);
+
 #endif /* _ASM_HW_IRQ_H */
