
[Xen-changelog] [xen-unstable] x86: fix unmaskable msi assignment issue.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1263370718 0
# Node ID 6512068aa0db7747df4f180b03d269afb7da5bb0
# Parent  3f8fd65732cc7c2324c905fe055e522077672d82
x86: fix unmaskable msi assignment issue.

Currently, an unmaskable MSI irq's physical EOI write is deferred until
the guest writes EOI, which requires eoi_vector to stay unchanged until
that happens. However, irq migration breaks this assumption and changes
eoi_vector once interrupts start arriving through the new vector.

This patch removes the dependency on eoi_vector by recording the irq
directly in the pending-EOI stack; when the guest writes EOI, the
physical EOI is issued for that specific irq (as recorded in the EOI
stack) on the CPUs indicated by cpu_eoi_map.

Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
---
 xen/arch/x86/irq.c |   12 ++++++------
 1 files changed, 6 insertions(+), 6 deletions(-)

diff -r 3f8fd65732cc -r 6512068aa0db xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Wed Jan 13 08:17:00 2010 +0000
+++ b/xen/arch/x86/irq.c        Wed Jan 13 08:18:38 2010 +0000
@@ -740,7 +740,6 @@ typedef struct {
 #define ACKTYPE_UNMASK 1     /* Unmask PIC hardware (from any CPU)   */
 #define ACKTYPE_EOI    2     /* EOI on the CPU that was interrupted  */
     cpumask_t cpu_eoi_map;   /* CPUs that need to EOI this interrupt */
-    u8 eoi_vector;           /* vector awaiting the EOI*/
     struct domain *guest[IRQ_MAX_GUESTS];
 } irq_guest_action_t;
 
@@ -749,8 +748,9 @@ typedef struct {
  * order, as only the current highest-priority pending irq can be EOIed.
  */
 struct pending_eoi {
-    u8 vector; /* vector awaiting EOI */
-    u8 ready;  /* Ready for EOI now?  */
+    u32 ready:1;  /* Ready for EOI now?  */
+    u32 irq:23;   /* irq of the vector */
+    u32 vector:8; /* vector awaiting EOI */
 };
 
 static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
@@ -817,11 +817,11 @@ static void __do_IRQ_guest(int irq)
         sp = pending_eoi_sp(peoi);
         ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
         ASSERT(sp < (NR_VECTORS-1));
+        peoi[sp].irq = irq;
         peoi[sp].vector = vector;
         peoi[sp].ready = 0;
         pending_eoi_sp(peoi) = sp+1;
         cpu_set(smp_processor_id(), action->cpu_eoi_map);
-        action->eoi_vector = vector;
     }
 
     for ( i = 0; i < action->nr_guests; i++ )
@@ -913,7 +913,7 @@ static void flush_ready_eoi(void)
 
     while ( (--sp >= 0) && peoi[sp].ready )
     {
-        irq = __get_cpu_var(vector_irq[peoi[sp].vector]);
+        irq = peoi[sp].irq;
         ASSERT(irq > 0);
         desc = irq_to_desc(irq);
         spin_lock(&desc->lock);
@@ -941,7 +941,7 @@ static void __set_eoi_ready(struct irq_d
 
     do {
         ASSERT(sp > 0);
-    } while ( peoi[--sp].vector != action->eoi_vector );
+    } while ( peoi[--sp].irq != irq );
     ASSERT(!peoi[sp].ready);
     peoi[sp].ready = 1;
 }
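
For readers following the mechanics, below is a small standalone sketch
(ordinary C, not the actual Xen source; the irq/vector numbers are made
up and the physical EOI is simulated with printf) of how the reworked
per-CPU pending-EOI stack behaves after this change: __do_IRQ_guest()
stores the irq alongside the vector when it pushes an entry, and
__set_eoi_ready() matches the entry by irq rather than by the action's
eoi_vector, so a later vector change caused by irq migration no longer
confuses the lookup.

/*
 * Simplified model of the pending-EOI stack after this patch.
 * Not Xen code: single CPU, no locking, physical EOI simulated.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_VECTORS 256

struct pending_eoi {
    uint32_t ready:1;   /* Ready for EOI now?  */
    uint32_t irq:23;    /* irq of the vector   */
    uint32_t vector:8;  /* vector awaiting EOI */
};

static struct pending_eoi peoi[NR_VECTORS]; /* per-CPU stack in Xen   */
static int peoi_sp;                         /* pending_eoi_sp() in Xen */

/* Models __do_IRQ_guest(): push the interrupt before injecting it. */
static void push_pending_eoi(unsigned int irq, unsigned int vector)
{
    assert((peoi_sp == 0) || (peoi[peoi_sp-1].vector < vector));
    assert(peoi_sp < (NR_VECTORS-1));
    peoi[peoi_sp].irq = irq;
    peoi[peoi_sp].vector = vector;
    peoi[peoi_sp].ready = 0;
    peoi_sp++;
}

/* Models __set_eoi_ready(): on guest EOI, match the entry by irq. */
static void set_eoi_ready(unsigned int irq)
{
    int sp = peoi_sp;
    do {
        assert(sp > 0);
    } while ( peoi[--sp].irq != irq );
    assert(!peoi[sp].ready);
    peoi[sp].ready = 1;
}

/* Models flush_ready_eoi(): pop ready entries and EOI them physically. */
static void flush_ready_eoi(void)
{
    while ( (peoi_sp > 0) && peoi[peoi_sp-1].ready )
    {
        peoi_sp--;
        printf("physical EOI for irq %u (vector 0x%02x)\n",
               (unsigned)peoi[peoi_sp].irq, (unsigned)peoi[peoi_sp].vector);
    }
}

int main(void)
{
    push_pending_eoi(40, 0x60); /* unmaskable MSI arrives on vector 0x60 */
    /* irq 40 may now migrate to a new vector; the stacked irq still matches */
    set_eoi_ready(40);          /* guest writes EOI for irq 40 */
    flush_ready_eoi();
    return 0;
}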
