[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86: Revert Cset 20334:dcc5d5d954e9



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1264521150 0
# Node ID cb0375fcec23ea188511c130feaee0c18b352968
# Parent  efeef2c5e96da54775f73a069e6215a259ac0923
x86: Revert Cset 20334:dcc5d5d954e9

Recording the old MSI info does not cover all the corner cases
that arise when a guest's IRQ migration occurs.

Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/vmsi.c      |   10 ++--------
 xen/drivers/passthrough/io.c |   24 +++++-------------------
 xen/include/xen/hvm/irq.h    |    4 +---
 3 files changed, 8 insertions(+), 30 deletions(-)

diff -r efeef2c5e96d -r cb0375fcec23 xen/arch/x86/hvm/vmsi.c
--- a/xen/arch/x86/hvm/vmsi.c   Tue Jan 26 15:51:53 2010 +0000
+++ b/xen/arch/x86/hvm/vmsi.c   Tue Jan 26 15:52:30 2010 +0000
@@ -92,11 +92,8 @@ int vmsi_deliver(struct domain *d, int p
     case dest_LowestPrio:
     {
         target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
-        if ( target != NULL ) {
+        if ( target != NULL )
             vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
-            hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-        }
         else
             HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                         "vector=%x delivery_mode=%x\n",
@@ -109,12 +106,9 @@ int vmsi_deliver(struct domain *d, int p
     {
         for_each_vcpu ( d, v )
             if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
-                                   0, dest, dest_mode) ) {
+                                   0, dest, dest_mode) )
                 vmsi_inj_irq(d, vcpu_vlapic(v),
                              vector, trig_mode, delivery_mode);
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-            }
         break;
     }
 
diff -r efeef2c5e96d -r cb0375fcec23 xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Tue Jan 26 15:51:53 2010 +0000
+++ b/xen/drivers/passthrough/io.c      Tue Jan 26 15:52:30 2010 +0000
@@ -164,9 +164,7 @@ int pt_irq_create_bind_vtd(
         {
             hvm_irq_dpci->mirq[pirq].flags = HVM_IRQ_DPCI_MACH_MSI |
                                              HVM_IRQ_DPCI_GUEST_MSI;
-            hvm_irq_dpci->mirq[pirq].gmsi.old_gvec = pt_irq_bind->u.msi.gvec;
             hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
-            hvm_irq_dpci->mirq[pirq].gmsi.old_gflags = 
pt_irq_bind->u.msi.gflags;
             hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
             /* bind after hvm_irq_dpci is setup to avoid race with irq 
handler*/
             rc = pirq_guest_bind(d->vcpu[0], pirq, 0);
@@ -180,8 +178,6 @@ int pt_irq_create_bind_vtd(
             {
                 hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
                 hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gvec = 0;
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gflags = 0;
                 hvm_irq_dpci->mirq[pirq].flags = 0;
                 clear_bit(pirq, hvm_irq_dpci->mapping);
                 spin_unlock(&d->event_lock);
@@ -200,11 +196,8 @@ int pt_irq_create_bind_vtd(
 
             /* if pirq is already mapped as vmsi, update the guest data/addr */
             if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec 
||
-                hvm_irq_dpci->mirq[pirq].gmsi.gflags != 
pt_irq_bind->u.msi.gflags) {
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gvec;
-                hvm_irq_dpci->mirq[pirq].gmsi.old_gflags =
-                                    hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+                    hvm_irq_dpci->mirq[pirq].gmsi.gflags != 
pt_irq_bind->u.msi.gflags) {
+
                 hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
                 hvm_irq_dpci->mirq[pirq].gmsi.gflags = 
pt_irq_bind->u.msi.gflags;
             }
@@ -435,21 +428,14 @@ void hvm_dpci_msi_eoi(struct domain *d, 
           pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
     {
         if ( (!(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI)) ||
-                (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector &&
-                 hvm_irq_dpci->mirq[pirq].gmsi.old_gvec != vector) )
+                (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector) )
             continue;
 
-        if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec == vector ) {
-            dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
-            dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & 
VMSI_DM_MASK);
-        } else {
-            dest = hvm_irq_dpci->mirq[pirq].gmsi.old_gflags & 
VMSI_DEST_ID_MASK;
-            dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.old_gflags & 
VMSI_DM_MASK);
-        }
+        dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+        dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
         if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) 
)
             break;
     }
-
     if ( pirq < d->nr_pirqs )
         __msi_pirq_eoi(d, pirq);
     spin_unlock(&d->event_lock);
diff -r efeef2c5e96d -r cb0375fcec23 xen/include/xen/hvm/irq.h
--- a/xen/include/xen/hvm/irq.h Tue Jan 26 15:51:53 2010 +0000
+++ b/xen/include/xen/hvm/irq.h Tue Jan 26 15:52:30 2010 +0000
@@ -58,10 +58,8 @@ struct dev_intx_gsi_link {
 #define GLFAGS_SHIFT_TRG_MODE       15
 
 struct hvm_gmsi_info {
-    uint16_t gvec;
-    uint16_t old_gvec;
+    uint32_t gvec;
     uint32_t gflags;
-    uint32_t old_gflags;
     int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
 };
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.