
[Xen-devel] [PATCH v4 22/28] x86/vmsi: Hook delivering remapping format msi to guest and handling eoi



When delivering a guest MSI, first determine its format via the viommu's
'check_irq_remapping' method. An MSI in non-remapping format is delivered
as before, while an MSI in remapping format is handed to the viommu. When
handling an EOI, the interrupt attributes (vector, affinity) are used to
look up the corresponding physical IRQ; for a remapping-format MSI these
attributes must be decoded from the IRTE.
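
To make the flow concrete, here is a rough sketch of the two paths
(illustrative only, not part of the patch; the wrapper names are made up,
and the real changes live in hvm_inject_msi(), vmsi_deliver_pirq() and
_hvm_dpci_msi_eoi()):

/* Delivery: route the MSI according to its format. */
static void deliver_guest_msi(struct domain *d, uint64_t addr, uint32_t data)
{
    struct arch_irq_remapping_request request;

    irq_request_msi_fill(&request, addr, data);

    if ( viommu_check_irq_remapping(d, &request) )
        /* Remapping format: the viommu decodes the IRTE and injects. */
        viommu_handle_irq_request(d, &request);
    else
        /* Non-remapping format: decode the attributes from addr/data. */
        vmsi_deliver(d, data & MSI_DATA_VECTOR_MASK,
                     MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK),
                     addr & MSI_ADDR_DESTMODE_MASK,
                     MASK_EXTR(data, MSI_DATA_DELIVERY_MODE_MASK),
                     data & MSI_DATA_TRIGGER_MASK);
}

/* EOI: recover the attributes used to find the physical IRQ. */
static bool get_msi_attrs(struct domain *d, uint64_t addr, uint32_t data,
                          uint8_t *vector, uint32_t *dest, bool *dest_mode)
{
    struct arch_irq_remapping_request request;

    irq_request_msi_fill(&request, addr, data);

    if ( viommu_check_irq_remapping(d, &request) )
    {
        struct arch_irq_remapping_info info;

        /* Remapping format: the attributes come from the virtual IRTE. */
        if ( viommu_get_irq_info(d, &request, &info) )
            return false;

        *vector = info.vector;
        *dest = info.dest;
        *dest_mode = info.dest_mode;
    }
    else
    {
        *vector = data & MSI_DATA_VECTOR_MASK;
        *dest = MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK);
        *dest_mode = addr & MSI_ADDR_DESTMODE_MASK;
    }

    return true;
}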

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
Signed-off-by: Lan Tianyu <tianyu.lan@xxxxxxxxx>
---
 xen/arch/x86/hvm/irq.c       |  6 ++++++
 xen/arch/x86/hvm/vmsi.c      | 33 +++++++++++++++++++++------------
 xen/drivers/passthrough/io.c | 35 +++++++++++++++++++++++++++--------
 3 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index e425df9..b561480 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -23,6 +23,7 @@
 #include <xen/sched.h>
 #include <xen/irq.h>
 #include <xen/keyhandler.h>
+#include <xen/viommu.h>
 #include <asm/hvm/domain.h>
 #include <asm/hvm/support.h>
 #include <asm/msi.h>
@@ -339,6 +340,11 @@ int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
     uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
         >> MSI_DATA_TRIGGER_SHIFT;
     uint8_t vector = data & MSI_DATA_VECTOR_MASK;
+    struct arch_irq_remapping_request request;
+
+    irq_request_msi_fill(&request, addr, data);
+    if ( viommu_check_irq_remapping(d, &request) )
+        return viommu_handle_irq_request(d, &request);
 
     if ( !vector )
     {
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 5edb0e7..9dc5631 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -31,6 +31,7 @@
 #include <xen/errno.h>
 #include <xen/sched.h>
 #include <xen/irq.h>
+#include <xen/viommu.h>
 #include <public/hvm/ioreq.h>
 #include <asm/hvm/io.h>
 #include <asm/hvm/vpic.h>
@@ -101,21 +102,29 @@ int vmsi_deliver(
 
 void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
 {
-    uint8_t vector = pirq_dpci->gmsi.data & MSI_DATA_VECTOR_MASK;
-    uint8_t dest = MASK_EXTR(pirq_dpci->gmsi.addr, MSI_ADDR_DEST_ID_MASK);
-    bool dest_mode = pirq_dpci->gmsi.addr & MSI_ADDR_DESTMODE_MASK;
-    uint8_t delivery_mode = MASK_EXTR(pirq_dpci->gmsi.data,
-                                      MSI_DATA_DELIVERY_MODE_MASK);
-    bool trig_mode = pirq_dpci->gmsi.data & MSI_DATA_TRIGGER_MASK;
-
-    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
-                "msi: dest=%x dest_mode=%x delivery_mode=%x "
-                "vector=%x trig_mode=%x\n",
-                dest, dest_mode, delivery_mode, vector, trig_mode);
+    struct arch_irq_remapping_request request;
 
     ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);
 
-    vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+    irq_request_msi_fill(&request, pirq_dpci->gmsi.addr, pirq_dpci->gmsi.data);
+    if ( viommu_check_irq_remapping(d, &request) )
+        viommu_handle_irq_request(d, &request);
+    else
+    {
+        uint8_t vector = pirq_dpci->gmsi.data & MSI_DATA_VECTOR_MASK;
+        uint8_t dest = MASK_EXTR(pirq_dpci->gmsi.addr, MSI_ADDR_DEST_ID_MASK);
+        bool dest_mode = pirq_dpci->gmsi.addr & MSI_ADDR_DESTMODE_MASK;
+        uint8_t delivery_mode = MASK_EXTR(pirq_dpci->gmsi.data,
+                                          MSI_DATA_DELIVERY_MODE_MASK);
+        bool trig_mode = pirq_dpci->gmsi.data & MSI_DATA_TRIGGER_MASK;
+
+        HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
+                    "msi: dest=%x dest_mode=%x delivery_mode=%x "
+                    "vector=%x trig_mode=%x\n",
+                    dest, dest_mode, delivery_mode, vector, trig_mode);
+
+        vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+    }
 }
 
 /* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 9198ef5..34a3cf1 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -872,16 +872,35 @@ static void __msi_pirq_eoi(struct hvm_pirq_dpci *pirq_dpci)
 static int _hvm_dpci_msi_eoi(struct domain *d,
                              struct hvm_pirq_dpci *pirq_dpci, void *arg)
 {
-    int vector = (long)arg;
-
-    if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
-         (pirq_dpci->gmsi.gvec == vector) )
+    if ( pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI )
     {
-        uint32_t dest = MASK_EXTR(pirq_dpci->gmsi.addr, MSI_ADDR_DEST_ID_MASK);
-        bool dest_mode = pirq_dpci->gmsi.addr & MSI_ADDR_DESTMODE_MASK;
+        uint8_t vector, vector_target = (long)arg;
+        uint32_t dest;
+        bool dm;
+        struct arch_irq_remapping_request request;
+
+        irq_request_msi_fill(&request, pirq_dpci->gmsi.addr,
+                             pirq_dpci->gmsi.data);
+        if ( viommu_check_irq_remapping(d, &request) )
+        {
+            struct arch_irq_remapping_info info;
+
+            if ( viommu_get_irq_info(d, &request, &info) )
+                return 0;
+
+            vector = info.vector;
+            dest = info.dest;
+            dm = info.dest_mode;
+        }
+        else
+        {
+            vector = pirq_dpci->gmsi.data & MSI_DATA_VECTOR_MASK;
+            dest = MASK_EXTR(pirq_dpci->gmsi.addr, MSI_ADDR_DEST_ID_MASK);
+            dm = pirq_dpci->gmsi.addr & MSI_ADDR_DESTMODE_MASK;
+        }
 
-        if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
-                               dest_mode) )
+        if ( vector == vector_target &&
+             vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dm) )
         {
             __msi_pirq_eoi(pirq_dpci);
             return 1;
-- 
1.8.3.1

