
[Xen-devel] [PATCH v2 1/4] xen/MSI-X: latch MSI-X table writes



The remaining log message in pci_msix_write() is bogus, as the guest
behavior it complains about may only appear to be wrong: For one, the
old logic didn't take the mask-all bit into account. And then this
shouldn't depend on host device state (i.e. the host may have masked
the entry without the guest having done so). Plus such writes shouldn't
be dropped even while an entry is unmasked. Instead, if they can't be
made to take effect right away, they should take effect on the next
unmasking or enabling operation - the specification explicitly
describes such caching behavior.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Pass original vec_ctrl to xen_pt_msix_update_one() instead of
    (ab)using latch[].
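
For reviewers, a minimal standalone sketch (not part of the patch) of
how the new latch[] array and the latch() helper introduced below map
the four DWORDs of a 16-byte MSI-X table entry. The example_entry
struct and main() are made up for illustration; the PCI_MSIX_ENTRY_*
values merely restate the spec-defined entry layout the existing code
already relies on.

/* Build with any C99 compiler; prints the addr/data re-assembled from
 * the latched DWORDs, the same way xen_pt_msix_update_one() now does. */
#include <stdint.h>
#include <stdio.h>

#define PCI_MSIX_ENTRY_LOWER_ADDR   0x0
#define PCI_MSIX_ENTRY_UPPER_ADDR   0x4
#define PCI_MSIX_ENTRY_DATA         0x8
#define PCI_MSIX_ENTRY_VECTOR_CTRL  0xc

/* Same helper as in the xen_pt_msi.c hunk below. */
#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]

struct example_entry {
    uint32_t latch[4];     /* one slot per DWORD of the table entry */
};

int main(void)
{
    struct example_entry e = {
        .latch = { 0xfee00000u, 0, 0x4041, 1 /* mask bit set */ },
    };

    uint64_t addr = e.latch(LOWER_ADDR) |
                    ((uint64_t)e.latch(UPPER_ADDR) << 32);
    uint32_t data = e.latch(DATA);

    printf("addr=%#llx data=%#x masked=%u\n",
           (unsigned long long)addr, data, e.latch(VECTOR_CTRL) & 1);
    return 0;
}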

--- a/hw/xen/xen_pt_config_init.c
+++ b/hw/xen/xen_pt_config_init.c
@@ -1499,6 +1499,8 @@ static int xen_pt_msixctrl_reg_write(Xen
         xen_pt_msix_disable(s);
     }
 
+    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;
+
     debug_msix_enabled_old = s->msix->enabled;
     s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
     if (s->msix->enabled != debug_msix_enabled_old) {
--- a/hw/xen/xen_pt.h
+++ b/hw/xen/xen_pt.h
@@ -187,13 +187,13 @@ typedef struct XenPTMSIXEntry {
     int pirq;
     uint64_t addr;
     uint32_t data;
-    uint32_t vector_ctrl;
+    uint32_t latch[4];
     bool updated; /* indicate whether MSI ADDR or DATA is updated */
-    bool warned;  /* avoid issuing (bogus) warning more than once */
 } XenPTMSIXEntry;
 typedef struct XenPTMSIX {
     uint32_t ctrl_offset;
     bool enabled;
+    bool maskall;
     int total_entries;
     int bar_index;
     uint64_t table_base;
--- a/hw/xen/xen_pt_msi.c
+++ b/hw/xen/xen_pt_msi.c
@@ -25,6 +25,7 @@
 #define XEN_PT_GFLAGSSHIFT_DELIV_MODE     12
 #define XEN_PT_GFLAGSSHIFT_TRG_MODE       15
 
+#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]
 
 /*
  * Helpers
@@ -314,7 +315,8 @@ static int msix_set_enable(XenPCIPassthr
                            enabled);
 }
 
-static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr)
+static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr,
+                                  uint32_t vec_ctrl)
 {
     XenPTMSIXEntry *entry = NULL;
     int pirq;
@@ -332,6 +334,13 @@ static int xen_pt_msix_update_one(XenPCI
 
     pirq = entry->pirq;
 
+    if (pirq == XEN_PT_UNASSIGNED_PIRQ || s->msix->maskall ||
+        (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
+        entry->addr = entry->latch(LOWER_ADDR) |
+                      ((uint64_t)entry->latch(UPPER_ADDR) << 32);
+        entry->data = entry->latch(DATA);
+    }
+
     rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                         entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
     if (rc) {
@@ -357,7 +366,7 @@ int xen_pt_msix_update(XenPCIPassthrough
     int i;
 
     for (i = 0; i < msix->total_entries; i++) {
-        xen_pt_msix_update_one(s, i);
+        xen_pt_msix_update_one(s, i, msix->msix_entry[i].latch(VECTOR_CTRL));
     }
 
     return 0;
@@ -406,35 +415,15 @@ int xen_pt_msix_update_remap(XenPCIPasst
 
 static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
 {
-    switch (offset) {
-    case PCI_MSIX_ENTRY_LOWER_ADDR:
-        return e->addr & UINT32_MAX;
-    case PCI_MSIX_ENTRY_UPPER_ADDR:
-        return e->addr >> 32;
-    case PCI_MSIX_ENTRY_DATA:
-        return e->data;
-    case PCI_MSIX_ENTRY_VECTOR_CTRL:
-        return e->vector_ctrl;
-    default:
-        return 0;
-    }
+    return !(offset % sizeof(*e->latch))
+           ? e->latch[offset / sizeof(*e->latch)] : 0;
 }
 
 static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
 {
-    switch (offset) {
-    case PCI_MSIX_ENTRY_LOWER_ADDR:
-        e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val;
-        break;
-    case PCI_MSIX_ENTRY_UPPER_ADDR:
-        e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX);
-        break;
-    case PCI_MSIX_ENTRY_DATA:
-        e->data = val;
-        break;
-    case PCI_MSIX_ENTRY_VECTOR_CTRL:
-        e->vector_ctrl = val;
-        break;
+    if (!(offset % sizeof(*e->latch)))
+    {
+        e->latch[offset / sizeof(*e->latch)] = val;
     }
 }
 
@@ -454,39 +443,26 @@ static void pci_msix_write(void *opaque,
     offset = addr % PCI_MSIX_ENTRY_SIZE;
 
     if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
-        const volatile uint32_t *vec_ctrl;
-
         if (get_entry_value(entry, offset) == val
             && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
             return;
         }
 
+        entry->updated = true;
+    } else if (msix->enabled && entry->updated &&
+               !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
+        const volatile uint32_t *vec_ctrl;
+
         /*
          * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
          * up-to-date. Read from hardware directly.
          */
         vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
             + PCI_MSIX_ENTRY_VECTOR_CTRL;
-
-        if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
-            if (!entry->warned) {
-                entry->warned = true;
-                XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is"
-                           " already enabled.\n", entry_nr);
-            }
-            return;
-        }
-
-        entry->updated = true;
+        xen_pt_msix_update_one(s, entry_nr, *vec_ctrl);
     }
 
     set_entry_value(entry, offset, val);
-
-    if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) {
-        if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
-            xen_pt_msix_update_one(s, entry_nr);
-        }
-    }
 }
 
 static uint64_t pci_msix_read(void *opaque, hwaddr addr,
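
Also for review only (not part of the patch): a stripped-down model of
the resulting pci_msix_write() flow. mock_entry, apply_latched() and
model_pci_msix_write() are invented stand-ins for the real QEMU
structures, for xen_pt_msix_update_one() and for the MMIO handler; the
early return on unchanged values and all error handling are omitted,
and the vector control word read back from hardware is folded into a
parameter.

#include <stdbool.h>
#include <stdint.h>

#define PCI_MSIX_ENTRY_DATA         0x8
#define PCI_MSIX_ENTRY_VECTOR_CTRL  0xc
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x1

struct mock_entry {
    uint32_t latch[4];   /* per-DWORD shadow of the table entry */
    uint64_t addr;       /* values last handed to the hypervisor */
    uint32_t data;
    bool updated;        /* a latched addr/data write is pending */
};

/* Stand-in for xen_pt_msix_update_one(): while the vector is still
 * masked (the real code also checks mask-all and an unassigned pirq),
 * pick up the latched DWORDs before (re)binding the pirq. */
static void apply_latched(struct mock_entry *e, uint32_t vec_ctrl)
{
    if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
        e->addr = e->latch[0] | ((uint64_t)e->latch[1] << 32);
        e->data = e->latch[2];
    }
    e->updated = false;
}

static void model_pci_msix_write(struct mock_entry *e, bool msix_enabled,
                                 unsigned int offset, uint32_t val,
                                 uint32_t hw_vec_ctrl)
{
    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        /* Address/data writes are no longer dropped; they are merely
         * remembered until they may take effect. */
        e->updated = true;
    } else if (msix_enabled && e->updated &&
               !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        /* Unmasking an entry with pending updates pushes the latched
         * values out, based on the mask state read back from hardware. */
        apply_latched(e, hw_vec_ctrl);
    }

    e->latch[offset / sizeof(uint32_t)] = val;   /* always latch the write */
}

int main(void)
{
    struct mock_entry e = { .updated = false };

    /* Guest writes the data DWORD while the vector is masked ... */
    model_pci_msix_write(&e, true, PCI_MSIX_ENTRY_DATA, 0x4042,
                         PCI_MSIX_ENTRY_CTRL_MASKBIT);
    /* ... then clears the mask bit, which applies the pending update. */
    model_pci_msix_write(&e, true, PCI_MSIX_ENTRY_VECTOR_CTRL, 0,
                         PCI_MSIX_ENTRY_CTRL_MASKBIT);
    return 0;
}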


Attachment: qemu-MSI-X-latch-writes.patch
Description: Text document
