
[Xen-devel] [PATCH v4 RFC 1/6] x86/PCI: add config space write abstract intercept logic



This is to be used by the MSI code, and will later also be hooked up
to MMCFG accesses by Dom0.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
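
Not part of the patch itself, but a note on the calling convention the
traps.c hunk below relies on: a negative return value from
pci_conf_write_intercept() causes the access to be dropped, while a return
value >= 0 lets the (possibly adjusted) value left in *data be written out.
A minimal sketch of a caller, where issue_cfg_write() is a purely
hypothetical stand-in for whatever performs the actual hardware access:

/*
 * Illustration only -- issue_cfg_write() is a made-up name, not a
 * function introduced by this patch.
 */
static void example_forward_cfg_write(unsigned int seg, unsigned int bdf,
                                      unsigned int reg, unsigned int size,
                                      uint32_t data)
{
    /* The hook may rewrite the value in place, so pass it by reference. */
    if ( pci_conf_write_intercept(seg, bdf, reg, size, &data) < 0 )
        return;                          /* vetoed: discard the write */

    /* Forward the (possibly modified) value to the device. */
    issue_cfg_write(seg, bdf, reg, size, data);
}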

--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -1111,6 +1111,12 @@ void pci_cleanup_msi(struct pci_dev *pde
     msi_free_irqs(pdev);
 }
 
+int pci_msi_conf_write_intercept(struct pci_dev *pdev, unsigned int reg,
+                                 unsigned int size, uint32_t *data)
+{
+    return 0;
+}
+
 int pci_restore_msi_state(struct pci_dev *pdev)
 {
     unsigned long flags;
--- a/xen/arch/x86/pci.c
+++ b/xen/arch/x86/pci.c
@@ -67,3 +67,28 @@ void pci_conf_write(uint32_t cf8, uint8_
 
     spin_unlock_irqrestore(&pci_config_lock, flags);
 }
+
+int pci_conf_write_intercept(unsigned int seg, unsigned int bdf,
+                             unsigned int reg, unsigned int size,
+                             uint32_t *data)
+{
+    struct pci_dev *pdev;
+    int rc = 0;
+
+    /*
+     * Avoid expensive operations when no hook is going to do anything
+     * for the access anyway.
+     */
+    if ( reg < 64 || reg >= 256 )
+        return 0;
+
+    spin_lock(&pcidevs_lock);
+
+    pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
+    if ( pdev )
+        rc = pci_msi_conf_write_intercept(pdev, reg, size, data);
+
+    spin_unlock(&pcidevs_lock);
+
+    return rc;
+}
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1771,8 +1771,8 @@ static bool_t admin_io_okay(unsigned int
     return ioports_access_permitted(d, port, port + bytes - 1);
 }
 
-static bool_t pci_cfg_ok(struct domain *currd, bool_t write,
-                         unsigned int start, unsigned int size)
+static bool_t pci_cfg_ok(struct domain *currd, unsigned int start,
+                         unsigned int size, uint32_t *write)
 {
     uint32_t machine_bdf;
 
@@ -1804,8 +1804,12 @@ static bool_t pci_cfg_ok(struct domain *
             start |= CF8_ADDR_HI(currd->arch.pci_cf8);
     }
 
-    return !xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
-                                      start, start + size - 1, write);
+    if ( xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
+                                   start, start + size - 1, !!write) != 0 )
+         return 0;
+
+    return !write ||
+           pci_conf_write_intercept(0, machine_bdf, start, size, write) >= 0;
 }
 
 uint32_t guest_io_read(unsigned int port, unsigned int bytes,
@@ -1857,7 +1861,7 @@ uint32_t guest_io_read(unsigned int port
             size = min(bytes, 4 - (port & 3));
             if ( size == 3 )
                 size = 2;
-            if ( pci_cfg_ok(currd, 0, port & 3, size) )
+            if ( pci_cfg_ok(currd, port & 3, size, NULL) )
                 sub_data = pci_conf_read(currd->arch.pci_cf8, port & 3, size);
         }
 
@@ -1928,7 +1932,7 @@ void guest_io_write(unsigned int port, u
             size = min(bytes, 4 - (port & 3));
             if ( size == 3 )
                 size = 2;
-            if ( pci_cfg_ok(currd, 1, port & 3, size) )
+            if ( pci_cfg_ok(currd, port & 3, size, &data) )
                 pci_conf_write(currd->arch.pci_cf8, port & 3, size, data);
         }
 
--- a/xen/include/asm-x86/pci.h
+++ b/xen/include/asm-x86/pci.h
@@ -15,4 +15,10 @@ struct arch_pci_dev {
     vmask_t used_vectors;
 };
 
+int pci_conf_write_intercept(unsigned int seg, unsigned int bdf,
+                             unsigned int reg, unsigned int size,
+                             uint32_t *data);
+int pci_msi_conf_write_intercept(struct pci_dev *, unsigned int reg,
+                                 unsigned int size, uint32_t *data);
+
 #endif /* __X86_PCI_H__ */
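
To illustrate the intended division of labour (a made-up example, not code
from the rest of this series): a device-level hook such as
pci_msi_conf_write_intercept(), which is only a stub above, can leave an
access alone by returning 0, rewrite the value about to be written via
*data, or return a negative error to have pci_cfg_ok() drop the write.
For instance:

/*
 * Hypothetical hook body -- for illustration only; the register offsets
 * and the policy applied to them are invented.
 */
int example_conf_write_hook(struct pci_dev *pdev, unsigned int reg,
                            unsigned int size, uint32_t *data)
{
    /* Pretend 0xf0-0xf3 is a register range Xen wants to own exclusively. */
    if ( reg >= 0xf0 && reg < 0xf4 )
        return -EPERM;             /* negative: the write gets dropped */

    /* Pretend bit 0 at offset 0x40 must never be cleared by the guest. */
    if ( reg == 0x40 )
        *data |= 1;                /* adjust the value, then let it through */

    return 0;                      /* zero (or positive): write proceeds */
}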



Attachment: x86-PCI-CFG-write-intercept.patch
Description: Text document
