
[xen staging] x86/IOMMU: Use altcall, and __initconst_cf_clobber



commit f04231775c179b3424fca45d64f29e226a41a610
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Feb 21 17:09:15 2022 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Feb 23 15:33:43 2022 +0000

    x86/IOMMU: Use altcall, and __initconst_cf_clobber
    
    Most IOMMU hooks are already altcall for performance reasons.  Convert the
    rest of them so we can harden all the hooks in Control Flow Integrity
    configurations.  This necessitates the use of iommu_{v,}call() in debug builds
    too.  Switch to using an ASSERT() as all forms should resolve to &iommu_ops.
    
    Move the root iommu_ops from __read_mostly to __ro_after_init now that the
    latter exists.
    
    Since c/s 3330013e6739 ("VT-d / x86: re-arrange cache syncing"), vtd_ops is
    not modified and doesn't need a forward declaration, so we can use
    __initconst_cf_clobber for both VT-d and AMD.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
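
For context on the "debug builds too" remark above: with the #ifdef NDEBUG
gone, debug builds no longer fall back to the common-code iommu_{v,}call()
definition (a plain indirect call through the passed ops pointer) but use the
x86 altcall form as well, so every hook invocation goes through a patchable
call site.  A rough side-by-side sketch; the first form is an approximation of
the common fallback, not a verbatim quote of the tree:

    /* xen/include/xen/iommu.h fallback (approximate form): plain indirect call. */
    #define iommu_call(ops, fn, args...)   ((ops)->fn(args))

    /*
     * xen/arch/x86/include/asm/iommu.h after this patch: dispatch always goes
     * through the single global iommu_ops, and the alternatives framework
     * patches the call site into a direct call at boot.  The ops argument is
     * otherwise unused, so the ASSERT() checks in debug builds that callers
     * really did pass &iommu_ops.
     */
    #define iommu_call(ops, fn, args...) ({      \
        ASSERT((ops) == &iommu_ops);             \
        alternative_call(iommu_ops.fn, ## args); \
    })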
---
 xen/arch/x86/include/asm/iommu.h            | 10 ++++------
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  2 +-
 xen/drivers/passthrough/iommu.c             |  7 ++++---
 xen/drivers/passthrough/vtd/iommu.c         |  3 +--
 xen/drivers/passthrough/x86/iommu.c         |  4 ++--
 5 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/include/asm/iommu.h b/xen/arch/x86/include/asm/iommu.h
index 8a96ba1f09..d38c334087 100644
--- a/xen/arch/x86/include/asm/iommu.h
+++ b/xen/arch/x86/include/asm/iommu.h
@@ -72,18 +72,16 @@ struct arch_iommu
 
 extern struct iommu_ops iommu_ops;
 
-#ifdef NDEBUG
 # include <asm/alternative.h>
 # define iommu_call(ops, fn, args...) ({      \
-    (void)(ops);                              \
+    ASSERT((ops) == &iommu_ops);              \
     alternative_call(iommu_ops.fn, ## args);  \
 })
 
 # define iommu_vcall(ops, fn, args...) ({     \
-    (void)(ops);                              \
+    ASSERT((ops) == &iommu_ops);              \
     alternative_vcall(iommu_ops.fn, ## args); \
 })
-#endif
 
 static inline const struct iommu_ops *iommu_get_ops(void)
 {
@@ -106,7 +104,7 @@ int iommu_setup_hpet_msi(struct msi_desc *);
 static inline int iommu_adjust_irq_affinities(void)
 {
     return iommu_ops.adjust_irq_affinities
-           ? iommu_ops.adjust_irq_affinities()
+           ? iommu_call(&iommu_ops, adjust_irq_affinities)
            : 0;
 }
 
@@ -122,7 +120,7 @@ int iommu_enable_x2apic(void);
 static inline void iommu_disable_x2apic(void)
 {
     if ( x2apic_enabled && iommu_ops.disable_x2apic )
-        iommu_ops.disable_x2apic();
+        iommu_vcall(&iommu_ops, disable_x2apic);
 }
 
 int iommu_identity_mapping(struct domain *d, p2m_access_t p2ma,
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index e57f555d00..4b59a4efe9 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -628,7 +628,7 @@ static void cf_check amd_dump_page_tables(struct domain *d)
                               hd->arch.amd.paging_mode, 0, 0);
 }
 
-static const struct iommu_ops __initconstrel _iommu_ops = {
+static const struct iommu_ops __initconst_cf_clobber _iommu_ops = {
     .init = amd_iommu_domain_init,
     .hwdom_init = amd_iommu_hwdom_init,
     .quarantine_init = amd_iommu_quarantine_init,
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index e220fea72c..c6b2c384d1 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -540,7 +540,7 @@ int __init iommu_setup(void)
 int iommu_suspend()
 {
     if ( iommu_enabled )
-        return iommu_get_ops()->suspend();
+        return iommu_call(iommu_get_ops(), suspend);
 
     return 0;
 }
@@ -548,7 +548,7 @@ int iommu_suspend()
 void iommu_resume()
 {
     if ( iommu_enabled )
-        iommu_get_ops()->resume();
+        iommu_vcall(iommu_get_ops(), resume);
 }
 
 int iommu_do_domctl(
@@ -578,7 +578,8 @@ void iommu_crash_shutdown(void)
         return;
 
     if ( iommu_enabled )
-        iommu_get_ops()->crash_shutdown();
+        iommu_vcall(iommu_get_ops(), crash_shutdown);
+
     iommu_enabled = false;
 #ifndef iommu_intremap
     iommu_intremap = iommu_intremap_off;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 56968a06a1..6a65ba1d82 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -56,7 +56,6 @@ bool __read_mostly iommu_snoop = true;
 
 static unsigned int __read_mostly nr_iommus;
 
-static struct iommu_ops vtd_ops;
 static struct tasklet vtd_fault_tasklet;
 
 static int cf_check setup_hwdom_device(u8 devfn, struct pci_dev *);
@@ -2794,7 +2793,7 @@ static int __init cf_check intel_iommu_quarantine_init(struct domain *d)
     return rc;
 }
 
-static struct iommu_ops __initdata vtd_ops = {
+static const struct iommu_ops __initconst_cf_clobber vtd_ops = {
     .init = intel_iommu_domain_init,
     .hwdom_init = intel_iommu_hwdom_init,
     .quarantine_init = intel_iommu_quarantine_init,
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index ad5f44e13d..58a422fb5f 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -27,7 +27,7 @@
 #include <asm/setup.h>
 
 const struct iommu_init_ops *__initdata iommu_init_ops;
-struct iommu_ops __read_mostly iommu_ops;
+struct iommu_ops __ro_after_init iommu_ops;
 bool __read_mostly iommu_non_coherent;
 
 enum iommu_intremap __read_mostly iommu_intremap = iommu_intremap_full;
@@ -129,7 +129,7 @@ int iommu_enable_x2apic(void)
     if ( !iommu_ops.enable_x2apic )
         return -EOPNOTSUPP;
 
-    return iommu_ops.enable_x2apic();
+    return iommu_call(&iommu_ops, enable_x2apic);
 }
 
 void iommu_update_ire_from_apic(
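
As a reading aid for the __ro_after_init and __initconst_cf_clobber parts of
the change, here is a small standalone C analogy (not Xen code; the struct
layout, the setup() step and the vendor names are invented for illustration)
of the lifecycle the patch relies on: the vendor table is init-only data, its
contents are copied into the one global table during boot and never changed
afterwards, and callers always dispatch through that global table while merely
asserting on the pointer they were handed:

    #include <assert.h>
    #include <stdio.h>

    struct ops {
        int  (*suspend)(void);
        void (*resume)(void);
    };

    static int  vendor_suspend(void) { puts("vendor suspend"); return 0; }
    static void vendor_resume(void)  { puts("vendor resume"); }

    /* Init-only data in Xen terms (__initconst_cf_clobber): read at boot only. */
    static const struct ops vendor_ops = {
        .suspend = vendor_suspend,
        .resume  = vendor_resume,
    };

    /* Single global dispatch table; conceptually read-only after init
     * (__ro_after_init in Xen terms). */
    static struct ops iommu_ops;

    static void setup(void)
    {
        iommu_ops = vendor_ops;      /* resolved exactly once during "boot" */
    }

    /* Mirrors the patch's ASSERT((ops) == &iommu_ops): the ops argument is
     * only there to be checked; the call always goes through the global
     * table, which Xen's alternatives patching turns into a direct call. */
    static void do_resume(const struct ops *ops)
    {
        assert(ops == &iommu_ops);
        iommu_ops.resume();
    }

    int main(void)
    {
        setup();

        int rc = iommu_ops.suspend();   /* dispatch through the global table */
        assert(rc == 0);
        (void)rc;                       /* keep -Wunused quiet under NDEBUG */

        do_resume(&iommu_ops);
        return 0;
    }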
--
generated by git-patchbot for /home/xen/git/xen.git#staging