[Xen-devel] [PATCH v5 13/13] IOMMU: patch certain indirect calls to direct ones
This intentionally leaves untouched hooks that are used rarely (or not at
all) during the lifetime of a VM, unless such a hook sits on an error path
next to a call which does get changed (in which case I think the error
path better remains consistent with the respective main path).
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v5: Re-base over type-safe changes and dropped IOMMU_MIXED patch. Also
patch the new lookup_page() hook.
v4: New.
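
For reference, a minimal, self-contained sketch (illustrative names only,
not part of the patch) of what the generic fallback introduced in
xen/iommu.h amounts to: without the x86 NDEBUG overrides, iommu_call()
stays a plain indirect call through the ops table, so all call sites can
be converted uniformly while non-x86 and debug builds keep their current
behaviour:

/*
 * Illustrative sketch only (not part of the patch). demo_ops and
 * demo_map_page are made-up names; the macro bodies mirror the
 * xen/iommu.h fallback (GNU variadic macro syntax, as in the patch).
 */
#include <stdio.h>

struct demo_ops {
    int (*map_page)(unsigned long dfn, unsigned long mfn, unsigned int flags);
};

/* Generic fallback: a plain indirect call through the ops table. */
#define iommu_call(ops, fn, args...) ((ops)->fn(args))
#define iommu_vcall iommu_call

static int demo_map_page(unsigned long dfn, unsigned long mfn,
                         unsigned int flags)
{
    printf("map dfn %#lx -> mfn %#lx (flags %#x)\n", dfn, mfn, flags);
    return 0;
}

int main(void)
{
    const struct demo_ops ops = { .map_page = demo_map_page };

    /*
     * Same shape as the converted call sites below; on x86 release
     * builds the macro instead expands to alternative_call(iommu_ops.fn,
     * ...), letting the alternatives framework rewrite the indirect
     * call into a direct one at boot.
     */
    return iommu_call(&ops, map_page, 0x1000UL, 0x2000UL, 1u);
}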
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -226,8 +226,8 @@ void __hwdom_init iommu_hwdom_init(struc
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- ret = hd->platform_ops->map_page(d, _dfn(dfn), _mfn(mfn),
- mapping);
+ ret = iommu_call(hd->platform_ops, map_page,
+ d, _dfn(dfn), _mfn(mfn), mapping);
if ( !rc )
rc = ret;
@@ -313,7 +313,7 @@ int iommu_map_page(struct domain *d, dfn
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->map_page(d, dfn, mfn, flags);
+ rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn, flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
@@ -336,7 +336,7 @@ int iommu_unmap_page(struct domain *d, d
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->unmap_page(d, dfn);
+ rc = iommu_call(hd->platform_ops, unmap_page, d, dfn);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
@@ -359,7 +359,7 @@ int iommu_lookup_page(struct domain *d,
if ( !iommu_enabled || !hd->platform_ops )
return -EOPNOTSUPP;
- return hd->platform_ops->lookup_page(d, dfn, mfn, flags);
+ return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags);
}
static void iommu_free_pagetables(unsigned long unused)
@@ -372,7 +372,7 @@ static void iommu_free_pagetables(unsign
spin_unlock(&iommu_pt_cleanup_lock);
if ( !pg )
return;
- iommu_get_ops()->free_page_table(pg);
+ iommu_vcall(iommu_get_ops(), free_page_table, pg);
} while ( !softirq_pending(smp_processor_id()) );
tasklet_schedule_on_cpu(&iommu_pt_cleanup_tasklet,
@@ -387,7 +387,7 @@ int iommu_iotlb_flush(struct domain *d,
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
- rc = hd->platform_ops->iotlb_flush(d, dfn, page_count);
+ rc = iommu_call(hd->platform_ops, iotlb_flush, d, dfn, page_count);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
@@ -410,7 +410,7 @@ int iommu_iotlb_flush_all(struct domain
if ( !iommu_enabled || !hd->platform_ops ||
!hd->platform_ops->iotlb_flush_all )
return 0;
- rc = hd->platform_ops->iotlb_flush_all(d);
+ rc = iommu_call(hd->platform_ops, iotlb_flush_all, d);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1301,14 +1301,14 @@ int iommu_update_ire_from_msi(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
return iommu_intremap
- ? iommu_get_ops()->update_ire_from_msi(msi_desc, msg) : 0;
+ ? iommu_call(&iommu_ops, update_ire_from_msi, msi_desc, msg) : 0;
}
void iommu_read_msi_from_ire(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
if ( iommu_intremap )
- iommu_get_ops()->read_msi_from_ire(msi_desc, msg);
+ iommu_vcall(&iommu_ops, read_msi_from_ire, msi_desc, msg);
}
static int iommu_add_device(struct pci_dev *pdev)
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -28,14 +28,12 @@ struct iommu_ops iommu_ops;
void iommu_update_ire_from_apic(
unsigned int apic, unsigned int reg, unsigned int value)
{
- const struct iommu_ops *ops = iommu_get_ops();
- ops->update_ire_from_apic(apic, reg, value);
+ iommu_vcall(&iommu_ops, update_ire_from_apic, apic, reg, value);
}
unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
{
- const struct iommu_ops *ops = iommu_get_ops();
- return ops->read_apic_from_ire(apic, reg);
+ return iommu_call(&iommu_ops, read_apic_from_ire, apic, reg);
}
int __init iommu_setup_hpet_msi(struct msi_desc *msi)
@@ -46,7 +44,6 @@ int __init iommu_setup_hpet_msi(struct m
int arch_iommu_populate_page_table(struct domain *d)
{
- const struct domain_iommu *hd = dom_iommu(d);
struct page_info *page;
int rc = 0, n = 0;
@@ -68,9 +65,8 @@ int arch_iommu_populate_page_table(struc
{
ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
BUG_ON(SHARED_M2P(gfn));
- rc = hd->platform_ops->map_page(d, _dfn(gfn), _mfn(mfn),
- IOMMUF_readable |
- IOMMUF_writable);
+ rc = iommu_call(&iommu_ops, map_page, d, _dfn(gfn), _mfn(mfn),
+ IOMMUF_readable | IOMMUF_writable);
}
if ( rc )
{
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -61,6 +61,12 @@ int amd_iov_detect(void);
extern struct iommu_ops iommu_ops;
+#ifdef NDEBUG
+# include <asm/alternative.h>
+# define iommu_call(ops, fn, args...) alternative_call(iommu_ops.fn, ## args)
+# define iommu_vcall(ops, fn, args...) alternative_vcall(iommu_ops.fn, ## args)
+#endif
+
static inline const struct iommu_ops *iommu_get_ops(void)
{
BUG_ON(!iommu_ops.init);
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -195,6 +195,11 @@ struct iommu_ops {
#include <asm/iommu.h>
+#ifndef iommu_call
+# define iommu_call(ops, fn, args...) ((ops)->fn(args))
+# define iommu_vcall iommu_call
+#endif
+
enum iommu_status
{
IOMMU_STATUS_disabled,
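
As a closing note on how the two header hunks compose (my reading, not
spelled out in the patch): xen/iommu.h includes asm/iommu.h before the
#ifndef, so release x86 builds pre-define iommu_call() in terms of
alternative_call(), which deliberately ignores the ops argument and
references the global iommu_ops table, a fixed symbol whose final
contents the alternatives framework can read when patching the call
sites at boot. All other configurations fall through to the plain
indirect call. Compressed into one place:

/* Compressed illustration of the override pattern; the comments are
 * mine, the macro bodies are the patch's. */
#ifdef NDEBUG                        /* asm-x86/iommu.h, release builds */
# define iommu_call(ops, fn, args...)  alternative_call(iommu_ops.fn, ## args)
# define iommu_vcall(ops, fn, args...) alternative_vcall(iommu_ops.fn, ## args)
#endif

#ifndef iommu_call                   /* xen/iommu.h fallback */
# define iommu_call(ops, fn, args...) ((ops)->fn(args))
# define iommu_vcall iommu_call
#endif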