[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v6 10/10] IOMMU: patch certain indirect calls to direct ones
This is intentionally not touching hooks used rarely (or not at all) during the lifetime of a VM, unless perhaps sitting on an error path next to a call which gets changed (in which case I think the error path better remains consistent with the respective main path). Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx> --- v6: Re-base. v5: Re-base over type-safe changes and dropped IOMMU_MIXED patch. Also patch the new lookup_page() hook. v4: New. --- a/xen/drivers/passthrough/iommu.c +++ b/xen/drivers/passthrough/iommu.c @@ -226,8 +226,8 @@ void __hwdom_init iommu_hwdom_init(struc == PGT_writable_page) ) mapping |= IOMMUF_writable; - ret = hd->platform_ops->map_page(d, _dfn(dfn), _mfn(mfn), - mapping); + ret = iommu_call(hd->platform_ops, map_page, + d, _dfn(dfn), _mfn(mfn), mapping); if ( !rc ) rc = ret; @@ -319,8 +319,8 @@ int iommu_map(struct domain *d, dfn_t df for ( i = 0; i < (1ul << page_order); i++ ) { - rc = hd->platform_ops->map_page(d, dfn_add(dfn, i), - mfn_add(mfn, i), flags); + rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i), + mfn_add(mfn, i), flags); if ( likely(!rc) ) continue; @@ -333,7 +333,7 @@ int iommu_map(struct domain *d, dfn_t df while ( i-- ) /* if statement to satisfy __must_check */ - if ( hd->platform_ops->unmap_page(d, dfn_add(dfn, i)) ) + if ( iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i)) ) continue; if ( !is_hardware_domain(d) ) @@ -358,7 +358,7 @@ int iommu_unmap(struct domain *d, dfn_t for ( i = 0; i < (1ul << page_order); i++ ) { - int err = hd->platform_ops->unmap_page(d, dfn_add(dfn, i)); + int err = iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i)); if ( likely(!err) ) continue; @@ -389,7 +389,7 @@ int iommu_lookup_page(struct domain *d, if ( !iommu_enabled || !hd->platform_ops ) return -EOPNOTSUPP; - return hd->platform_ops->lookup_page(d, dfn, mfn, flags); + return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags); } static void 
iommu_free_pagetables(unsigned long unused) @@ -402,7 +402,7 @@ static void iommu_free_pagetables(unsign spin_unlock(&iommu_pt_cleanup_lock); if ( !pg ) return; - iommu_get_ops()->free_page_table(pg); + iommu_vcall(iommu_get_ops(), free_page_table, pg); } while ( !softirq_pending(smp_processor_id()) ); tasklet_schedule_on_cpu(&iommu_pt_cleanup_tasklet, @@ -417,7 +417,7 @@ int iommu_iotlb_flush(struct domain *d, if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush ) return 0; - rc = hd->platform_ops->iotlb_flush(d, dfn, page_count); + rc = iommu_call(hd->platform_ops, iotlb_flush, d, dfn, page_count); if ( unlikely(rc) ) { if ( !d->is_shutting_down && printk_ratelimit() ) @@ -440,7 +440,7 @@ int iommu_iotlb_flush_all(struct domain if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all ) return 0; - rc = hd->platform_ops->iotlb_flush_all(d); + rc = iommu_call(hd->platform_ops, iotlb_flush_all, d); if ( unlikely(rc) ) { if ( !d->is_shutting_down && printk_ratelimit() ) --- a/xen/drivers/passthrough/pci.c +++ b/xen/drivers/passthrough/pci.c @@ -1349,14 +1349,14 @@ int iommu_update_ire_from_msi( struct msi_desc *msi_desc, struct msi_msg *msg) { return iommu_intremap - ? iommu_get_ops()->update_ire_from_msi(msi_desc, msg) : 0; + ? 
iommu_call(&iommu_ops, update_ire_from_msi, msi_desc, msg) : 0; } void iommu_read_msi_from_ire( struct msi_desc *msi_desc, struct msi_msg *msg) { if ( iommu_intremap ) - iommu_get_ops()->read_msi_from_ire(msi_desc, msg); + iommu_vcall(&iommu_ops, read_msi_from_ire, msi_desc, msg); } static int iommu_add_device(struct pci_dev *pdev) --- a/xen/drivers/passthrough/x86/iommu.c +++ b/xen/drivers/passthrough/x86/iommu.c @@ -28,14 +28,12 @@ struct iommu_ops iommu_ops; void iommu_update_ire_from_apic( unsigned int apic, unsigned int reg, unsigned int value) { - const struct iommu_ops *ops = iommu_get_ops(); - ops->update_ire_from_apic(apic, reg, value); + iommu_vcall(&iommu_ops, update_ire_from_apic, apic, reg, value); } unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg) { - const struct iommu_ops *ops = iommu_get_ops(); - return ops->read_apic_from_ire(apic, reg); + return iommu_call(&iommu_ops, read_apic_from_ire, apic, reg); } int __init iommu_setup_hpet_msi(struct msi_desc *msi) @@ -46,7 +44,6 @@ int __init iommu_setup_hpet_msi(struct m int arch_iommu_populate_page_table(struct domain *d) { - const struct domain_iommu *hd = dom_iommu(d); struct page_info *page; int rc = 0, n = 0; @@ -68,9 +65,8 @@ int arch_iommu_populate_page_table(struc { ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH)); BUG_ON(SHARED_M2P(gfn)); - rc = hd->platform_ops->map_page(d, _dfn(gfn), _mfn(mfn), - IOMMUF_readable | - IOMMUF_writable); + rc = iommu_call(&iommu_ops, map_page, d, _dfn(gfn), _mfn(mfn), + IOMMUF_readable | IOMMUF_writable); } if ( rc ) { --- a/xen/include/asm-x86/iommu.h +++ b/xen/include/asm-x86/iommu.h @@ -62,6 +62,12 @@ int amd_iov_detect(void); extern struct iommu_ops iommu_ops; +#ifdef NDEBUG +# include <asm/alternative.h> +# define iommu_call(ops, fn, args...) alternative_call(iommu_ops.fn, ## args) +# define iommu_vcall(ops, fn, args...) 
alternative_vcall(iommu_ops.fn, ## args) +#endif + static inline const struct iommu_ops *iommu_get_ops(void) { BUG_ON(!iommu_ops.init); --- a/xen/include/xen/iommu.h +++ b/xen/include/xen/iommu.h @@ -196,6 +196,11 @@ struct iommu_ops { #include <asm/iommu.h> +#ifndef iommu_call +# define iommu_call(ops, fn, args...) ((ops)->fn(args)) +# define iommu_vcall iommu_call +#endif + enum iommu_status { IOMMU_STATUS_disabled, _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support. |