[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging-4.11] IOMMU: generalize VT-d's tracking of mapped RMRR regions
commit fb23026c1489023bdad225795719343c640a97fc Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Wed Aug 25 16:01:16 2021 +0200 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Wed Aug 25 16:01:16 2021 +0200 IOMMU: generalize VT-d's tracking of mapped RMRR regions In order to re-use it elsewhere, move the logic to vendor independent code and strip it of RMRR specifics. Note that the prior "map" parameter gets folded into the new "p2ma" one (which AMD IOMMU code will want to make use of), assigning alternative meaning ("unmap") to p2m_access_x. Prepare set_identity_p2m_entry() and p2m_get_iommu_flags() for getting passed access types other than p2m_access_rw (in the latter case just for p2m_mmio_direct requests). Note also that, to be on the safe side, an overlap check gets added to the main loop of iommu_identity_mapping(). This is part of XSA-378. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: Paul Durrant <paul@xxxxxxx> master commit: c0e19d7c6c42f0bfccccd96b4f7b03b5515e10fc master date: 2021-08-25 14:15:57 +0200 --- xen/arch/x86/mm/p2m.c | 3 +- xen/drivers/passthrough/vtd/iommu.c | 98 +++++-------------------------------- xen/drivers/passthrough/x86/iommu.c | 95 ++++++++++++++++++++++++++++++++++- xen/include/asm-x86/iommu.h | 8 ++- xen/include/asm-x86/mem_access.h | 6 +-- xen/include/asm-x86/p2m.h | 35 +++++++++++-- 6 files changed, 150 insertions(+), 95 deletions(-) diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index b8480e0e1c..5802b580ed 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -1157,7 +1157,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l, { if ( !need_iommu(d) ) return 0; - return iommu_map_page(d, gfn_l, gfn_l, IOMMUF_readable|IOMMUF_writable); + return iommu_map_page(d, gfn_l, gfn_l, + p2m_access_to_iommu_flags(p2ma)); } gfn_lock(p2m, gfn, 0); diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c index 698b176a81..3e6fa11dd5 100644 --- 
a/xen/drivers/passthrough/vtd/iommu.c +++ b/xen/drivers/passthrough/vtd/iommu.c @@ -42,12 +42,6 @@ #include "vtd.h" #include "../ats.h" -struct mapped_rmrr { - struct list_head list; - u64 base, end; - unsigned int count; -}; - /* Possible unfiltered LAPIC/MSI messages from untrusted sources? */ bool __read_mostly untrusted_msi; @@ -1785,16 +1779,11 @@ out: static void iommu_domain_teardown(struct domain *d) { struct domain_iommu *hd = dom_iommu(d); - struct mapped_rmrr *mrmrr, *tmp; if ( list_empty(&acpi_drhd_units) ) return; - list_for_each_entry_safe ( mrmrr, tmp, &hd->arch.mapped_rmrrs, list ) - { - list_del(&mrmrr->list); - xfree(mrmrr); - } + iommu_identity_map_teardown(d); if ( iommu_use_hap_pt(d) ) return; @@ -1903,74 +1892,6 @@ static void iommu_set_pgd(struct domain *d) pagetable_get_paddr(pagetable_from_mfn(pgd_mfn)); } -static int rmrr_identity_mapping(struct domain *d, bool_t map, - const struct acpi_rmrr_unit *rmrr, - u32 flag) -{ - unsigned long base_pfn = rmrr->base_address >> PAGE_SHIFT_4K; - unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K; - struct mapped_rmrr *mrmrr; - struct domain_iommu *hd = dom_iommu(d); - - ASSERT(pcidevs_locked()); - ASSERT(rmrr->base_address < rmrr->end_address); - - /* - * No need to acquire hd->arch.mapping_lock: Both insertion and removal - * get done while holding pcidevs_lock. 
- */ - list_for_each_entry( mrmrr, &hd->arch.mapped_rmrrs, list ) - { - if ( mrmrr->base == rmrr->base_address && - mrmrr->end == rmrr->end_address ) - { - int ret = 0; - - if ( map ) - { - ++mrmrr->count; - return 0; - } - - if ( --mrmrr->count ) - return 0; - - while ( base_pfn < end_pfn ) - { - if ( clear_identity_p2m_entry(d, base_pfn) ) - ret = -ENXIO; - base_pfn++; - } - - list_del(&mrmrr->list); - xfree(mrmrr); - return ret; - } - } - - if ( !map ) - return -ENOENT; - - while ( base_pfn < end_pfn ) - { - int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag); - - if ( err ) - return err; - base_pfn++; - } - - mrmrr = xmalloc(struct mapped_rmrr); - if ( !mrmrr ) - return -ENOMEM; - mrmrr->base = rmrr->base_address; - mrmrr->end = rmrr->end_address; - mrmrr->count = 1; - list_add_tail(&mrmrr->list, &hd->arch.mapped_rmrrs); - - return 0; -} - static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev) { struct acpi_rmrr_unit *rmrr; @@ -2002,7 +1923,9 @@ static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev) * Since RMRRs are always reserved in the e820 map for the hardware * domain, there shouldn't be a conflict. */ - ret = rmrr_identity_mapping(pdev->domain, 1, rmrr, 0); + ret = iommu_identity_mapping(pdev->domain, p2m_access_rw, + rmrr->base_address, rmrr->end_address, + 0); if ( ret ) dprintk(XENLOG_ERR VTDPREFIX, "d%d: RMRR mapping failed\n", pdev->domain->domain_id); @@ -2047,7 +1970,8 @@ static int intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev) * Any flag is nothing to clear these mappings but here * its always safe and strict to set 0. */ - rmrr_identity_mapping(pdev->domain, 0, rmrr, 0); + iommu_identity_mapping(pdev->domain, p2m_access_x, rmrr->base_address, + rmrr->end_address, 0); } return domain_context_unmap(pdev->domain, devfn, pdev); @@ -2214,7 +2138,8 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d) * domain, there shouldn't be a conflict. So its always safe and * strict to set 0. 
*/ - ret = rmrr_identity_mapping(d, 1, rmrr, 0); + ret = iommu_identity_mapping(d, p2m_access_rw, rmrr->base_address, + rmrr->end_address, 0); if ( ret ) dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: mapping reserved region failed\n"); @@ -2371,7 +2296,9 @@ static int reassign_device_ownership( * Any RMRR flag is always ignored when remove a device, * but its always safe and strict to set 0. */ - ret = rmrr_identity_mapping(source, 0, rmrr, 0); + ret = iommu_identity_mapping(source, p2m_access_x, + rmrr->base_address, + rmrr->end_address, 0); if ( ret != -ENOENT ) return ret; } @@ -2468,7 +2395,8 @@ static int intel_iommu_assign_device( PCI_BUS(bdf) == bus && PCI_DEVFN2(bdf) == devfn ) { - ret = rmrr_identity_mapping(d, 1, rmrr, flag); + ret = iommu_identity_mapping(d, p2m_access_rw, rmrr->base_address, + rmrr->end_address, flag); if ( ret ) { int rc; diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c index 228409c24a..c467342d36 100644 --- a/xen/drivers/passthrough/x86/iommu.c +++ b/xen/drivers/passthrough/x86/iommu.c @@ -144,7 +144,7 @@ int arch_iommu_domain_init(struct domain *d) struct domain_iommu *hd = dom_iommu(d); spin_lock_init(&hd->arch.mapping_lock); - INIT_LIST_HEAD(&hd->arch.mapped_rmrrs); + INIT_LIST_HEAD(&hd->arch.identity_maps); return 0; } @@ -153,6 +153,99 @@ void arch_iommu_domain_destroy(struct domain *d) { } +struct identity_map { + struct list_head list; + paddr_t base, end; + p2m_access_t access; + unsigned int count; +}; + +int iommu_identity_mapping(struct domain *d, p2m_access_t p2ma, + paddr_t base, paddr_t end, + unsigned int flag) +{ + unsigned long base_pfn = base >> PAGE_SHIFT_4K; + unsigned long end_pfn = PAGE_ALIGN_4K(end) >> PAGE_SHIFT_4K; + struct identity_map *map; + struct domain_iommu *hd = dom_iommu(d); + + ASSERT(pcidevs_locked()); + ASSERT(base < end); + + /* + * No need to acquire hd->arch.mapping_lock: Both insertion and removal + * get done while holding pcidevs_lock. 
+ */ + list_for_each_entry( map, &hd->arch.identity_maps, list ) + { + if ( map->base == base && map->end == end ) + { + int ret = 0; + + if ( p2ma != p2m_access_x ) + { + if ( map->access != p2ma ) + return -EADDRINUSE; + ++map->count; + return 0; + } + + if ( --map->count ) + return 0; + + while ( base_pfn < end_pfn ) + { + if ( clear_identity_p2m_entry(d, base_pfn) ) + ret = -ENXIO; + base_pfn++; + } + + list_del(&map->list); + xfree(map); + + return ret; + } + + if ( end >= map->base && map->end >= base ) + return -EADDRINUSE; + } + + if ( p2ma == p2m_access_x ) + return -ENOENT; + + while ( base_pfn < end_pfn ) + { + int err = set_identity_p2m_entry(d, base_pfn, p2ma, flag); + + if ( err ) + return err; + base_pfn++; + } + + map = xmalloc(struct identity_map); + if ( !map ) + return -ENOMEM; + map->base = base; + map->end = end; + map->access = p2ma; + map->count = 1; + list_add_tail(&map->list, &hd->arch.identity_maps); + + return 0; +} + +void iommu_identity_map_teardown(struct domain *d) +{ + struct domain_iommu *hd = dom_iommu(d); + struct identity_map *map, *tmp; + + list_for_each_entry_safe ( map, tmp, &hd->arch.identity_maps, list ) + { + list_del(&map->list); + xfree(map); + } +} + /* * Local variables: * mode: C diff --git a/xen/include/asm-x86/iommu.h b/xen/include/asm-x86/iommu.h index dfc9b77594..1913606393 100644 --- a/xen/include/asm-x86/iommu.h +++ b/xen/include/asm-x86/iommu.h @@ -16,6 +16,7 @@ #include <xen/errno.h> #include <xen/list.h> +#include <xen/mem_access.h> #include <xen/spinlock.h> #include <asm/processor.h> #include <asm/hvm/vmx/vmcs.h> @@ -36,7 +37,7 @@ struct arch_iommu spinlock_t mapping_lock; /* io page table lock */ int agaw; /* adjusted guest address width, 0 is level 2 30-bit */ u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */ - struct list_head mapped_rmrrs; + struct list_head identity_maps; /* amd iommu support */ int paging_mode; @@ -94,6 +95,11 @@ bool_t iommu_supports_eim(void); int 
iommu_enable_x2apic_IR(void); void iommu_disable_x2apic_IR(void); +int iommu_identity_mapping(struct domain *d, p2m_access_t p2ma, + paddr_t base, paddr_t end, + unsigned int flag); +void iommu_identity_map_teardown(struct domain *d); + extern bool untrusted_msi; int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq, diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h index 4043c9fb4d..c5775edff4 100644 --- a/xen/include/asm-x86/mem_access.h +++ b/xen/include/asm-x86/mem_access.h @@ -44,10 +44,8 @@ bool p2m_mem_access_emulate_check(struct vcpu *v, const vm_event_response_t *rsp); /* Sanity check for mem_access hardware support */ -static inline bool p2m_mem_access_sanity_check(struct domain *d) -{ - return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d); -} +#define p2m_mem_access_sanity_check(d) \ + (is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d)) #endif /*__ASM_X86_MEM_ACCESS_H__ */ diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index ebba14a85c..0e1888b8b3 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -836,6 +836,34 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma); +/* p2m access to IOMMU flags */ +static inline unsigned int p2m_access_to_iommu_flags(p2m_access_t p2ma) +{ + switch ( p2ma ) + { + case p2m_access_rw: + case p2m_access_rwx: + return IOMMUF_readable | IOMMUF_writable; + + case p2m_access_r: + case p2m_access_rx: + case p2m_access_rx2rw: + return IOMMUF_readable; + + case p2m_access_w: + case p2m_access_wx: + return IOMMUF_writable; + + case p2m_access_n: + case p2m_access_x: + case p2m_access_n2rwx: + return 0; + } + + ASSERT_UNREACHABLE(); + return 0; +} + /* * p2m type to IOMMU flags */ @@ -857,9 +885,10 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, flags = IOMMUF_readable; break; case p2m_mmio_direct: - flags = IOMMUF_readable; - if ( 
!rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) ) - flags |= IOMMUF_writable; + flags = p2m_access_to_iommu_flags(p2ma); + if ( (flags & IOMMUF_writable) && + rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) ) + flags &= ~IOMMUF_writable; break; default: flags = 0; -- generated by git-patchbot for /home/xen/git/xen.git#staging-4.11
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.