[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen stable-4.11] AMD/IOMMU: re-arrange/complete re-assignment handling
commit ba79e525c1c7982e93d5f889cd66c783ecdf499a Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Wed Aug 25 16:01:42 2021 +0200 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Wed Aug 25 16:01:42 2021 +0200 AMD/IOMMU: re-arrange/complete re-assignment handling Prior to the assignment step having completed successfully, devices should not get associated with their new owner. Hand the device to DomIO (perhaps temporarily), until after the de-assignment step has completed. De-assignment of a device (from other than Dom0) as well as failure of reassign_device() during assignment should result in unity mappings getting torn down. This in turn requires switching to a refcounted mapping approach, as was already used by VT-d for its RMRRs, to prevent unmapping a region used by multiple devices. This is CVE-2021-28696 / part of XSA-378. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Reviewed-by: Paul Durrant <paul@xxxxxxx> master commit: 899272539cbe1acda736a850015416fff653a1b6 master date: 2021-08-25 14:16:26 +0200 --- xen/drivers/passthrough/amd/iommu_map.c | 54 +++++++++++++++++++-------- xen/drivers/passthrough/amd/pci_amd_iommu.c | 54 +++++++++++++++++++++------ xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 6 ++- 3 files changed, 84 insertions(+), 30 deletions(-) diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c index 24d027e7d1..90a801a79d 100644 --- a/xen/drivers/passthrough/amd/iommu_map.c +++ b/xen/drivers/passthrough/amd/iommu_map.c @@ -716,27 +716,49 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn) return 0; } -int amd_iommu_reserve_domain_unity_map(struct domain *domain, - u64 phys_addr, - unsigned long size, int iw, int ir) +int amd_iommu_reserve_domain_unity_map(struct domain *d, + const struct ivrs_unity_map *map, + unsigned int flag) { - unsigned long npages, i; - unsigned long gfn; - unsigned int flags = !!ir; - int rt = 0; + int rc; - if ( iw ) - flags |= IOMMUF_writable; + if ( d == 
dom_io ) + return 0; - npages = region_to_pages(phys_addr, size); - gfn = phys_addr >> PAGE_SHIFT; - for ( i = 0; i < npages; i++ ) + for ( rc = 0; !rc && map; map = map->next ) { - rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags); - if ( rt != 0 ) - return rt; + p2m_access_t p2ma = p2m_access_n; + + if ( map->read ) + p2ma |= p2m_access_r; + if ( map->write ) + p2ma |= p2m_access_w; + + rc = iommu_identity_mapping(d, p2ma, map->addr, + map->addr + map->length - 1, flag); } - return 0; + + return rc; +} + +int amd_iommu_reserve_domain_unity_unmap(struct domain *d, + const struct ivrs_unity_map *map) +{ + int rc; + + if ( d == dom_io ) + return 0; + + for ( rc = 0; map; map = map->next ) + { + int ret = iommu_identity_mapping(d, p2m_access_x, map->addr, + map->addr + map->length - 1, 0); + + if ( ret && ret != -ENOENT && !rc ) + rc = ret; + } + + return rc; } /* Share p2m table with iommu. */ diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c index 03df7c0dee..49e34de60b 100644 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c @@ -333,6 +333,7 @@ static int reassign_device(struct domain *source, struct domain *target, struct amd_iommu *iommu; int bdf, rc; struct domain_iommu *t = dom_iommu(target); + const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg); bdf = PCI_BDF2(pdev->bus, pdev->devfn); iommu = find_iommu_for_device(pdev->seg, bdf); @@ -347,10 +348,24 @@ static int reassign_device(struct domain *source, struct domain *target, amd_iommu_disable_domain_device(source, iommu, devfn, pdev); - if ( devfn == pdev->devfn ) + /* + * If the device belongs to the hardware domain, and it has a unity mapping, + * don't remove it from the hardware domain, because BIOS may reference that + * mapping. 
+ */ + if ( !is_hardware_domain(source) ) { - list_move(&pdev->domain_list, &target->arch.pdev_list); - pdev->domain = target; + rc = amd_iommu_reserve_domain_unity_unmap( + source, + ivrs_mappings[get_dma_requestor_id(pdev->seg, bdf)].unity_map); + if ( rc ) + return rc; + } + + if ( devfn == pdev->devfn && pdev->domain != dom_io ) + { + list_move(&pdev->domain_list, &dom_io->arch.pdev_list); + pdev->domain = dom_io; } rc = allocate_domain_resources(t); @@ -362,6 +377,12 @@ static int reassign_device(struct domain *source, struct domain *target, pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), source->domain_id, target->domain_id); + if ( devfn == pdev->devfn && pdev->domain != target ) + { + list_move(&pdev->domain_list, &target->arch.pdev_list); + pdev->domain = target; + } + return 0; } @@ -372,20 +393,28 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn, struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg); int bdf = PCI_BDF2(pdev->bus, devfn); int req_id = get_dma_requestor_id(pdev->seg, bdf); - const struct ivrs_unity_map *unity_map; + int rc = amd_iommu_reserve_domain_unity_map( + d, ivrs_mappings[req_id].unity_map, flag); + + if ( !rc ) + rc = reassign_device(pdev->domain, d, devfn, pdev); - for ( unity_map = ivrs_mappings[req_id].unity_map; unity_map; - unity_map = unity_map->next ) + if ( rc && !is_hardware_domain(d) ) { - int rc = amd_iommu_reserve_domain_unity_map( - d, unity_map->addr, unity_map->length, - unity_map->write, unity_map->read); + int ret = amd_iommu_reserve_domain_unity_unmap( + d, ivrs_mappings[req_id].unity_map); - if ( rc ) - return rc; + if ( ret ) + { + printk(XENLOG_ERR "AMD-Vi: " + "unity-unmap for d%d/%04x:%02x:%02x.%u failed (%d)\n", + d->domain_id, pdev->seg, pdev->bus, + PCI_SLOT(devfn), PCI_FUNC(devfn), ret); + domain_crash(d); + } } - return reassign_device(pdev->domain, d, devfn, pdev); + return rc; } static void deallocate_next_page_table(struct page_info *pg, int level) @@ -451,6 +480,7 
@@ static void deallocate_iommu_page_tables(struct domain *d) static void amd_iommu_domain_destroy(struct domain *d) { + iommu_identity_map_teardown(d); deallocate_iommu_page_tables(d); amd_iommu_flush_all_pages(d); } diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h index 22d6614169..7c082ef88e 100644 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h @@ -60,8 +60,10 @@ int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn); u64 amd_iommu_get_next_table_from_pte(u32 *entry); int __must_check amd_iommu_alloc_root(struct domain_iommu *hd); int amd_iommu_reserve_domain_unity_map(struct domain *domain, - u64 phys_addr, unsigned long size, - int iw, int ir); + const struct ivrs_unity_map *map, + unsigned int flag); +int amd_iommu_reserve_domain_unity_unmap(struct domain *d, + const struct ivrs_unity_map *map); /* Share p2m table with iommu */ void amd_iommu_share_p2m(struct domain *d); -- generated by git-patchbot for /home/xen/git/xen.git#stable-4.11
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.