[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH] AMD-IOMMU: fix initialization order (after 23863:9e0259239822)
That original patch caused alloc_ivrs_mappings() to be called too early, so
things get moved back to where they were, just converting the single call
there to a loop over all IOMMUs.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/drivers/passthrough/amd/iommu_detect.c
+++ b/xen/drivers/passthrough/amd/iommu_detect.c
@@ -121,10 +121,6 @@ int __init amd_iommu_detect_one_acpi(voi
     spin_lock_init(&iommu->lock);
 
     iommu->seg = ivhd_block->pci_segment;
-    if (alloc_ivrs_mappings(ivhd_block->pci_segment)) {
-        xfree(iommu);
-        return -ENOMEM;
-    }
     iommu->bdf = ivhd_block->header.dev_id;
     iommu->cap_offset = ivhd_block->cap_offset;
     iommu->mmio_base_phys = ivhd_block->mmio_base;
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -769,7 +769,7 @@ int iterate_ivrs_entries(int (*handler)(
     return rc;
 }
 
-int __init alloc_ivrs_mappings(u16 seg)
+static int __init alloc_ivrs_mappings(u16 seg)
 {
     struct ivrs_mappings *ivrs_mappings;
     int bdf;
@@ -881,8 +881,9 @@ int __init amd_iommu_init(void)
         goto error_out;
 
     radix_tree_init(&ivrs_maps);
-    if ( alloc_ivrs_mappings(0) != 0 )
-        goto error_out;
+    for_each_amd_iommu ( iommu )
+        if ( alloc_ivrs_mappings(iommu->seg) != 0 )
+            goto error_out;
 
     if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
         goto error_out;
--- a/xen/include/asm-x86/amd-iommu.h
+++ b/xen/include/asm-x86/amd-iommu.h
@@ -103,7 +103,6 @@ struct ivrs_mappings {
 
 extern unsigned short ivrs_bdf_entries;
 
-int alloc_ivrs_mappings(u16 seg);
 struct ivrs_mappings *get_ivrs_mappings(u16 seg);
 int iterate_ivrs_mappings(int (*)(u16 seg, struct ivrs_mappings *));
 int iterate_ivrs_entries(int (*)(u16 seg, struct ivrs_mappings *));

Attachment:
pci-multi-seg-amd-iommu-fix.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.