[PATCH v8 4/6] AMD/IOMMU: respect AtsDisabled device flag
IVHD entries may specify that ATS is to be blocked for a device or
range of devices. Honor firmware telling us so.

While adding respective checks I noticed that the 2nd conditional in
amd_iommu_setup_domain_device() failed to check the IOMMU's capability.
Add the missing part of the condition there, as no good can come from
enabling ATS on a device when the IOMMU is not capable of dealing with
ATS requests.

For actually using ACPI_IVHD_ATS_DISABLED, make its expansion no longer
exhibit UB.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
TBD: As an alternative to adding the missing IOMMU capability check, we
     may want to consider simply using dte->i in the 2nd conditional in
     amd_iommu_setup_domain_device().

Note that while ATS enabling/disabling gets invoked without any locks
held, the two functions should not be possible to race with one another
for any individual device (or else we'd be in trouble already, as ATS
might then get re-enabled immediately after it was disabled, with the
DTE out of sync with this setting).
---
v7: New.

--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -120,6 +120,7 @@ struct ivrs_mappings {
     uint16_t dte_requestor_id;
     bool valid:1;
     bool dte_allow_exclusion:1;
+    bool block_ats:1;
 
     /* ivhd device data settings */
     uint8_t device_flags;
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -55,8 +55,8 @@ union acpi_ivhd_device {
 };
 
 static void __init add_ivrs_mapping_entry(
-    uint16_t bdf, uint16_t alias_id, uint8_t flags, bool alloc_irt,
-    struct amd_iommu *iommu)
+    uint16_t bdf, uint16_t alias_id, uint8_t flags, unsigned int ext_flags,
+    bool alloc_irt, struct amd_iommu *iommu)
 {
     struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
 
@@ -66,6 +66,7 @@ static void __init add_ivrs_mapping_entr
     ivrs_mappings[bdf].dte_requestor_id = alias_id;
 
     /* override flags for range of devices */
+    ivrs_mappings[bdf].block_ats = ext_flags & ACPI_IVHD_ATS_DISABLED;
     ivrs_mappings[bdf].device_flags = flags;
 
     /* Don't map an IOMMU by itself. */
@@ -499,7 +500,7 @@ static u16 __init parse_ivhd_device_sele
         return 0;
     }
 
-    add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, false,
+    add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, 0, false,
                            iommu);
 
     return sizeof(*select);
@@ -545,7 +546,7 @@ static u16 __init parse_ivhd_device_rang
     AMD_IOMMU_DEBUG(" Dev_Id Range: %#x -> %#x\n", first_bdf, last_bdf);
 
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
-        add_ivrs_mapping_entry(bdf, bdf, range->start.header.data_setting,
+        add_ivrs_mapping_entry(bdf, bdf, range->start.header.data_setting, 0,
                                false, iommu);
 
     return dev_length;
@@ -580,7 +581,7 @@ static u16 __init parse_ivhd_device_alia
 
     AMD_IOMMU_DEBUG(" Dev_Id Alias: %#x\n", alias_id);
 
-    add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, true,
+    add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, 0, true,
                            iommu);
 
     return dev_length;
@@ -636,7 +637,7 @@ static u16 __init parse_ivhd_device_alia
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
         add_ivrs_mapping_entry(bdf, alias_id,
                                range->alias.header.data_setting,
-                               true, iommu);
+                               0, true, iommu);
 
     return dev_length;
 }
@@ -661,7 +662,8 @@ static u16 __init parse_ivhd_device_exte
         return 0;
     }
 
-    add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting, false, iommu);
+    add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting,
+                           ext->extended_data, false, iommu);
 
     return dev_length;
 }
@@ -708,7 +710,7 @@ static u16 __init parse_ivhd_device_exte
     for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
         add_ivrs_mapping_entry(bdf, bdf,
                                range->extended.header.data_setting,
-                               false, iommu);
+                               range->extended.extended_data, false, iommu);
 
     return dev_length;
 }
@@ -800,7 +802,7 @@ static u16 __init parse_ivhd_device_spec
     AMD_IOMMU_DEBUG("IVHD Special: %pp variety %#x handle %#x\n",
                     &PCI_SBDF2(seg, bdf), special->variety, special->handle);
 
-    add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, true,
+    add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, 0, true,
                            iommu);
 
     switch ( special->variety )
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -105,6 +105,7 @@ static int __must_check amd_iommu_setup_
     int req_id, valid = 1, rc;
     u8 bus = pdev->bus;
     struct domain_iommu *hd = dom_iommu(domain);
+    const struct ivrs_mappings *ivrs_dev;
 
     if ( QUARANTINE_SKIP(domain) )
         return 0;
@@ -122,20 +123,18 @@ static int __must_check amd_iommu_setup_
     req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
     table = iommu->dev_table.buffer;
     dte = &table[req_id];
+    ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
 
     spin_lock_irqsave(&iommu->lock, flags);
 
     if ( !dte->v || !dte->tv )
     {
-        const struct ivrs_mappings *ivrs_dev;
-
         /* bind DTE to domain page-tables */
         amd_iommu_set_root_page_table(
             dte, page_to_maddr(hd->arch.amd.root_table), domain->domain_id,
             hd->arch.amd.paging_mode, valid);
 
         /* Undo what amd_iommu_disable_domain_device() may have done. */
-        ivrs_dev = &get_ivrs_mappings(iommu->seg)[req_id];
         if ( dte->it_root )
         {
             dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED;
@@ -146,6 +145,7 @@ static int __must_check amd_iommu_setup_
         dte->sys_mgt = MASK_EXTR(ivrs_dev->device_flags, ACPI_IVHD_SYSTEM_MGMT);
 
         if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
+             !ivrs_dev->block_ats &&
              iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             dte->i = ats_enabled;
 
@@ -166,6 +166,8 @@ static int __must_check amd_iommu_setup_
     ASSERT(pcidevs_locked());
 
     if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
+         !ivrs_dev->block_ats &&
+         iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) &&
          !pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
     {
         if ( devfn == pdev->devfn )
--- a/xen/include/acpi/actbl2.h
+++ b/xen/include/acpi/actbl2.h
@@ -851,7 +851,7 @@ struct acpi_ivrs_device8b {
 
 /* Values for extended_data above */
 
-#define ACPI_IVHD_ATS_DISABLED      (1<<31)
+#define ACPI_IVHD_ATS_DISABLED      (1u << 31)
 
 /* Type 72: 8-byte device entry */
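
For background on the final actbl2.h hunk: with a 32-bit int, (1<<31)
shifts a bit into the sign position, which ISO C leaves undefined; hence
the switch to the unsigned form. A minimal standalone sketch of the
difference (illustrative only, not part of the patch; the "typical"
result assumes the common two's-complement behaviour):

#include <stdio.h>

int main(void)
{
    /* (1 << 31) overflows signed int: undefined behaviour in ISO C.
     * Where it happens to "work", the negative int result additionally
     * sign-extends when widened, unlike the well-defined unsigned form. */
    unsigned long long ub = (unsigned long long)(1 << 31); /* typically 0xffffffff80000000 */
    unsigned long long ok = 1u << 31;                      /* well defined: 0x0000000080000000 */

    printf("%#llx vs %#llx\n", ub, ok);
    return 0;
}

Besides avoiding the UB, the unsigned constant therefore also can't
pollute high bits if it is ever masked against a wider unsigned field.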