[Xen-devel] [PATCH v4 2/4] iommu: make iommu_inclusive_mapping a suboption of dom0-iommu
Introduce a new generic dom0-iommu=inclusive option that supersedes
iommu_inclusive_mapping. The previous behaviour is preserved; the option is
only enabled by default on Intel hardware.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Changes since v2:
 - Fix typo in commit message.
 - Change style and text of the documentation in xen command line.
 - Set the defaults in {intel/amd}_iommu_hwdom_init for inclusive.
 - Re-add the iommu_dom0_passthrough || !is_pv_domain(d) check.

Changes since v1:
 - Use dom0-iommu instead of the iommu option.
 - Only enable by default on Intel hardware.
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 docs/misc/xen-command-line.markdown         | 17 +++++-
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  4 ++
 xen/drivers/passthrough/arm/iommu.c         |  4 ++
 xen/drivers/passthrough/iommu.c             | 23 ++++++--
 xen/drivers/passthrough/vtd/extern.h        |  2 -
 xen/drivers/passthrough/vtd/iommu.c         |  8 ++-
 xen/drivers/passthrough/vtd/x86/vtd.c       | 58 +-------------------
 xen/drivers/passthrough/x86/iommu.c         | 59 +++++++++++++++++++++
 xen/include/xen/iommu.h                     |  2 +
 9 files changed, 109 insertions(+), 68 deletions(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index ea451f088e..90b32fe3f0 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1205,7 +1205,7 @@ detection of systems known to misbehave upon accesses to that port.
 >> Enable IOMMU debugging code (implies `verbose`).
 
 ### dom0-iommu
-> `= List of [ none | strict | relaxed ]`
+> `= List of [ none | strict | relaxed | inclusive ]`
 
 * `none`: disables DMA remapping for Dom0.
 
@@ -1221,6 +1221,18 @@ PV Dom0:
 Note that all the above options are mutually exclusive. Specifying more than
 one on the `dom0-iommu` command line will result in undefined behavior.
 
+The following options control whether non-RAM regions are added to the Dom0
+iommu tables. Note that they can be prefixed with `no-` to effect the inverse
+meaning:
+
+* `inclusive`: sets up DMA remapping for all the non-RAM memory below 4GB
+  except for unusable ranges. Use this to work around firmware issues providing
+  incorrect RMRR/IVMD entries. Rather than only mapping RAM pages for IOMMU
+  accesses for Dom0, with this option all pages up to 4GB, not marked as
+  unusable in the E820 table, will get a mapping established. Note that this
+  option is only applicable to a PV Dom0 and is enabled by default on Intel
+  hardware.
+
 ### iommu\_dev\_iotlb\_timeout
 > `= <integer>`
 
@@ -1233,6 +1245,9 @@ wait descriptor timed out', try increasing this value.
 ### iommu\_inclusive\_mapping (VT-d)
 > `= <boolean>`
 
+**WARNING: This command line option is deprecated, and superseded by
+_dom0-iommu=inclusive_ - using both options in combination is undefined.**
+
 > Default: `true`
 
 Use this to work around firmware issues providing incorrect RMRR entries.
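For reference, the sub-options documented above are given as a comma separated
list on the hypervisor command line and may be negated with the `no-` prefix,
e.g. (illustrative values only):

    dom0-iommu=relaxed,inclusive
    dom0-iommu=strict,no-inclusive

The first form keeps relaxed mode while forcing the inclusive workaround on;
the second requests strict mode and turns the Intel default off.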
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index eeacf713e4..0e0c99c942 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -253,6 +253,10 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
     unsigned long i;
     const struct amd_iommu *iommu;
 
+    /* Inclusive IOMMU mappings are disabled by default on AMD hardware. */
+    iommu_dom0_inclusive = iommu_dom0_inclusive == -1 ? false
+                                                      : iommu_dom0_inclusive;
+
     if ( allocate_domain_resources(dom_iommu(d)) )
         BUG();
 
diff --git a/xen/drivers/passthrough/arm/iommu.c b/xen/drivers/passthrough/arm/iommu.c
index 95b1abb972..325997b19f 100644
--- a/xen/drivers/passthrough/arm/iommu.c
+++ b/xen/drivers/passthrough/arm/iommu.c
@@ -73,3 +73,7 @@ int arch_iommu_populate_page_table(struct domain *d)
     /* The IOMMU shares the p2m with the CPU */
     return -ENOSYS;
 }
+
+void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
+{
+}
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 830560bdcf..f15c94be42 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -74,6 +74,7 @@ bool_t __read_mostly amd_iommu_perdev_intremap = 1;
 custom_param("dom0-iommu", parse_dom0_iommu_param);
 bool __hwdom_initdata iommu_dom0_strict;
 bool __read_mostly iommu_dom0_passthrough;
+int8_t __hwdom_initdata iommu_dom0_inclusive = -1;
 
 DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
 
@@ -144,16 +145,23 @@ static int __init parse_dom0_iommu_param(const char *s)
     int rc = 0;
 
     do {
+        bool val = !!strncmp(s, "no-", 3);
+
+        if ( !val )
+            s += 3;
+
         ss = strchr(s, ',');
         if ( !ss )
             ss = strchr(s, '\0');
 
-        if ( !strncmp(s, "none", ss - s) )
+        if ( !strncmp(s, "none", ss - s) && val )
             iommu_dom0_passthrough = true;
-        else if ( !strncmp(s, "strict", ss - s) )
+        else if ( !strncmp(s, "strict", ss - s) && val )
             iommu_dom0_strict = true;
-        else if ( !strncmp(s, "relaxed", ss - s) )
+        else if ( !strncmp(s, "relaxed", ss - s) && val )
             iommu_dom0_strict = false;
+        else if ( !strncmp(s, "inclusive", ss - s) )
+            iommu_dom0_inclusive = val;
         else
             rc = -EINVAL;
 
@@ -202,6 +210,13 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     if ( !iommu_enabled )
         return;
 
+    if ( iommu_dom0_inclusive == true && !is_pv_domain(d) )
+    {
+        printk(XENLOG_WARNING
+               "IOMMU inclusive mappings are only supported on PV Dom0\n");
+        iommu_dom0_inclusive = false;
+    }
+
     register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
     d->need_iommu = !!iommu_dom0_strict;
     if ( need_iommu(d) && !iommu_use_hap_pt(d) )
@@ -236,6 +251,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     }
 
     hd->platform_ops->hwdom_init(d);
+
+    arch_iommu_hwdom_init(d);
 }
 
 void iommu_teardown(struct domain *d)
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index fb7edfaef9..91cadc602e 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -99,6 +99,4 @@ void pci_vtd_quirk(const struct pci_dev *);
 bool_t platform_supports_intremap(void);
 bool_t platform_supports_x2apic(void);
 
-void vtd_set_hwdom_mapping(struct domain *d);
-
 #endif // _VTD_EXTERN_H_
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 8ac774215b..7c7e15755d 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1304,11 +1304,9 @@ static void __hwdom_init intel_iommu_hwdom_init(struct domain *d)
 {
     struct acpi_drhd_unit *drhd;
 
-    if ( !iommu_dom0_passthrough && is_pv_domain(d) )
-    {
-        /* Set up 1:1 page table for hardware domain. */
-        vtd_set_hwdom_mapping(d);
-    }
+    /* Inclusive mappings are enabled by default on Intel hardware for PV. */
+    iommu_dom0_inclusive = iommu_dom0_inclusive == -1 ? is_pv_domain(d)
+                                                      : iommu_dom0_inclusive;
 
     setup_hwdom_pci_devices(d, setup_hwdom_device);
     setup_hwdom_rmrr(d);
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 00a9891005..20323051d0 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -25,7 +25,6 @@
 #include <xen/irq.h>
 #include <xen/numa.h>
 #include <asm/fixmap.h>
-#include <asm/setup.h>
 #include "../iommu.h"
 #include "../dmar.h"
 #include "../vtd.h"
@@ -35,8 +34,7 @@
  * iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
  * 1:1 iommu mappings except xen and unusable regions.
  */
-static bool_t __hwdom_initdata iommu_inclusive_mapping = 1;
-boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
+boolean_param("iommu_inclusive_mapping", iommu_dom0_inclusive);
 
 void *map_vtd_domain_page(u64 maddr)
 {
@@ -108,57 +106,3 @@ void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
     spin_unlock(&d->event_lock);
 }
 
-void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
-{
-    unsigned long i, top, max_pfn;
-
-    BUG_ON(!is_hardware_domain(d));
-
-    max_pfn = (GB(4) >> PAGE_SHIFT) - 1;
-    top = max(max_pdx, pfn_to_pdx(max_pfn) + 1);
-
-    for ( i = 0; i < top; i++ )
-    {
-        unsigned long pfn = pdx_to_pfn(i);
-        bool map;
-        int rc;
-
-        /*
-         * Set up 1:1 mapping for dom0. Default to include only
-         * conventional RAM areas and let RMRRs include needed reserved
-         * regions. When set, the inclusive mapping additionally maps in
-         * every pfn up to 4GB except those that fall in unusable ranges.
-         */
-        if ( pfn > max_pfn && !mfn_valid(_mfn(pfn)) )
-            continue;
-
-        if ( iommu_inclusive_mapping && pfn <= max_pfn )
-            map = !page_is_ram_type(pfn, RAM_TYPE_UNUSABLE);
-        else
-            map = page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);
-
-        if ( !map )
-            continue;
-
-        /* Exclude Xen bits */
-        if ( xen_in_range(pfn) )
-            continue;
-
-        /*
-         * If dom0-strict mode is enabled then exclude conventional RAM
-         * and let the common code map dom0's pages.
-         */
-        if ( iommu_dom0_strict &&
-             page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
-            continue;
-
-        rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
-        if ( rc )
-            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
-                   d->domain_id, rc);
-
-        if (!(i & 0xfffff))
-            process_pending_softirqs();
-    }
-}
-
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 68182afd91..5a7a765e9d 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -20,6 +20,8 @@
 #include <xen/softirq.h>
 #include <xsm/xsm.h>
 
+#include <asm/setup.h>
+
 void iommu_update_ire_from_apic(
     unsigned int apic, unsigned int reg, unsigned int value)
 {
@@ -132,6 +134,63 @@ void arch_iommu_domain_destroy(struct domain *d)
 {
 }
 
+void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
+{
+    unsigned long i, top, max_pfn;
+
+    BUG_ON(!is_hardware_domain(d));
+
+    if ( iommu_dom0_passthrough || !is_pv_domain(d) )
+        return;
+
+    max_pfn = (GB(4) >> PAGE_SHIFT) - 1;
+    top = max(max_pdx, pfn_to_pdx(max_pfn) + 1);
+
+    for ( i = 0; i < top; i++ )
+    {
+        unsigned long pfn = pdx_to_pfn(i);
+        bool map;
+        int rc;
+
+        /*
+         * Set up 1:1 mapping for dom0. Default to include only
+         * conventional RAM areas and let RMRRs include needed reserved
+         * regions. When set, the inclusive mapping additionally maps in
+         * every pfn up to 4GB except those that fall in unusable ranges.
+         */
+        if ( pfn > max_pfn && !mfn_valid(_mfn(pfn)) )
+            continue;
+
+        if ( iommu_dom0_inclusive && pfn <= max_pfn )
+            map = !page_is_ram_type(pfn, RAM_TYPE_UNUSABLE);
+        else
+            map = page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL);
+
+        if ( !map )
+            continue;
+
+        /* Exclude Xen bits */
+        if ( xen_in_range(pfn) )
+            continue;
+
+        /*
+         * If dom0-strict mode is enabled then exclude conventional RAM
+         * and let the common code map dom0's pages.
+         */
+        if ( iommu_dom0_strict &&
+             page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
+            continue;
+
+        rc = iommu_map_page(d, pfn, pfn, IOMMUF_readable|IOMMUF_writable);
+        if ( rc )
+            printk(XENLOG_WARNING " d%d: IOMMU mapping failed: %d\n",
+                   d->domain_id, rc);
+
+        if (!(i & 0xfffff))
+            process_pending_softirqs();
+    }
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index c0c6975ac4..99e5b89c0f 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -37,6 +37,7 @@ extern bool_t iommu_debug;
 extern bool_t amd_iommu_perdev_intremap;
 
 extern bool iommu_dom0_strict, iommu_dom0_passthrough;
+extern int8_t iommu_dom0_inclusive;
 
 extern unsigned int iommu_dev_iotlb_timeout;
 
@@ -51,6 +52,7 @@ void arch_iommu_domain_destroy(struct domain *d);
 int arch_iommu_domain_init(struct domain *d);
 int arch_iommu_populate_page_table(struct domain *d);
 void arch_iommu_check_autotranslated_hwdom(struct domain *d);
+void arch_iommu_hwdom_init(struct domain *d);
 
 int iommu_construct(struct domain *d);
 
--
2.18.0
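As a side note, the mapping policy implemented by the new
arch_iommu_hwdom_init() can be summarised by the standalone sketch below. This
is not part of the patch: the page_is_*() and xen_in_range() helpers are
stand-in stubs for Xen's real E820/layout predicates, and the pfn values used
in main() are made up for illustration.

/* Minimal sketch of the Dom0 IOMMU mapping policy, with stubbed predicates. */
#include <stdbool.h>
#include <stdio.h>

/* Last pfn below the 4GB boundary (4KB pages assumed). */
#define MAX_PFN_BELOW_4G (((4UL << 30) >> 12) - 1)

/* Stubs standing in for Xen's memory-map queries. */
static bool page_is_conventional_ram(unsigned long pfn) { return pfn < 0x1000; }
static bool page_is_unusable(unsigned long pfn) { (void)pfn; return false; }
static bool xen_in_range(unsigned long pfn) { return pfn >= 0x100 && pfn < 0x200; }

static bool should_map(unsigned long pfn, bool inclusive, bool strict)
{
    bool map;

    /* Inclusive: every pfn below 4GB that the E820 does not mark unusable. */
    if ( inclusive && pfn <= MAX_PFN_BELOW_4G )
        map = !page_is_unusable(pfn);
    else
        map = page_is_conventional_ram(pfn);

    /* Never expose Xen's own memory to Dom0 DMA. */
    if ( xen_in_range(pfn) )
        map = false;

    /* In strict mode conventional RAM is mapped by the common code instead. */
    if ( strict && page_is_conventional_ram(pfn) )
        map = false;

    return map;
}

int main(void)
{
    printf("RAM pfn, inclusive:      %d\n", should_map(0x50, true, false));
    printf("Xen pfn, inclusive:      %d\n", should_map(0x150, true, false));
    printf("reserved pfn, inclusive: %d\n", should_map(0x2000, true, false));
    printf("reserved pfn, default:   %d\n", should_map(0x2000, false, false));
    return 0;
}

With dom0-iommu=inclusive the reserved pfn gets a 1:1 mapping, while the
default policy leaves reserved regions to the RMRR/IVMD entries provided by
firmware.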