[Xen-devel] [PATCH v3] x86: suppress XPTI-related TLB flushes when possible
When there's no XPTI-enabled PV domain at all, there's no need to issue
the respective TLB flushes. Hardwire opt_xpti_* to false when !PV, and
record the creation of PV domains by bumping opt_xpti_* accordingly.
As for making opt_xpti_domu sticky while incrementing/decrementing
opt_xpti_hwdom: this is done to avoid
(a) widening the former variable,
(b) any risk of a missed flush, which would result in an XSA if a DomU
was able to exercise it, and
(c) any races updating the variable.
Fundamentally the TLB flush done when context switching out the domain's
vCPU-s for the last time before destroying the domain ought to be
sufficient, so in principle DomU handling could be made to match hwdom's.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v3: Re-base.
v2: Add comment to spec_ctrl.h. Explain difference in accounting of DomU
and hwdom.
---
TBD: The hardwiring to false could be extended to opt_pv_l1tf_* and (for
!HVM) opt_l1d_flush as well.
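
Purely illustrative and not part of the patch: a minimal standalone C
sketch of the intended opt_xpti_* value encoding and how the flush
avoidance uses it. Variable names mirror the patch; the two helper
functions are hypothetical stand-ins for the real call sites.

/* Hypothetical sketch of the opt_xpti_* encoding:
 *   -1  not established yet (command line parsing still pending)
 *    0  XPTI disabled
 *    1  XPTI enabled, but no such PV domain created so far
 *   >1  XPTI enabled, and at least one such domain exists
 *       (or, for DomU, has existed)
 */
#include <stdbool.h>
#include <stdint.h>

static int8_t opt_xpti_hwdom = 1, opt_xpti_domu = 1;

/* Mirrors the flushtlb.c change: XPTI-specific PCID flushes are only
 * needed once an XPTI-enabled domain is (or, for DomU, was) present. */
static bool need_xpti_flush(void)
{
    return opt_xpti_hwdom > 1 || opt_xpti_domu > 1;
}

/* Mirrors pv_domain_initialise(): record creation of a PV domain. */
static void account_pv_domain_create(bool hwdom)
{
    if ( hwdom && opt_xpti_hwdom )
        ++opt_xpti_hwdom;      /* 1 -> 2; undone again on destruction */
    if ( !hwdom && opt_xpti_domu )
        opt_xpti_domu = 2;     /* sticky: never decremented */
}

The destroy-side decrement only matters for domain 0 being replaced
under CONFIG_LATE_HWDOM; DomU accounting stays sticky for reasons
(a)-(c) above.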
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -218,7 +218,7 @@ unsigned int flush_area_local(const void
                  */
                 invpcid_flush_one(PCID_PV_PRIV, addr);
                 invpcid_flush_one(PCID_PV_USER, addr);
-                if ( opt_xpti_hwdom || opt_xpti_domu )
+                if ( opt_xpti_hwdom > 1 || opt_xpti_domu > 1 )
                 {
                     invpcid_flush_one(PCID_PV_PRIV | PCID_PV_XPTI, addr);
                     invpcid_flush_one(PCID_PV_USER | PCID_PV_XPTI, addr);
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -272,6 +272,9 @@ void pv_domain_destroy(struct domain *d)
     destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
                               GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
 
+    opt_xpti_hwdom -= IS_ENABLED(CONFIG_LATE_HWDOM) &&
+                      !d->domain_id && opt_xpti_hwdom;
+
     XFREE(d->arch.pv.cpuidmasks);
 
     FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
@@ -310,7 +313,16 @@ int pv_domain_initialise(struct domain *
     /* 64-bit PV guest by default. */
     d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
 
-    d->arch.pv.xpti = is_hardware_domain(d) ? opt_xpti_hwdom : opt_xpti_domu;
+    if ( is_hardware_domain(d) && opt_xpti_hwdom )
+    {
+        d->arch.pv.xpti = true;
+        ++opt_xpti_hwdom;
+    }
+    if ( !is_hardware_domain(d) && opt_xpti_domu )
+    {
+        d->arch.pv.xpti = true;
+        opt_xpti_domu = 2;
+    }
 
     if ( !is_pv_32bit_domain(d) && use_invpcid && cpu_has_pcid )
         switch ( ACCESS_ONCE(opt_pcid) )
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -85,10 +85,12 @@ static int __init parse_spec_ctrl(const
             opt_eager_fpu = 0;
 
+#ifdef CONFIG_PV
             if ( opt_xpti_hwdom < 0 )
                 opt_xpti_hwdom = 0;
             if ( opt_xpti_domu < 0 )
                 opt_xpti_domu = 0;
+#endif
 
             if ( opt_smt < 0 )
                 opt_smt = 1;
@@ -187,6 +189,7 @@ static int __init parse_spec_ctrl(const
 }
 custom_param("spec-ctrl", parse_spec_ctrl);
 
+#ifdef CONFIG_PV
 int8_t __read_mostly opt_xpti_hwdom = -1;
 int8_t __read_mostly opt_xpti_domu = -1;
@@ -253,6 +256,9 @@ static __init int parse_xpti(const char
     return rc;
 }
 custom_param("xpti", parse_xpti);
+#else /* !CONFIG_PV */
+# define xpti_init_default(caps) ((void)(caps))
+#endif /* CONFIG_PV */
 
 int8_t __read_mostly opt_pv_l1tf_hwdom = -1;
 int8_t __read_mostly opt_pv_l1tf_domu = -1;
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -43,7 +43,18 @@ extern bool bsp_delay_spec_ctrl;
 extern uint8_t default_xen_spec_ctrl;
 extern uint8_t default_spec_ctrl_flags;
 
+#ifdef CONFIG_PV
+/*
+ * Values -1, 0, and 1 have the usual meaning of "not established yet",
+ * "disabled", and "enabled". Values larger than 1 indicate there's actually
+ * at least one such domain (or there has been). This way XPTI-specific TLB
+ * flushes can be avoided when no XPTI-enabled domain is/was active.
+ */
 extern int8_t opt_xpti_hwdom, opt_xpti_domu;
+#else
+# define opt_xpti_hwdom false
+# define opt_xpti_domu false
+#endif
 
 extern int8_t opt_pv_l1tf_hwdom, opt_pv_l1tf_domu;
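
As an illustration of the !PV hardwiring above (a compiler-behavior
observation, not something the patch itself states): with
opt_xpti_hwdom and opt_xpti_domu #defined to false, a check such as

    if ( opt_xpti_hwdom > 1 || opt_xpti_domu > 1 )

preprocesses to if ( false > 1 || false > 1 ), a compile-time constant
zero, so the XPTI flush invocations it guards can be eliminated as dead
code.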