[Xen-devel] [PATCH 17/19] x86/vmce, tools/libxl: expose LMCE capability in guest MSR_IA32_MCG_CAP
If LMCE is supported by the host and "lmce = 1" is present in the xl
config, the LMCE capability will be exposed in the guest
MSR_IA32_MCG_CAP. By default, LMCE is not exposed to the guest, in
order to preserve backwards migration compatibility.
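As an illustration (not part of this patch), a guest kernel in such a
domain could detect the newly exposed capability roughly as sketched
below; the MSR index and the MCG_LMCE_P bit (bit 27 of IA32_MCG_CAP)
follow the Intel SDM, and the rdmsr() helper stands in for whatever
the guest OS already provides:

    /* Illustrative sketch only; must run at ring 0 inside the guest. */
    #include <stdint.h>

    #define MSR_IA32_MCG_CAP  0x179
    #define MCG_LMCE_P        (1ULL << 27)   /* LMCE capability bit */

    static inline uint64_t rdmsr(uint32_t msr)
    {
        uint32_t lo, hi;

        asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
        return ((uint64_t)hi << 32) | lo;
    }

    /* Non-zero iff the hypervisor advertised LMCE to this guest. */
    static int guest_has_lmce(void)
    {
        return !!(rdmsr(MSR_IA32_MCG_CAP) & MCG_LMCE_P);
    }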
Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Christoph Egger <chegger@xxxxxxxxx>
Cc: Liu Jinsong <jinsong.liu@xxxxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
docs/man/xl.cfg.pod.5.in | 18 ++++++++++++++++++
tools/libxl/libxl_create.c | 1 +
tools/libxl/libxl_dom.c | 2 ++
tools/libxl/libxl_types.idl | 1 +
tools/libxl/xl_cmdimpl.c | 3 +++
xen/arch/x86/cpu/mcheck/vmce.c | 14 +++++++++++++-
xen/arch/x86/hvm/hvm.c | 7 +++++++
xen/include/asm-x86/mce.h | 1 +
xen/include/public/hvm/params.h | 5 ++++-
9 files changed, 50 insertions(+), 2 deletions(-)
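(Reviewer note, not part of the patch: the new knob is "lmce = 1" in
the domain config.  libxl only forwards it as HVM_PARAM_LMCE, so the
same effect can be had directly through libxc, roughly as sketched
below; error handling is trimmed and "domid" is assumed to name an
existing HVM domain.)

    #include <xenctrl.h>

    /* Setting HVM_PARAM_LMCE (added by this patch) to 1 makes the
     * hypervisor call vmce_enable_lmce(); 0 leaves LMCE hidden, and
     * values above 1 are rejected with -EINVAL. */
    static int enable_guest_lmce(uint32_t domid)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
            return -1;

        rc = xc_hvm_param_set(xch, domid, HVM_PARAM_LMCE, 1);
        xc_interface_close(xch);

        return rc;
    }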
diff --git a/docs/man/xl.cfg.pod.5.in b/docs/man/xl.cfg.pod.5.in
index 46f9caf..1cdf372 100644
--- a/docs/man/xl.cfg.pod.5.in
+++ b/docs/man/xl.cfg.pod.5.in
@@ -2021,6 +2021,24 @@ natively or via hardware backwards compatibility support.
=back
+=head3 Intel
+
+=over 4
+
+=item B<lmce=BOOLEAN>
+
+(HVM only) Enable/disable LMCE support for an HVM domain.
+
+=over 4
+
+=item B<default>
+
+Disabled.
+
+=back
+
+=back
+
=head1 SEE ALSO
=over 4
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index e3bc257..381e5dc 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -324,6 +324,7 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc,
libxl_defbool_setdefault(&b_info->u.hvm.altp2m, false);
libxl_defbool_setdefault(&b_info->u.hvm.usb, false);
libxl_defbool_setdefault(&b_info->u.hvm.xen_platform_pci, true);
+ libxl_defbool_setdefault(&b_info->u.hvm.lmce, false);
libxl_defbool_setdefault(&b_info->u.hvm.spice.enable, false);
if (!libxl_defbool_val(b_info->u.hvm.spice.enable) &&
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index d519c8d..f04adf4 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -293,6 +293,8 @@ static void hvm_set_conf_params(xc_interface *handle, uint32_t domid,
libxl_defbool_val(info->u.hvm.nested_hvm));
xc_hvm_param_set(handle, domid, HVM_PARAM_ALTP2M,
libxl_defbool_val(info->u.hvm.altp2m));
+ xc_hvm_param_set(handle, domid, HVM_PARAM_LMCE,
+ libxl_defbool_val(info->u.hvm.lmce));
}
int libxl__build_pre(libxl__gc *gc, uint32_t domid,
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index a612d1f..3cb0d9a 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -550,6 +550,7 @@ libxl_domain_build_info = Struct("domain_build_info",[
("serial_list", libxl_string_list),
("rdm", libxl_rdm_reserve),
("rdm_mem_boundary_memkb", MemKB),
+ ("lmce", libxl_defbool),
])),
("pv", Struct(None, [("kernel", string),
("slack_memkb", MemKB),
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 37ebdce..4ed8e3e 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -1698,6 +1698,9 @@ static void parse_config_data(const char *config_source,
if (!xlu_cfg_get_long (config, "rdm_mem_boundary", &l, 0))
b_info->u.hvm.rdm_mem_boundary_memkb = l * 1024;
+
+ xlu_cfg_get_defbool(config, "lmce", &b_info->u.hvm.lmce, 0);
+
break;
case LIBXL_DOMAIN_TYPE_PV:
{
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 2a4d3f0..fa9b499 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -74,7 +74,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
unsigned long guest_mcg_cap;
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
- guest_mcg_cap = INTEL_GUEST_MCG_CAP;
+ guest_mcg_cap = INTEL_GUEST_MCG_CAP | (lmce_support ? MCG_LMCE_P : 0);
else
guest_mcg_cap = AMD_GUEST_MCG_CAP;
@@ -519,3 +519,15 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
return rc;
}
+int vmce_enable_lmce(struct domain *d)
+{
+ struct vcpu *v;
+
+ if ( !lmce_support )
+ return -EINVAL;
+
+ for_each_vcpu(d, v)
+ v->arch.vmce.mcg_cap |= MCG_LMCE_P;
+
+ return 0;
+}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 266f708..19389c0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4007,6 +4007,7 @@ static int hvm_allow_set_param(struct domain *d,
case HVM_PARAM_IOREQ_SERVER_PFN:
case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
case HVM_PARAM_ALTP2M:
+ case HVM_PARAM_LMCE:
if ( value != 0 && a->value != value )
rc = -EEXIST;
break;
@@ -4185,6 +4186,12 @@ static int hvmop_set_param(
}
d->arch.x87_fip_width = a.value;
break;
+ case HVM_PARAM_LMCE:
+ if ( a.value > 1 )
+ rc = -EINVAL;
+ else if ( a.value == 1 )
+ rc = vmce_enable_lmce(d);
+ break;
}
if ( rc != 0 )
diff --git a/xen/include/asm-x86/mce.h b/xen/include/asm-x86/mce.h
index 525a9e8..f5a9ff9 100644
--- a/xen/include/asm-x86/mce.h
+++ b/xen/include/asm-x86/mce.h
@@ -38,6 +38,7 @@ extern int vmce_restore_vcpu(struct vcpu *, const struct hvm_vmce_vcpu *);
extern int vmce_wrmsr(uint32_t msr, uint64_t val);
extern int vmce_rdmsr(uint32_t msr, uint64_t *val);
extern bool vmce_support_lmce(const struct vcpu *v);
+extern int vmce_enable_lmce(struct domain *d);
extern unsigned int nr_mce_banks;
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 3f54a49..6b6ecbe 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -253,6 +253,9 @@
*/
#define HVM_PARAM_X87_FIP_WIDTH 36
-#define HVM_NR_PARAMS 37
+/* Boolean: Enable LMCE */
+#define HVM_PARAM_LMCE 37
+
+#define HVM_NR_PARAMS 38
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
--
2.10.1