[Xen-devel] [PATCH v2 11/12] x86/altp2m: Add altp2mhvm HVM domain parameter.
The altp2mhvm and nestedhvm parameters are mutually exclusive and cannot
be set together.

Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
 docs/man/xl.cfg.pod.5           | 12 ++++++++++++
 tools/libxl/libxl_create.c      |  1 +
 tools/libxl/libxl_dom.c         |  2 ++
 tools/libxl/libxl_types.idl     |  1 +
 tools/libxl/xl_cmdimpl.c        |  8 ++++++++
 xen/arch/x86/hvm/hvm.c          | 15 ++++++++++++++-
 xen/include/public/hvm/params.h |  5 ++++-
 7 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
index a3e0e2e..18afd46 100644
--- a/docs/man/xl.cfg.pod.5
+++ b/docs/man/xl.cfg.pod.5
@@ -1035,6 +1035,18 @@ enabled by default and you should usually omit it. It may be necessary
 to disable the HPET in order to improve compatibility with guest
 Operating Systems (X86 only)
 
+=item B<altp2mhvm=BOOLEAN>
+
+Enables or disables hvm guest access to alternate-p2m capability.
+Alternate-p2m allows a guest to manage multiple p2m guest physical
+"memory views" (as opposed to a single p2m). This option is
+disabled by default and is available only to hvm domains.
+You may want this option if you want to access-control/isolate
+access to specific guest physical memory pages accessed by
+the guest, e.g. for HVM domain memory introspection or
+for isolation/access-control of memory between components within
+a single guest hvm domain.
+
 =item B<nestedhvm=BOOLEAN>
 
 Enable or disables guest access to hardware virtualisation features,
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index 86384d2..35e322e 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -329,6 +329,7 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc,
         libxl_defbool_setdefault(&b_info->u.hvm.hpet,               true);
         libxl_defbool_setdefault(&b_info->u.hvm.vpt_align,          true);
         libxl_defbool_setdefault(&b_info->u.hvm.nested_hvm,         false);
+        libxl_defbool_setdefault(&b_info->u.hvm.altp2mhvm,          false);
         libxl_defbool_setdefault(&b_info->u.hvm.usb,                false);
         libxl_defbool_setdefault(&b_info->u.hvm.xen_platform_pci,   true);
 
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 867172a..c925fec 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -300,6 +300,8 @@ static void hvm_set_conf_params(xc_interface *handle, uint32_t domid,
                     libxl_defbool_val(info->u.hvm.vpt_align));
     xc_hvm_param_set(handle, domid, HVM_PARAM_NESTEDHVM,
                     libxl_defbool_val(info->u.hvm.nested_hvm));
+    xc_hvm_param_set(handle, domid, HVM_PARAM_ALTP2MHVM,
+                    libxl_defbool_val(info->u.hvm.altp2mhvm));
 }
 
 int libxl__build_pre(libxl__gc *gc, uint32_t domid,
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 23f27d4..66a89cf 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -437,6 +437,7 @@ libxl_domain_build_info = Struct("domain_build_info",[
                                        ("mmio_hole_memkb", MemKB),
                                        ("timer_mode",       libxl_timer_mode),
                                        ("nested_hvm",       libxl_defbool),
+                                       ("altp2mhvm",        libxl_defbool),
                                        ("smbios_firmware",  string),
                                        ("acpi_firmware",    string),
                                        ("nographic",        libxl_defbool),
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index c858068..ccb0de9 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -1500,6 +1500,14 @@ static void parse_config_data(const char *config_source,
 
         xlu_cfg_get_defbool(config, "nestedhvm", &b_info->u.hvm.nested_hvm, 0);
 
+        xlu_cfg_get_defbool(config, "altp2mhvm", &b_info->u.hvm.altp2mhvm, 0);
+
+        if (strcmp(libxl_defbool_to_string(b_info->u.hvm.nested_hvm), "True") == 0 &&
+            strcmp(libxl_defbool_to_string(b_info->u.hvm.altp2mhvm), "True") == 0) {
+            fprintf(stderr, "ERROR: nestedhvm and altp2mhvm cannot be used together\n");
+            exit (1);
+        }
+
         xlu_cfg_replace_string(config, "smbios_firmware",
                                &b_info->u.hvm.smbios_firmware, 0);
         xlu_cfg_replace_string(config, "acpi_firmware",
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b3e74ce..8453489 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5732,6 +5732,7 @@ static int hvm_allow_set_param(struct domain *d,
     case HVM_PARAM_VIRIDIAN:
     case HVM_PARAM_IOREQ_SERVER_PFN:
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
+    case HVM_PARAM_ALTP2MHVM:
         if ( value != 0 && a->value != value )
             rc = -EEXIST;
         break;
@@ -5854,6 +5855,9 @@ static int hvmop_set_param(
          */
         if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
             rc = -EINVAL;
+        if ( a.value &&
+             d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] )
+            rc = -EINVAL;
         /* Set up NHVM state for any vcpus that are already up. */
         if ( a.value &&
              !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
@@ -5864,6 +5868,13 @@ static int hvmop_set_param(
             for_each_vcpu(d, v)
                 nestedhvm_vcpu_destroy(v);
         break;
+    case HVM_PARAM_ALTP2MHVM:
+        if ( a.value > 1 )
+            rc = -EINVAL;
+        if ( a.value &&
+             d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+            rc = -EINVAL;
+        break;
     case HVM_PARAM_BUFIOREQ_EVTCHN:
         rc = -EINVAL;
         break;
@@ -6437,7 +6448,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -ESRCH;
 
         rc = -EINVAL;
-        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() )
+        if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] )
             goto param_fail9;
 
         a.state = altp2mhvm_active(d);
@@ -6464,6 +6476,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         rc = -EINVAL;
         if ( !is_hvm_domain(d) || !hvm_altp2m_supported() ||
+             !d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] ||
              nestedhvm_enabled(d) )
             goto param_fail10;
 
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 7c73089..1b5f840 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -187,6 +187,9 @@
 /* Location of the VM Generation ID in guest physical address space. */
 #define HVM_PARAM_VM_GENERATION_ID_ADDR 34
 
-#define HVM_NR_PARAMS          35
+/* Boolean: Enable altp2m (hvm only) */
+#define HVM_PARAM_ALTP2MHVM    35
+
+#define HVM_NR_PARAMS          36
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
-- 
1.9.1
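For anyone trying out the series: at the xl level the switch is simply
altp2mhvm=1 in the guest config (with nestedhvm left disabled, since the
two are mutually exclusive), and the same parameter can be driven directly
through libxc. The fragment below is only an illustrative sketch, not part
of the patch. It uses the libxc xc_hvm_param_set()/xc_hvm_param_get()
helpers together with the new HVM_PARAM_ALTP2MHVM; the domid is a
placeholder and error handling is kept to the bare minimum.

/*
 * Illustrative sketch only: toggle and query the new altp2mhvm parameter
 * for an already-running HVM guest.  The domid below is a placeholder.
 */
#include <stdio.h>
#include <stdint.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

int main(void)
{
    uint32_t domid = 1;                     /* placeholder guest domid */
    uint64_t nested = 0, altp2m = 0;
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);

    if ( !xch )
        return 1;

    /* hvmop_set_param refuses altp2mhvm while nestedhvm is enabled. */
    xc_hvm_param_get(xch, domid, HVM_PARAM_NESTEDHVM, &nested);
    if ( !nested &&
         xc_hvm_param_set(xch, domid, HVM_PARAM_ALTP2MHVM, 1) != 0 )
        fprintf(stderr, "failed to enable HVM_PARAM_ALTP2MHVM\n");

    xc_hvm_param_get(xch, domid, HVM_PARAM_ALTP2MHVM, &altp2m);
    printf("altp2mhvm: %s\n", altp2m ? "enabled" : "disabled");

    xc_interface_close(xch);
    return 0;
}

Enforcing the nestedhvm/altp2mhvm exclusion in hvmop_set_param as well as
in xl means the restriction also holds for toolstacks that bypass xl, which
is why the patch adds both the config-file check and the hypervisor-side
check.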