[Xen-devel] [PATCH v6 2/5] x86/msr: add VMX MSRs into HVM_max domain policy
Currently, when nested virt is enabled, the set of L1 VMX features is
fixed and calculated by nvmx_msr_read_intercept() as an intersection
between the full set of Xen's supported L1 VMX features, the set of
actual H/W features and, for MSR_IA32_VMX_EPT_VPID_CAP, the set of
features that Xen uses.

Add calculate_hvm_max_vmx_policy() which will save the end result of
nvmx_msr_read_intercept() on current H/W into the HVM_max domain
policy. There will be no functional change to what L1 sees in VMX
MSRs; the actual use of the HVM_max domain policy will happen later,
when VMX MSRs are handled by guest_rd/wrmsr().

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
v5 --> v6:
- Replaced the !cpu_has_vmx check with !hvm_max_cpuid_policy.basic.vmx
- Added a TODO reminder
- Added brackets around bitwise-or expressions
---
 xen/arch/x86/msr.c | 135 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 135 insertions(+)

diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 87239e151e..01a5b52f95 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -106,6 +106,139 @@ static void __init calculate_host_policy(void)
     dp->plaform_info.cpuid_faulting = cpu_has_cpuid_faulting;
 }
 
+static void vmx_clear_policy(struct msr_domain_policy *dp)
+{
+    memset(dp->vmx.raw, 0, sizeof(dp->vmx.raw));
+    dp->vmx_procbased_ctls2.raw = 0;
+    dp->vmx_ept_vpid_cap.raw = 0;
+    memset(dp->vmx_true_ctls.raw, 0, sizeof(dp->vmx_true_ctls.raw));
+    dp->vmx_vmfunc.raw = 0;
+}
+
+static void __init calculate_hvm_max_vmx_policy(struct msr_domain_policy *dp)
+{
+    const struct msr_domain_policy *hp = &host_msr_domain_policy;
+    uint32_t supported;
+
+    if ( !hvm_max_cpuid_policy.basic.vmx )
+        return;
+
+    vmx_clear_policy(dp);
+
+    /* TODO: actually make vmx features selection sane */
+    dp->vmx.basic.raw = hp->vmx.basic.raw;
+
+    dp->vmx.pinbased_ctls.allowed_0.raw = VMX_PINBASED_CTLS_DEFAULT1;
+    dp->vmx.pinbased_ctls.allowed_1.raw = VMX_PINBASED_CTLS_DEFAULT1;
+    supported = (PIN_BASED_EXT_INTR_MASK |
+                 PIN_BASED_NMI_EXITING |
+                 PIN_BASED_PREEMPT_TIMER);
+    dp->vmx.pinbased_ctls.allowed_1.raw |= supported;
+    dp->vmx.pinbased_ctls.allowed_1.raw &= hp->vmx.pinbased_ctls.allowed_1.raw;
+
+    dp->vmx.procbased_ctls.allowed_0.raw = VMX_PROCBASED_CTLS_DEFAULT1;
+    dp->vmx.procbased_ctls.allowed_1.raw = VMX_PROCBASED_CTLS_DEFAULT1;
+    supported = (CPU_BASED_HLT_EXITING |
+                 CPU_BASED_VIRTUAL_INTR_PENDING |
+                 CPU_BASED_CR8_LOAD_EXITING |
+                 CPU_BASED_CR8_STORE_EXITING |
+                 CPU_BASED_INVLPG_EXITING |
+                 CPU_BASED_MONITOR_EXITING |
+                 CPU_BASED_MWAIT_EXITING |
+                 CPU_BASED_MOV_DR_EXITING |
+                 CPU_BASED_ACTIVATE_IO_BITMAP |
+                 CPU_BASED_USE_TSC_OFFSETING |
+                 CPU_BASED_UNCOND_IO_EXITING |
+                 CPU_BASED_RDTSC_EXITING |
+                 CPU_BASED_MONITOR_TRAP_FLAG |
+                 CPU_BASED_VIRTUAL_NMI_PENDING |
+                 CPU_BASED_ACTIVATE_MSR_BITMAP |
+                 CPU_BASED_PAUSE_EXITING |
+                 CPU_BASED_RDPMC_EXITING |
+                 CPU_BASED_TPR_SHADOW |
+                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
+    dp->vmx.procbased_ctls.allowed_1.raw |= supported;
+    dp->vmx.procbased_ctls.allowed_1.raw &=
+        hp->vmx.procbased_ctls.allowed_1.raw;
+
+    dp->vmx.exit_ctls.allowed_0.raw = VMX_EXIT_CTLS_DEFAULT1;
+    dp->vmx.exit_ctls.allowed_1.raw = VMX_EXIT_CTLS_DEFAULT1;
+    supported = (VM_EXIT_ACK_INTR_ON_EXIT |
+                 VM_EXIT_IA32E_MODE |
+                 VM_EXIT_SAVE_PREEMPT_TIMER |
+                 VM_EXIT_SAVE_GUEST_PAT |
+                 VM_EXIT_LOAD_HOST_PAT |
+                 VM_EXIT_SAVE_GUEST_EFER |
+                 VM_EXIT_LOAD_HOST_EFER |
+                 VM_EXIT_LOAD_PERF_GLOBAL_CTRL);
+    dp->vmx.exit_ctls.allowed_1.raw |= supported;
+    dp->vmx.exit_ctls.allowed_1.raw &= hp->vmx.exit_ctls.allowed_1.raw;
+
+    dp->vmx.entry_ctls.allowed_0.raw = VMX_ENTRY_CTLS_DEFAULT1;
+    dp->vmx.entry_ctls.allowed_1.raw = VMX_ENTRY_CTLS_DEFAULT1;
+    supported = (VM_ENTRY_LOAD_GUEST_PAT |
+                 VM_ENTRY_LOAD_GUEST_EFER |
+                 VM_ENTRY_LOAD_PERF_GLOBAL_CTRL |
+                 VM_ENTRY_IA32E_MODE);
+    dp->vmx.entry_ctls.allowed_1.raw |= supported;
+    dp->vmx.entry_ctls.allowed_1.raw &= hp->vmx.entry_ctls.allowed_1.raw;
+
+    dp->vmx.misc.raw = hp->vmx.misc.raw;
+    /* Do not support CR3-target feature now */
+    dp->vmx.misc.cr3_target = false;
+
+    /* PG, PE bits must be 1 in VMX operation */
+    dp->vmx.cr0_fixed0.allowed_0.pe = true;
+    dp->vmx.cr0_fixed0.allowed_0.pg = true;
+
+    /* allow 0-settings for all bits */
+    dp->vmx.cr0_fixed1.allowed_1.raw = 0xffffffff;
+
+    /* VMXE bit must be 1 in VMX operation */
+    dp->vmx.cr4_fixed0.allowed_0.vmxe = true;
+
+    /*
+     * Allowed CR4 bits will be updated during domain creation by
+     * hvm_cr4_guest_valid_bits()
+     */
+    dp->vmx.cr4_fixed1.allowed_1.raw = hp->vmx.cr4_fixed1.allowed_1.raw;
+
+    /* The max index of VVMCS encoding is 0x1f. */
+    dp->vmx.vmcs_enum.max_encoding = 0x1f;
+
+    if ( vmx_procbased_ctls2_available(dp) )
+    {
+        supported = (SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING |
+                     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+                     SECONDARY_EXEC_ENABLE_VPID |
+                     SECONDARY_EXEC_UNRESTRICTED_GUEST |
+                     SECONDARY_EXEC_ENABLE_EPT);
+        dp->vmx_procbased_ctls2.allowed_1.raw |= supported;
+        dp->vmx_procbased_ctls2.allowed_1.raw &=
+            hp->vmx_procbased_ctls2.allowed_1.raw;
+    }
+
+    if ( vmx_ept_vpid_cap_available(dp) )
+        dp->vmx_ept_vpid_cap.raw = nept_get_ept_vpid_cap();
+
+    if ( vmx_true_ctls_available(dp) )
+    {
+        dp->vmx_true_ctls.pinbased.raw = dp->vmx.pinbased_ctls.raw;
+
+        dp->vmx_true_ctls.procbased.raw = dp->vmx.procbased_ctls.raw;
+        supported = (CPU_BASED_CR3_LOAD_EXITING |
+                     CPU_BASED_CR3_STORE_EXITING);
+        dp->vmx_true_ctls.procbased.raw |= supported;
+        dp->vmx_true_ctls.procbased.raw &= hp->vmx_true_ctls.procbased.raw;
+
+        dp->vmx_true_ctls.exit.raw = dp->vmx.exit_ctls.raw;
+
+        dp->vmx_true_ctls.entry.raw = dp->vmx.entry_ctls.raw;
+    }
+
+    /* MSR_IA32_VMX_VMFUNC is N/A */
+}
+
 static void __init calculate_hvm_max_policy(void)
 {
     struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
@@ -127,6 +260,8 @@ static void __init calculate_hvm_max_policy(void)
 
     /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
     vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
+
+    calculate_hvm_max_vmx_policy(dp);
 }
 
 static void __init calculate_pv_max_policy(void)
-- 
2.14.1
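As background to the per-MSR pattern repeated throughout the patch:
Intel's VMX capability MSRs (e.g. MSR_IA32_VMX_PINBASED_CTLS) report
the "allowed 0-settings" of each control in the low 32 bits (a set bit
means the control must be 1) and the "allowed 1-settings" in the high
32 bits (a set bit means the control may be 1). Below is a minimal
standalone sketch of the derivation performed for each control MSR;
the helper name and free-standing form are hypothetical and it uses
the raw 64-bit host MSR value rather than Xen's policy structures.

#include <stdint.h>

/*
 * Sketch only: mirrors the masking pattern used in
 * calculate_hvm_max_vmx_policy() for a single VMX control MSR.
 */
static uint64_t vmx_ctls_policy(uint32_t default1,  /* must-be-1 controls */
                                uint32_t supported, /* Xen's L1 feature set */
                                uint64_t host_msr)  /* raw host MSR value */
{
    uint32_t allowed_0 = default1;             /* these may never be 0 */
    uint32_t allowed_1 = default1 | supported; /* these may be set... */

    /* ...but never advertise a control the hardware itself lacks. */
    allowed_1 &= (uint32_t)(host_msr >> 32);

    return ((uint64_t)allowed_1 << 32) | allowed_0;
}

For instance, the pin-based value built above corresponds to
vmx_ctls_policy(VMX_PINBASED_CTLS_DEFAULT1, PIN_BASED_EXT_INTR_MASK |
PIN_BASED_NMI_EXITING | PIN_BASED_PREEMPT_TIMER, <host pin-based MSR>).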