
Re: [XEN PATCH v2 08/15] x86/vpmu: guard vmx/svm calls with cpu_has_{vmx,svm}



On Wed, 15 May 2024, Sergiy Kibrik wrote:
> If VMX/SVM is disabled in the build, we may still want to have vPMU drivers
> for PV guests. Yet some calls to vmx/svm-related routines then need to be
> guarded.
> 
> Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>

Question to the x86 maintainers: are we sure we want to support the case
where VMX/SVM is disabled in the build but we still want to run PV guests
with vPMU?

If the answer is no, could we simplify this by making vpmu_amd depend on
CONFIG_SVM and vpmu_intel depend on CONFIG_VMX? (Rough sketch below.)

I realize that it is possible and technically correct to disable
CONFIG_SVM (or CONFIG_VMX) to run on AMD hardware (or Intel) with plain PV
guests only. But do we want to support it? I wonder if we could make
things easier by not supporting this configuration until somebody asks
for it.
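
For concreteness, the kind of simplification I have in mind is roughly the
below, done once at the vendor dispatch in vpmu.c rather than at every
vmx/svm call site inside the drivers. This is only an untested sketch, and
the names (svm_vpmu_initialise, vmx_vpmu_initialise, the vpmu_vendor_init
helper) are from memory or made up for illustration, so please read it as
the idea rather than an actual patch:

    /*
     * Untested sketch: assume vpmu_amd.o / vpmu_intel.o are only built
     * when CONFIG_SVM / CONFIG_VMX are set (e.g. obj-$(CONFIG_SVM) +=
     * vpmu_amd.o in the Makefile), and guard the vendor dispatch once
     * here instead of each vmx/svm call in the drivers.
     */
    static int vpmu_vendor_init(struct vcpu *v)   /* hypothetical helper */
    {
        switch ( current_cpu_data.x86_vendor )
        {
        case X86_VENDOR_AMD:
        case X86_VENDOR_HYGON:
            /* IS_ENABLED() folds at build time, so the call should be
             * compiled out when CONFIG_SVM is off. */
            return IS_ENABLED(CONFIG_SVM) ? svm_vpmu_initialise(v) : -ENODEV;

        case X86_VENDOR_INTEL:
            return IS_ENABLED(CONFIG_VMX) ? vmx_vpmu_initialise(v) : -ENODEV;

        default:
            return -ENODEV;
        }
    }

With something along those lines, the cpu_has_{vmx,svm} checks this patch
adds inside vpmu_amd.c / vpmu_intel.c would not be needed at all.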


> ---
>  xen/arch/x86/cpu/vpmu_amd.c   |  8 ++++----
>  xen/arch/x86/cpu/vpmu_intel.c | 20 ++++++++++----------
>  2 files changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
> index db2fa420e1..40b0c8932f 100644
> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -290,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
>      context_save(v);
>  
>      if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> -         is_msr_bitmap_on(vpmu) )
> +         is_msr_bitmap_on(vpmu) && cpu_has_svm )
>          amd_vpmu_unset_msr_bitmap(v);
>  
>      if ( to_guest )
> @@ -363,7 +363,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>              return 0;
>          vpmu_set(vpmu, VPMU_RUNNING);
>  
> -        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
>               amd_vpmu_set_msr_bitmap(v);
>      }
>  
> @@ -372,7 +372,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
>      {
>          vpmu_reset(vpmu, VPMU_RUNNING);
> -        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
>               amd_vpmu_unset_msr_bitmap(v);
>          release_pmu_ownership(PMU_OWNER_HVM);
>      }
> @@ -415,7 +415,7 @@ static void cf_check amd_vpmu_destroy(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
> -    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
>          amd_vpmu_unset_msr_bitmap(v);
>  
>      xfree(vpmu->context);
> diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
> index cd414165df..10c34a5691 100644
> --- a/xen/arch/x86/cpu/vpmu_intel.c
> +++ b/xen/arch/x86/cpu/vpmu_intel.c
> @@ -269,7 +269,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
>      if ( !is_hvm_vcpu(v) )
>          rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
>      /* Save MSR to private context to make it fork-friendly */
> -    else if ( mem_sharing_enabled(v->domain) )
> +    else if ( mem_sharing_enabled(v->domain) && cpu_has_vmx )
>          vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                             &core2_vpmu_cxt->global_ctrl);
>  }
> @@ -288,7 +288,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
>  
>      /* Unset PMU MSR bitmap to trap lazy load. */
>      if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> -         cpu_has_vmx_msr_bitmap )
> +         cpu_has_vmx && cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v);
>  
>      if ( to_guest )
> @@ -333,7 +333,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
>          wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
>      }
>      /* Restore MSR from context when used with a fork */
> -    else if ( mem_sharing_is_fork(v->domain) )
> +    else if ( mem_sharing_is_fork(v->domain) && cpu_has_vmx )
>          vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                              core2_vpmu_cxt->global_ctrl);
>  }
> @@ -442,7 +442,7 @@ static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
>      if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
>          return 0;
>  
> -    if ( is_hvm_vcpu(v) )
> +    if ( is_hvm_vcpu(v) && cpu_has_vmx )
>      {
>          if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
>              goto out_err;
> @@ -513,7 +513,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
>          __core2_vpmu_load(current);
>          vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
>  
> -        if ( is_hvm_vcpu(current) && cpu_has_vmx_msr_bitmap )
> +        if ( is_hvm_vcpu(current) && cpu_has_vmx && cpu_has_vmx_msr_bitmap )
>              core2_vpmu_set_msr_bitmap(current);
>      }
>      return 1;
> @@ -584,7 +584,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          if ( msr_content & fixed_ctrl_mask )
>              return -EINVAL;
>  
> -        if ( is_hvm_vcpu(v) )
> +        if ( is_hvm_vcpu(v) && cpu_has_vmx )
>              vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                                 &core2_vpmu_cxt->global_ctrl);
>          else
> @@ -653,7 +653,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>              if ( blocked )
>                  return -EINVAL;
>  
> -            if ( is_hvm_vcpu(v) )
> +            if ( is_hvm_vcpu(v) && cpu_has_vmx)
>                  vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                                     &core2_vpmu_cxt->global_ctrl);
>              else
> @@ -672,7 +672,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          wrmsrl(msr, msr_content);
>      else
>      {
> -        if ( is_hvm_vcpu(v) )
> +        if ( is_hvm_vcpu(v) && cpu_has_vmx )
>              vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
>          else
>              wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> @@ -706,7 +706,7 @@ static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
>              *msr_content = core2_vpmu_cxt->global_status;
>              break;
>          case MSR_CORE_PERF_GLOBAL_CTRL:
> -            if ( is_hvm_vcpu(v) )
> +            if ( is_hvm_vcpu(v) && cpu_has_vmx )
>                  vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
>              else
>                  rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
> @@ -808,7 +808,7 @@ static void cf_check core2_vpmu_destroy(struct vcpu *v)
>      vpmu->context = NULL;
>      xfree(vpmu->priv_context);
>      vpmu->priv_context = NULL;
> -    if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
> +    if ( is_hvm_vcpu(v) && cpu_has_vmx && cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v);
>      release_pmu_ownership(PMU_OWNER_HVM);
>      vpmu_clear(vpmu);
> -- 
> 2.25.1
> 



 

