[xen master] x86/vpmu: guard calls to vmx/svm functions



commit cddbee85d0c0386a2126c4698c1f63baeaf292f1
Author:     Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
AuthorDate: Tue Aug 6 08:35:09 2024 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Aug 6 08:35:09 2024 +0200

    x86/vpmu: guard calls to vmx/svm functions
    
    If VMX/SVM is disabled in the build, we may still want to have vPMU drivers
    for PV guests. Yet in such a case, before using VMX/SVM features and
    functions we have to explicitly check whether they're available in the
    build. For this purpose (and also to keep the conditionals simple) two
    helpers are introduced -- is_{vmx,svm}_vcpu(v) -- which check both the HVM
    and VMX/SVM conditions at the same time, and which replace the
    is_hvm_vcpu(v) macro in the Intel/AMD PMU drivers.
    
    Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/vpmu_amd.c   | 11 ++++++-----
 xen/arch/x86/cpu/vpmu_intel.c | 28 +++++++++++++++-------------
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 97e6315bd9..a082450e92 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -27,6 +27,7 @@
 #define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))
 #define set_guest_mode(msr) ((msr) |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
 #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH - 1))))
+#define is_svm_vcpu(v) (IS_ENABLED(CONFIG_AMD_SVM) && is_hvm_vcpu(v))
 
 static unsigned int __read_mostly num_counters;
 static const u32 __read_mostly *counters;
@@ -289,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
 
     context_save(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_svm_vcpu(v) &&
          is_msr_bitmap_on(vpmu) )
         amd_vpmu_unset_msr_bitmap(v);
 
@@ -349,7 +350,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         return -EINVAL;
 
     /* For all counters, enable guest only mode for HVM guest */
-    if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
+    if ( is_svm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
          !is_guest_mode(msr_content) )
     {
         set_guest_mode(msr_content);
@@ -363,7 +364,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return 0;
         vpmu_set(vpmu, VPMU_RUNNING);
 
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
              amd_vpmu_set_msr_bitmap(v);
     }
 
@@ -372,7 +373,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
     {
         vpmu_reset(vpmu, VPMU_RUNNING);
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
              amd_vpmu_unset_msr_bitmap(v);
         release_pmu_ownership(PMU_OWNER_HVM);
     }
@@ -415,7 +416,7 @@ static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+    if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
         amd_vpmu_unset_msr_bitmap(v);
 
     xfree(vpmu->context);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 26dd3a9358..7ce98ee42e 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -54,6 +54,8 @@
 #define MSR_PMC_ALIAS_MASK       (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
 static bool __read_mostly full_width_write;
 
+#define is_vmx_vcpu(v) (IS_ENABLED(CONFIG_INTEL_VMX) && is_hvm_vcpu(v))
+
 /*
  * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
  * counters. 4 bits for every counter.
@@ -266,7 +268,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_P6_EVNTSEL(i), xen_pmu_cntr_pair[i].control);
     }
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_vmx_vcpu(v) )
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
     /* Save MSR to private context to make it fork-friendly */
     else if ( mem_sharing_enabled(v->domain) )
@@ -278,7 +280,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_vmx_vcpu(v) )
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
     if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
@@ -287,7 +289,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
     __core2_vpmu_save(v);
 
     /* Unset PMU MSR bitmap to trap lazy load. */
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_vmx_vcpu(v) &&
          cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v);
 
@@ -326,7 +328,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     if ( vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
         wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
 
-    if ( !is_hvm_vcpu(v) )
+    if ( !is_vmx_vcpu(v) )
     {
         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
         core2_vpmu_cxt->global_ovf_ctrl = 0;
@@ -381,7 +383,7 @@ static int core2_vpmu_verify(struct vcpu *v)
     }
 
     if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) &&
-         !(is_hvm_vcpu(v)
+         !(is_vmx_vcpu(v)
            ? is_canonical_address(core2_vpmu_cxt->ds_area)
            : __addr_ok(core2_vpmu_cxt->ds_area)) )
         return -EINVAL;
@@ -442,7 +444,7 @@ static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
     if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
         return 0;
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_vmx_vcpu(v) )
     {
         if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
             goto out_err;
@@ -513,7 +515,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
         __core2_vpmu_load(current);
         vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 
-        if ( is_hvm_vcpu(current) && cpu_has_vmx_msr_bitmap )
+        if ( is_vmx_vcpu(current) && cpu_has_vmx_msr_bitmap )
             core2_vpmu_set_msr_bitmap(current);
     }
     return 1;
@@ -562,7 +564,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return -EINVAL;
         if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
         {
-            if ( !(is_hvm_vcpu(v) ? is_canonical_address(msr_content)
+            if ( !(is_vmx_vcpu(v) ? is_canonical_address(msr_content)
                                   : __addr_ok(msr_content)) )
             {
                 gdprintk(XENLOG_WARNING,
@@ -584,7 +586,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         if ( msr_content & fixed_ctrl_mask )
             return -EINVAL;
 
-        if ( is_hvm_vcpu(v) )
+        if ( is_vmx_vcpu(v) )
             vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                &core2_vpmu_cxt->global_ctrl);
         else
@@ -653,7 +655,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             if ( blocked )
                 return -EINVAL;
 
-            if ( is_hvm_vcpu(v) )
+            if ( is_vmx_vcpu(v) )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                    &core2_vpmu_cxt->global_ctrl);
             else
@@ -673,7 +675,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         wrmsrl(msr, msr_content);
     else
     {
-        if ( is_hvm_vcpu(v) )
+        if ( is_vmx_vcpu(v) )
             vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
         else
             wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
@@ -707,7 +709,7 @@ static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             *msr_content = core2_vpmu_cxt->global_status;
             break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
-            if ( is_hvm_vcpu(v) )
+            if ( is_vmx_vcpu(v) )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
             else
                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
@@ -810,7 +812,7 @@ static void cf_check core2_vpmu_destroy(struct vcpu *v)
     vpmu->context = NULL;
     xfree(vpmu->priv_context);
     vpmu->priv_context = NULL;
-    if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
+    if ( is_vmx_vcpu(v) && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v);
     release_pmu_ownership(PMU_OWNER_HVM);
     vpmu_clear(vpmu);
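
To see what the new guards buy in a build with VMX/SVM disabled, here is a
minimal standalone C sketch (hypothetical demo_* names, with a plain 0/1
define standing in for Xen's IS_ENABLED(); an illustration, not Xen code):
because the config check is a compile-time constant, the guarded SVM-only
call is still parsed and type-checked, but the branch folds away as dead
code when the feature is not built in.

#include <stdbool.h>
#include <stdio.h>

/* Pretend SVM support is compiled out; set to 1 to model an SVM-enabled build. */
#define CONFIG_DEMO_SVM 0

/* Stand-ins for is_hvm_vcpu() and an SVM-specific helper. */
static bool demo_is_hvm_vcpu(int vcpu_id) { return vcpu_id >= 0; }
static void demo_svm_set_msr_bitmap(int vcpu_id)
{
    printf("SVM MSR bitmap set for vcpu %d\n", vcpu_id);
}

/* Mirrors the shape of is_svm_vcpu(v): build option AND runtime HVM check. */
#define demo_is_svm_vcpu(v) (CONFIG_DEMO_SVM && demo_is_hvm_vcpu(v))

int main(void)
{
    int vcpu_id = 0;

    if ( demo_is_svm_vcpu(vcpu_id) )      /* constant-folded to false here */
        demo_svm_set_msr_bitmap(vcpu_id);
    else
        printf("SVM path skipped for vcpu %d\n", vcpu_id);

    return 0;
}

With CONFIG_DEMO_SVM set to 1 the same source takes the SVM branch for HVM
vCPUs, which is why the helpers can simply replace is_hvm_vcpu() at each
call site in the drivers above.
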
--
generated by git-patchbot for /home/xen/git/xen.git#master
