
Re: [PATCH 4/4] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept()


  • To: Xenia Ragiadakou <burzalodowa@xxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 1 Mar 2023 10:14:56 +0100
  • Cc: Roger Pau Monné <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Jun Nakajima <jun.nakajima@xxxxxxxxx>, Kevin Tian <kevin.tian@xxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Wed, 01 Mar 2023 09:15:10 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 28.02.2023 21:14, Xenia Ragiadakou wrote:
> 
> On 2/28/23 16:58, Jan Beulich wrote:
>> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>>> Add hvm_funcs hooks for {set,clear}_msr_intercept() for controlling MSR
>>> intercepts in common vPMU code.
>>
>> What is this going to buy us? All calls ...
>>
>>> --- a/xen/arch/x86/cpu/vpmu_amd.c
>>> +++ b/xen/arch/x86/cpu/vpmu_amd.c
>>> @@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
>>>   
>>>       for ( i = 0; i < num_counters; i++ )
>>>       {
>>> -        svm_clear_msr_intercept(v, counters[i], MSR_RW);
>>> -        svm_set_msr_intercept(v, ctrls[i], MSR_W);
>>> -        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
>>> +        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
>>> +        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
>>> +        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
>>>       }
>>>   
>>>       msr_bitmap_on(vpmu);
>>> @@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
>>>   
>>>       for ( i = 0; i < num_counters; i++ )
>>>       {
>>> -        svm_set_msr_intercept(v, counters[i], MSR_RW);
>>> -        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
>>> +        hvm_set_msr_intercept(v, counters[i], MSR_RW);
>>> +        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
>>>       }
>>>   
>>>       msr_bitmap_off(vpmu);
>>
>> ... here will go to the SVM functions anyway, while ...
>>
>>> --- a/xen/arch/x86/cpu/vpmu_intel.c
>>> +++ b/xen/arch/x86/cpu/vpmu_intel.c
>>> @@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
>>>   
>>>       /* Allow Read/Write PMU Counters MSR Directly. */
>>>       for ( i = 0; i < fixed_pmc_cnt; i++ )
>>> -        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>> +        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>>   
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>>       {
>>> -        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>> +        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>>   
>>>           if ( full_width_write )
>>> -            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>> +            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>>       }
>>>   
>>>       /* Allow Read PMU Non-global Controls Directly. */
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>> -        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>> +        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>>   
>>> -    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> -    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>> +    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> +    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>>   }
>>>   
>>>   static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>>> @@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
>>>       unsigned int i;
>>>   
>>>       for ( i = 0; i < fixed_pmc_cnt; i++ )
>>> -        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>> +        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
>>>   
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>>       {
>>> -        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>> +        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
>>>   
>>>           if ( full_width_write )
>>> -            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>> +            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
>>>       }
>>>   
>>>       for ( i = 0; i < arch_pmc_cnt; i++ )
>>> -        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>> +        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
>>>   
>>> -    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> -    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>> +    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
>>> +    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
>>>   }
>>>   
>>>   static inline void __core2_vpmu_save(struct vcpu *v)
>>
>> ... all calls here will go to the VMX ones. For making either vpmu_<vendor>.c
>> build without that vendor's virtualization enabled, isn't all it
>> takes to have ...
>>
>>> @@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>>>       ASSERT_UNREACHABLE();
>>>   }
>>>   
>>> +static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
>>> +                                         int flags)
>>> +{
>>> +    ASSERT_UNREACHABLE();
>>> +}
>>> +
>>> +static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
>>> +                                           int flags)
>>> +{
>>> +    ASSERT_UNREACHABLE();
>>> +}
>>
>> ... respective SVM and VMX stubs in place instead?
> 
> IMO it is more readable, and they looked like very good candidates for being
> abstracted, because they do the same thing under both technologies.
> Are you suggesting that their usage in common code should be discouraged
> and that they should not be exported via the hvm_funcs interface? Or just
> that the amount of changes cannot be justified?
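
(For reference, the abstraction at issue presumably boils down to something
like the sketch below; the hook and wrapper names follow the stubs quoted
above, while the exact table wiring is an assumption, and a plain indirect
call stands in for whatever call mechanism the patch actually uses:)

    struct hvm_function_table {
        /* ... existing hooks ... */
        void (*set_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
        void (*clear_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
    };

    /* Wrappers usable from vendor-independent code such as the vPMU. */
    static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                             int flags)
    {
        hvm_funcs.set_msr_intercept(v, msr, flags);
    }

    static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
                                               int flags)
    {
        hvm_funcs.clear_msr_intercept(v, msr, flags);
    }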

The amount of changes is okay if the route taken is deemed useful going
forward. For now I view vPMU as too special to provide sufficient
justification for yet another pair of hook functions. All the more so
since, as indicated before, every single call site will only ever call one
of the two possible callees.
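
To illustrate, the stub route would mean something along these lines in the
vendor headers (a sketch only: the CONFIG_SVM guard name is an assumption,
and vmx.h would gain the analogous pair for vpmu_intel.c):

    /* svm.h */
    #ifdef CONFIG_SVM
    void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
    void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
    #else
    /* Keep vpmu_amd.c building when SVM support is configured out. */
    static inline void svm_set_msr_intercept(struct vcpu *v, uint32_t msr,
                                             int flags)
    {
        ASSERT_UNREACHABLE();
    }

    static inline void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
                                               int flags)
    {
        ASSERT_UNREACHABLE();
    }
    #endif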

> IIUC Andrew also suggested using hvm_funcs for MSR intercept handling,
> but I'm not sure whether he had this or something else in mind.

Andrew, any chance you could outline your thinking / further plans here?
In particular, do you have other future use sites in mind that would
live outside of vendor-specific code?

Jan