
Re: [XEN][PATCH] x86/hvm: move hvm_shadow_handle_cd() under CONFIG_INTEL_VMX ifdef



On 23/10/2025 at 17:22, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
>
> Functions:
>   hvm_shadow_handle_cd()
>   hvm_set_uc_mode()
>   domain_exit_uc_mode()
> are used only by Intel VMX code, so move them under a CONFIG_INTEL_VMX ifdef.
>

If they are actually Intel VMX specific, they should be moved into the
VMX code (and renamed appropriately) rather than ifdef'd in shared HVM
code. If AMD code happens to need these functions in the future, things
would break in pretty confusing ways, since the headers are not updated
consistently with the ifdef (see the sketch below).
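
For illustration, a minimal sketch of that failure mode; the SVM caller
is hypothetical, and the declaration is assumed to stay visible
unconditionally in a shared header:

    /* Shared HVM header: declaration remains unconditional. */
    void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);

    /* Hypothetical future caller in SVM code: */
    static void svm_handle_cd(struct vcpu *v, unsigned long value)
    {
        hvm_shadow_handle_cd(v, value);  /* compiles cleanly... */
    }

    /*
     * ...but with CONFIG_INTEL_VMX=n the definition in hvm.c is
     * compiled out, so the build only fails at the link stage:
     *     undefined reference to `hvm_shadow_handle_cd'
     */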

> Signed-off-by: Grygorii Strashko <grygorii_strashko@xxxxxxxx>
> ---
>   xen/arch/x86/hvm/hvm.c | 50 ++++++++++++++++++++++--------------------
>   1 file changed, 26 insertions(+), 24 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index f1035fc9f645..3a30404d9940 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2168,30 +2168,6 @@ int hvm_set_efer(uint64_t value)
>       return X86EMUL_OKAY;
>   }
>
> -/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
> -static bool domain_exit_uc_mode(struct vcpu *v)
> -{
> -    struct domain *d = v->domain;
> -    struct vcpu *vs;
> -
> -    for_each_vcpu ( d, vs )
> -    {
> -        if ( (vs == v) || !vs->is_initialised )
> -            continue;
> -        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
> -             mtrr_pat_not_equal(vs, v) )
> -            return 0;
> -    }
> -
> -    return 1;
> -}
> -
> -static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
> -{
> -    v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
> -    shadow_blow_tables_per_domain(v->domain);
> -}
> -
>   int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
>   {
>       struct vcpu *curr = current;
> @@ -2273,6 +2249,31 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
>       return X86EMUL_UNHANDLEABLE;
>   }
>
> +#ifdef CONFIG_INTEL_VMX
> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
> +static bool domain_exit_uc_mode(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +    struct vcpu *vs;
> +
> +    for_each_vcpu ( d, vs )
> +    {
> +        if ( (vs == v) || !vs->is_initialised )
> +            continue;
> +        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
> +             mtrr_pat_not_equal(vs, v) )
> +            return 0;
> +    }
> +
> +    return 1;
> +}
> +
> +static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
> +{
> +    v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
> +    shadow_blow_tables_per_domain(v->domain);
> +}
> +
>   void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
>   {
>       if ( value & X86_CR0_CD )
> @@ -2306,6 +2307,7 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
>           spin_unlock(&v->domain->arch.hvm.uc_lock);
>       }
>   }
> +#endif
>
>   static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
>   {
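
Concretely, something like the following untested sketch is what I have
in mind (the vmx_ names are mine):

    /* Moved into xen/arch/x86/hvm/vmx/vmx.c: */
    static void vmx_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
    {
        v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
        shadow_blow_tables_per_domain(v->domain);
    }

domain_exit_uc_mode() and hvm_shadow_handle_cd() would move the same way
(the latter becoming e.g. vmx_shadow_handle_cd(), made static if all its
callers live in VMX code), with its declaration dropped from the shared
header so that any future non-VMX caller fails to compile right away
instead of breaking at link time.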

Teddy


--
Teddy Astie | Vates XCP-ng Developer

XCP-ng & Xen Orchestra - Vates solutions

web: https://vates.tech