
Re: [Xen-devel] [PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()



On 09/06/2017 11:29 AM, Juergen Gross wrote:
> There are cases where a guest tries to switch spinlocks to bare metal
> behavior (e.g. by setting the "xen_nopvspin" boot parameter). Today this
> has the downside of falling back to the unfair test-and-set scheme for
> qspinlocks, because virt_spin_lock() detects the virtualized
> environment.
>
> Add a static key controlling whether virt_spin_lock() should be
> called. When running on bare metal, set the new key to false.
>
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
> ---
>  arch/x86/include/asm/qspinlock.h     | 11 +++++++++++
>  arch/x86/kernel/paravirt-spinlocks.c |  6 ++++++
>  arch/x86/kernel/smpboot.c            |  2 ++
>  kernel/locking/qspinlock.c           |  4 ++++
>  4 files changed, 23 insertions(+)
>
> diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
> index 48a706f641f2..fc39389f196b 100644
> --- a/arch/x86/include/asm/qspinlock.h
> +++ b/arch/x86/include/asm/qspinlock.h
> @@ -1,6 +1,7 @@
>  #ifndef _ASM_X86_QSPINLOCK_H
>  #define _ASM_X86_QSPINLOCK_H
>  
> +#include <linux/jump_label.h>
>  #include <asm/cpufeature.h>
>  #include <asm-generic/qspinlock_types.h>
>  #include <asm/paravirt.h>
> @@ -46,9 +47,15 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
>  #endif
>  
>  #ifdef CONFIG_PARAVIRT
> +DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
> +
> +void native_pv_lock_init(void) __init;
> +
>  #define virt_spin_lock virt_spin_lock
>  static inline bool virt_spin_lock(struct qspinlock *lock)
>  {
> +     if (!static_branch_likely(&virt_spin_lock_key))
> +             return false;
>       if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
>               return false;
>  
> @@ -65,6 +72,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
>  
>       return true;
>  }
> +#else
> +static inline void native_pv_lock_init(void)
> +{
> +}
>  #endif /* CONFIG_PARAVIRT */
>  
>  #include <asm-generic/qspinlock.h>
> diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
> index 8f2d1c9d43a8..2fc65ddea40d 100644
> --- a/arch/x86/kernel/paravirt-spinlocks.c
> +++ b/arch/x86/kernel/paravirt-spinlocks.c
> @@ -42,3 +42,9 @@ struct pv_lock_ops pv_lock_ops = {
>  #endif /* SMP */
>  };
>  EXPORT_SYMBOL(pv_lock_ops);
> +
> +void __init native_pv_lock_init(void)
> +{
> +     if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
> +             static_branch_disable(&virt_spin_lock_key);
> +}
> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
> index 54b9e89d4d6b..21500d3ba359 100644
> --- a/arch/x86/kernel/smpboot.c
> +++ b/arch/x86/kernel/smpboot.c
> @@ -77,6 +77,7 @@
>  #include <asm/i8259.h>
>  #include <asm/realmode.h>
>  #include <asm/misc.h>
> +#include <asm/qspinlock.h>
>  
>  /* Number of siblings per CPU package */
>  int smp_num_siblings = 1;
> @@ -1381,6 +1382,7 @@ void __init native_smp_prepare_boot_cpu(void)
>       /* already set me in cpu_online_mask in boot_cpu_init() */
>       cpumask_set_cpu(me, cpu_callout_mask);
>       cpu_set_state_online(me);
> +     native_pv_lock_init();
>  }
>  
>  void __init native_smp_cpus_done(unsigned int max_cpus)
> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
> index 294294c71ba4..838d235b87ef 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -76,6 +76,10 @@
>  #define MAX_NODES    4
>  #endif
>  
> +#ifdef CONFIG_PARAVIRT
> +DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
> +#endif
> +
>  /*
>   * Per-CPU queue node structures; we can never have more than 4 nested
>   * contexts: task, softirq, hardirq, nmi.

Acked-by: Waiman Long <longman@xxxxxxxxxx>
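
For reference, a minimal sketch of how a guest honoring "xen_nopvspin"
could flip the new key. Presumably patch 2/2 wires this up on the Xen
side; the function name and the xen_nopvspin variable below are
illustrative stand-ins, not copied from that patch:

#include <linux/jump_label.h>
#include <asm/qspinlock.h>

/* Stand-in for the value parsed from the "xen_nopvspin" parameter. */
static bool xen_nopvspin;

static void __init xen_init_spinlocks_sketch(void)
{
	if (xen_nopvspin) {
		/*
		 * With the key disabled, virt_spin_lock() returns false
		 * right away, so queued_spin_lock_slowpath() falls
		 * through to the fair native qspinlock code instead of
		 * the unfair test-and-set loop.
		 */
		static_branch_disable(&virt_spin_lock_key);
	}
}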

