[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 07/12] x86: add new features for paravirt patching



To be able to switch paravirt patching from special-cased custom
code sequences to ALTERNATIVE handling, some new X86_FEATURE_* flags
are needed. This makes it possible to use the standard indirect pv call
as the default code and to patch that with the non-Xen custom code
sequence via ALTERNATIVE patching later.

Make sure paravirt patching is performed before alternative patching.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/include/asm/cpufeatures.h |  3 +++
 arch/x86/kernel/alternative.c      | 28 ++++++++++++++++++++++++++--
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dad350d42ecf..ffa23c655412 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -237,6 +237,9 @@
 #define X86_FEATURE_VMCALL             ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
 #define X86_FEATURE_VMW_VMMCALL                ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
 #define X86_FEATURE_SEV_ES             ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_NOT_XENPV          ( 8*32+21) /* "" Inverse of X86_FEATURE_XENPV */
+#define X86_FEATURE_NO_PVUNLOCK                ( 8*32+22) /* "" No PV unlock function */
+#define X86_FEATURE_NO_VCPUPREEMPT     ( 8*32+23) /* "" No PV vcpu_is_preempted function */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 2400ad62f330..f8f9700719cf 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -593,6 +593,18 @@ int alternatives_text_reserved(void *start, void *end)
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PARAVIRT
+static void __init paravirt_set_cap(void)
+{
+       if (!boot_cpu_has(X86_FEATURE_XENPV))
+               setup_force_cpu_cap(X86_FEATURE_NOT_XENPV);
+
+       if (pv_is_native_spin_unlock())
+               setup_force_cpu_cap(X86_FEATURE_NO_PVUNLOCK);
+
+       if (pv_is_native_vcpu_is_preempted())
+               setup_force_cpu_cap(X86_FEATURE_NO_VCPUPREEMPT);
+}
+
 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
 {
@@ -616,6 +628,8 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 }
 extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
+#else
+static void __init paravirt_set_cap(void) { }
 #endif /* CONFIG_PARAVIRT */
 
 /*
@@ -723,6 +737,18 @@ void __init alternative_instructions(void)
         * patching.
         */
 
+       paravirt_set_cap();
+
+       /*
+        * First patch paravirt functions, such that we overwrite the indirect
+        * call with the direct call.
+        */
+       apply_paravirt(__parainstructions, __parainstructions_end);
+
+       /*
+        * Then patch alternatives, such that those paravirt calls that are in
+        * alternatives can be overwritten by their immediate fragments.
+        */
        apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 #ifdef CONFIG_SMP
@@ -741,8 +767,6 @@ void __init alternative_instructions(void)
        }
 #endif
 
-       apply_paravirt(__parainstructions, __parainstructions_end);
-
        restart_nmi();
        alternatives_patched = 1;
 }
-- 
2.26.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.