[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] x86/cpu: CFI hardening
commit 78f14da74c79864d5b1c5b9df3d3f0ac5a28a546 Author: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> AuthorDate: Fri Oct 29 19:11:52 2021 +0100 Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CommitDate: Wed Feb 23 15:33:43 2022 +0000 x86/cpu: CFI hardening Control Flow Integrity schemes use toolchain and optionally hardware support to help protect against call/jump/return oriented programming attacks. Use cf_check to annotate function pointer targets for the toolchain. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Acked-by: Jan Beulich <jbeulich@xxxxxxxx> --- xen/arch/x86/cpu/amd.c | 6 +++--- xen/arch/x86/cpu/centaur.c | 2 +- xen/arch/x86/cpu/common.c | 2 +- xen/arch/x86/cpu/cpu.h | 2 +- xen/arch/x86/cpu/hygon.c | 2 +- xen/arch/x86/cpu/intel.c | 6 +++--- xen/arch/x86/cpu/shanghai.c | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c index 2d18223f20..4999f8be2b 100644 --- a/xen/arch/x86/cpu/amd.c +++ b/xen/arch/x86/cpu/amd.c @@ -208,7 +208,7 @@ static void __init noinline probe_masking_msrs(void) * parameter of NULL is used to context switch to the default host state (by * the cpu bringup-code, crash path, etc). */ -static void amd_ctxt_switch_masking(const struct vcpu *next) +static void cf_check amd_ctxt_switch_masking(const struct vcpu *next) { struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); const struct domain *nextd = next ? 
next->domain : NULL; @@ -634,7 +634,7 @@ void amd_log_freq(const struct cpuinfo_x86 *c) #undef FREQ } -void early_init_amd(struct cpuinfo_x86 *c) +void cf_check early_init_amd(struct cpuinfo_x86 *c) { if (c == &boot_cpu_data) amd_init_levelling(); @@ -744,7 +744,7 @@ void __init detect_zen2_null_seg_behaviour(void) } -static void init_amd(struct cpuinfo_x86 *c) +static void cf_check init_amd(struct cpuinfo_x86 *c) { u32 l, h; diff --git a/xen/arch/x86/cpu/centaur.c b/xen/arch/x86/cpu/centaur.c index 34a5bfcaee..eac49d78db 100644 --- a/xen/arch/x86/cpu/centaur.c +++ b/xen/arch/x86/cpu/centaur.c @@ -48,7 +48,7 @@ static void init_c3(struct cpuinfo_x86 *c) display_cacheinfo(c); } -static void init_centaur(struct cpuinfo_x86 *c) +static void cf_check init_centaur(struct cpuinfo_x86 *c) { if (c->x86 == 6) init_c3(c); diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c index 3d61d95386..2429818e60 100644 --- a/xen/arch/x86/cpu/common.c +++ b/xen/arch/x86/cpu/common.c @@ -104,7 +104,7 @@ bool __init is_forced_cpu_cap(unsigned int cap) return test_bit(cap, forced_caps); } -static void default_init(struct cpuinfo_x86 * c) +static void cf_check default_init(struct cpuinfo_x86 * c) { /* Not much we can do here... 
*/ /* Check if at least it has cpuid */ diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h index b593bd85f0..a228087f91 100644 --- a/xen/arch/x86/cpu/cpu.h +++ b/xen/arch/x86/cpu/cpu.h @@ -18,7 +18,7 @@ extern void display_cacheinfo(struct cpuinfo_x86 *c); extern void detect_ht(struct cpuinfo_x86 *c); extern bool detect_extended_topology(struct cpuinfo_x86 *c); -void early_init_amd(struct cpuinfo_x86 *c); +void cf_check early_init_amd(struct cpuinfo_x86 *c); void amd_log_freq(const struct cpuinfo_x86 *c); void amd_init_lfence(struct cpuinfo_x86 *c); void amd_init_ssbd(const struct cpuinfo_x86 *c); diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c index cdc94130dd..3c8516e014 100644 --- a/xen/arch/x86/cpu/hygon.c +++ b/xen/arch/x86/cpu/hygon.c @@ -28,7 +28,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) c->phys_proc_id, c->cpu_core_id); } -static void init_hygon(struct cpuinfo_x86 *c) +static void cf_check init_hygon(struct cpuinfo_x86 *c) { unsigned long long value; diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c index eb5fba35ca..dc6a0c7807 100644 --- a/xen/arch/x86/cpu/intel.c +++ b/xen/arch/x86/cpu/intel.c @@ -176,7 +176,7 @@ static void __init probe_masking_msrs(void) * parameter of NULL is used to context switch to the default host state (by * the cpu bringup-code, crash path, etc). */ -static void intel_ctxt_switch_masking(const struct vcpu *next) +static void cf_check intel_ctxt_switch_masking(const struct vcpu *next) { struct cpuidmasks *these_masks = &this_cpu(cpuidmasks); const struct domain *nextd = next ? 
next->domain : NULL; @@ -286,7 +286,7 @@ static void __init noinline intel_init_levelling(void) ctxt_switch_masking = intel_ctxt_switch_masking; } -static void early_init_intel(struct cpuinfo_x86 *c) +static void cf_check early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable, disable; @@ -518,7 +518,7 @@ static void intel_log_freq(const struct cpuinfo_x86 *c) printk("%u MHz\n", (factor * max_ratio + 50) / 100); } -static void init_intel(struct cpuinfo_x86 *c) +static void cf_check init_intel(struct cpuinfo_x86 *c) { /* Detect the extended topology information if available */ detect_extended_topology(c); diff --git a/xen/arch/x86/cpu/shanghai.c b/xen/arch/x86/cpu/shanghai.c index 08a81f0f0c..95ae544f8c 100644 --- a/xen/arch/x86/cpu/shanghai.c +++ b/xen/arch/x86/cpu/shanghai.c @@ -3,7 +3,7 @@ #include <asm/processor.h> #include "cpu.h" -static void init_shanghai(struct cpuinfo_x86 *c) +static void cf_check init_shanghai(struct cpuinfo_x86 *c) { if ( cpu_has(c, X86_FEATURE_ITSC) ) { -- generated by git-patchbot for /home/xen/git/xen.git#staging
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.