[xen master] x86/pmu: CFI hardening
commit 8ec706d9024f82e4be8deadf7bebbb25efae8396
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Oct 29 19:07:04 2021 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Feb 23 15:33:43 2022 +0000

    x86/pmu: CFI hardening

    Control Flow Integrity schemes use toolchain and optionally hardware support
    to help protect against call/jump/return oriented programming attacks.

    Use cf_check to annotate function pointer targets for the toolchain.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/vpmu_amd.c             | 16 ++++++++--------
 xen/arch/x86/cpu/vpmu_intel.c           | 16 ++++++++--------
 xen/arch/x86/oprofile/op_model_athlon.c | 16 ++++++++--------
 xen/arch/x86/oprofile/op_model_p4.c     | 14 +++++++-------
 xen/arch/x86/oprofile/op_model_ppro.c   | 26 ++++++++++++++------------
 5 files changed, 45 insertions(+), 43 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 25ad4ecf48..5963ce9015 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -186,7 +186,7 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
     msr_bitmap_off(vpmu);
 }
 
-static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
+static int cf_check amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     return 1;
 }
@@ -206,7 +206,7 @@ static inline void context_load(struct vcpu *v)
     }
 }
 
-static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
+static int cf_check amd_vpmu_load(struct vcpu *v, bool from_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct xen_pmu_amd_ctxt *ctxt;
@@ -280,7 +280,7 @@ static inline void context_save(struct vcpu *v)
         rdmsrl(counters[i], counter_regs[i]);
 }
 
-static int amd_vpmu_save(struct vcpu *v, bool_t to_guest)
+static int cf_check amd_vpmu_save(struct vcpu *v, bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int i;
@@ -348,7 +348,7 @@ static void context_update(unsigned int msr, u64 msr_content)
     }
 }
 
-static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -404,7 +404,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     return 0;
 }
 
-static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -422,7 +422,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
     return 0;
 }
 
-static void amd_vpmu_destroy(struct vcpu *v)
+static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -440,7 +440,7 @@ static void amd_vpmu_destroy(struct vcpu *v)
 }
 
 /* VPMU part of the 'q' keyhandler */
-static void amd_vpmu_dump(const struct vcpu *v)
+static void cf_check amd_vpmu_dump(const struct vcpu *v)
 {
     const struct vpmu_struct *vpmu = vcpu_vpmu(v);
     const struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
@@ -480,7 +480,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static int svm_vpmu_initialise(struct vcpu *v)
+static int cf_check svm_vpmu_initialise(struct vcpu *v)
 {
     struct xen_pmu_amd_ctxt *ctxt;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 22dd4469d9..48b81ab6f0 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -288,7 +288,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }
 
-static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
+static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -407,7 +407,7 @@ static int core2_vpmu_verify(struct vcpu *v)
     return 0;
 }
 
-static int core2_vpmu_load(struct vcpu *v, bool_t from_guest)
+static int cf_check core2_vpmu_load(struct vcpu *v, bool from_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -522,7 +522,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
     return 1;
 }
 
-static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     int i, tmp;
     int type = -1, index = -1;
@@ -690,7 +690,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     return 0;
 }
 
-static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     int type = -1, index = -1;
     struct vcpu *v = current;
@@ -730,7 +730,7 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 }
 
 /* Dump vpmu info on console, called in the context of keyhandler 'q'. */
-static void core2_vpmu_dump(const struct vcpu *v)
+static void cf_check core2_vpmu_dump(const struct vcpu *v)
 {
     const struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int i;
@@ -775,7 +775,7 @@ static void core2_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
+static int cf_check core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     u64 msr_content;
@@ -802,7 +802,7 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
     return 1;
 }
 
-static void core2_vpmu_destroy(struct vcpu *v)
+static void cf_check core2_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -816,7 +816,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-static int vmx_vpmu_initialise(struct vcpu *v)
+static int cf_check vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
diff --git a/xen/arch/x86/oprofile/op_model_athlon.c b/xen/arch/x86/oprofile/op_model_athlon.c
index 2177f02946..7bc5853a6c 100644
--- a/xen/arch/x86/oprofile/op_model_athlon.c
+++ b/xen/arch/x86/oprofile/op_model_athlon.c
@@ -164,7 +164,7 @@ static inline u64 op_amd_randomize_ibs_op(u64 val)
 	return val;
 }
 
-static void athlon_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check athlon_fill_in_addresses(struct op_msrs * const msrs)
 {
 	msrs->counters[0].addr = MSR_K7_PERFCTR0;
 	msrs->counters[1].addr = MSR_K7_PERFCTR1;
@@ -177,7 +177,7 @@ static void athlon_fill_in_addresses(struct op_msrs * const msrs)
 	msrs->controls[3].addr = MSR_K7_EVNTSEL3;
 }
 
-static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check fam15h_fill_in_addresses(struct op_msrs * const msrs)
 {
 	msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0;
 	msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1;
@@ -194,7 +194,7 @@ static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
 	msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5;
 }
 
-static void athlon_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check athlon_setup_ctrs(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -308,9 +308,9 @@ static inline int handle_ibs(int mode, struct cpu_user_regs const * const regs)
 	return 1;
 }
 
-static int athlon_check_ctrs(unsigned int const cpu,
-			     struct op_msrs const * const msrs,
-			     struct cpu_user_regs const * const regs)
+static int cf_check athlon_check_ctrs(
+	unsigned int const cpu, struct op_msrs const * const msrs,
+	struct cpu_user_regs const * const regs)
 {
 	uint64_t msr_content;
 
@@ -386,7 +386,7 @@ static inline void start_ibs(void)
 	}
 }
 
-static void athlon_start(struct op_msrs const * const msrs)
+static void cf_check athlon_start(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -415,7 +415,7 @@ static void stop_ibs(void)
 		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static void athlon_stop(struct op_msrs const * const msrs)
+static void cf_check athlon_stop(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
diff --git a/xen/arch/x86/oprofile/op_model_p4.c b/xen/arch/x86/oprofile/op_model_p4.c
index b08ba53cbd..d047258644 100644
--- a/xen/arch/x86/oprofile/op_model_p4.c
+++ b/xen/arch/x86/oprofile/op_model_p4.c
@@ -390,7 +390,7 @@ static unsigned int get_stagger(void)
 
 static unsigned long reset_value[NUM_COUNTERS_NON_HT];
 
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check p4_fill_in_addresses(struct op_msrs * const msrs)
 {
 	unsigned int i;
 	unsigned int addr, stag;
@@ -530,7 +530,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
 }
 
 
-static void p4_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check p4_setup_ctrs(struct op_msrs const * const msrs)
 {
 	unsigned int i;
 	uint64_t msr_content;
@@ -609,9 +609,9 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
 	}
 }
 
-static int p4_check_ctrs(unsigned int const cpu,
-			 struct op_msrs const * const msrs,
-			 struct cpu_user_regs const * const regs)
+static int cf_check p4_check_ctrs(
+	unsigned int const cpu, struct op_msrs const * const msrs,
+	struct cpu_user_regs const * const regs)
 {
 	unsigned long ctr, stag, real;
 	uint64_t msr_content;
@@ -665,7 +665,7 @@ static int p4_check_ctrs(unsigned int const cpu,
 }
 
 
-static void p4_start(struct op_msrs const * const msrs)
+static void cf_check p4_start(struct op_msrs const * const msrs)
 {
 	unsigned int stag;
 	uint64_t msr_content;
@@ -683,7 +683,7 @@ static void p4_start(struct op_msrs const * const msrs)
 }
 
 
-static void p4_stop(struct op_msrs const * const msrs)
+static void cf_check p4_stop(struct op_msrs const * const msrs)
 {
 	unsigned int stag;
 	uint64_t msr_content;
diff --git a/xen/arch/x86/oprofile/op_model_ppro.c b/xen/arch/x86/oprofile/op_model_ppro.c
index 72c504a102..8d7e13ea87 100644
--- a/xen/arch/x86/oprofile/op_model_ppro.c
+++ b/xen/arch/x86/oprofile/op_model_ppro.c
@@ -63,7 +63,7 @@ static int counter_width = 32;
 static unsigned long reset_value[OP_MAX_COUNTER];
 int ppro_has_global_ctrl = 0;
 
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
@@ -74,7 +74,7 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 }
 
 
-static void ppro_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check ppro_setup_ctrs(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -128,9 +128,9 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	}
 }
 
-static int ppro_check_ctrs(unsigned int const cpu,
-			   struct op_msrs const * const msrs,
-			   struct cpu_user_regs const * const regs)
+static int cf_check ppro_check_ctrs(
+	unsigned int const cpu, struct op_msrs const * const msrs,
+	struct cpu_user_regs const * const regs)
 {
 	u64 val;
 	int i;
@@ -170,7 +170,7 @@ static int ppro_check_ctrs(unsigned int const cpu,
 }
 
 
-static void ppro_start(struct op_msrs const * const msrs)
+static void cf_check ppro_start(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -190,7 +190,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 }
 
 
-static void ppro_stop(struct op_msrs const * const msrs)
+static void cf_check ppro_stop(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -206,7 +206,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0ULL);
 }
 
-static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
+static int cf_check ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
 {
 	if ( (msr_index >= MSR_IA32_PERFCTR0) &&
 	     (msr_index < (MSR_IA32_PERFCTR0 + num_counters)) )
@@ -226,7 +226,7 @@ static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
 	return 0;
 }
 
-static int ppro_allocate_msr(struct vcpu *v)
+static int cf_check ppro_allocate_msr(struct vcpu *v)
 {
 	struct vpmu_struct *vpmu = vcpu_vpmu(v);
 	struct arch_msr_pair *msr_content;
@@ -245,7 +245,7 @@ out:
 	return 0;
 }
 
-static void ppro_free_msr(struct vcpu *v)
+static void cf_check ppro_free_msr(struct vcpu *v)
 {
 	struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -255,7 +255,8 @@ static void ppro_free_msr(struct vcpu *v)
 	vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);
 }
 
-static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
+static void cf_check ppro_load_msr(
+	struct vcpu *v, int type, int index, u64 *msr_content)
 {
 	struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
 	switch ( type )
@@ -269,7 +270,8 @@ static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
 	}
 }
 
-static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
+static void cf_check ppro_save_msr(
+	struct vcpu *v, int type, int index, u64 msr_content)
 {
 	struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
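
For readers unfamiliar with the annotation, here is a minimal, self-contained sketch of the pattern the patch applies. It is not Xen's actual code: the DEMO_CC_HAS_CF_CHECK gate, the struct demo_pmu_ops table and the demo_* functions are hypothetical stand-ins, and the attribute spelling assumes a toolchain along the lines of GCC 12's -mmanual-endbr plus cf_check function attribute. The point it illustrates is that only functions reached through function pointers need the annotation, so the compiler gives just those entry points an ENDBR64 landing pad (or equivalent CFI check) while directly-called functions stay unannotated.

/* Hedged sketch, not Xen's headers: gate name and spelling are assumptions. */
#ifdef DEMO_CC_HAS_CF_CHECK                     /* hypothetical config symbol */
# define cf_check __attribute__((cf_check))     /* request ENDBR64 at entry */
#else
# define cf_check                               /* no-op when CFI is disabled */
#endif

/* Simplified stand-in for an ops table such as arch_vpmu_ops. */
struct demo_pmu_ops {
    int  (*do_wrmsr)(unsigned int msr, unsigned long long val);
    void (*dump)(void);
};

/* Reached only via the ops table, so it must be a valid indirect-call target. */
static int cf_check demo_do_wrmsr(unsigned int msr, unsigned long long val)
{
    (void)msr;
    (void)val;
    return 0;
}

static void cf_check demo_dump(void)
{
}

static const struct demo_pmu_ops demo_ops = {
    .do_wrmsr = demo_do_wrmsr,
    .dump     = demo_dump,
};

/* A direct caller needs no annotation; only the indirect call below
 * requires its target to carry the landing pad / CFI metadata. */
int demo_write(unsigned int msr, unsigned long long val)
{
    return demo_ops.do_wrmsr(msr, val);
}

Keeping the set of annotated functions minimal is what makes the scheme useful: every function left unannotated is one fewer legitimate-looking destination for a corrupted function pointer.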