[xen master] x86/mce: address MISRA C:2012 Rule 5.3
commit 48627540a11bebc97ef86c8bed3d5984be052103
Author:     Nicola Vetrini <nicola.vetrini@xxxxxxxxxxx>
AuthorDate: Fri Aug 4 10:03:33 2023 +0200
Commit:     Stefano Stabellini <stefano.stabellini@xxxxxxx>
CommitDate: Mon Aug 7 13:14:52 2023 -0700

    x86/mce: address MISRA C:2012 Rule 5.3

    Suitable mechanical renames are made to avoid shadowing the function
    identifier 'wait' declared in 'xen/include/xen/wait.h', thus addressing
    violations of MISRA C:2012 Rule 5.3:
    "An identifier declared in an inner scope shall not hide an
    identifier declared in an outer scope".

    The parameter name 'bar' is added as well to comply with MISRA C:2012
    Rules 8.2 and 8.3.

    Signed-off-by: Nicola Vetrini <nicola.vetrini@xxxxxxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/barrier.c |  8 ++++----
 xen/arch/x86/cpu/mcheck/barrier.h | 14 +++++++-------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/barrier.c b/xen/arch/x86/cpu/mcheck/barrier.c
index a7e5b19a44..51a1d37a76 100644
--- a/xen/arch/x86/cpu/mcheck/barrier.c
+++ b/xen/arch/x86/cpu/mcheck/barrier.c
@@ -16,11 +16,11 @@ void mce_barrier_dec(struct mce_softirq_barrier *bar)
     atomic_dec(&bar->val);
 }
 
-void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait)
+void mce_barrier_enter(struct mce_softirq_barrier *bar, bool do_wait)
 {
     int gen;
 
-    if ( !wait )
+    if ( !do_wait )
         return;
     atomic_inc(&bar->ingen);
     gen = atomic_read(&bar->outgen);
@@ -34,11 +34,11 @@ void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait)
     }
 }
 
-void mce_barrier_exit(struct mce_softirq_barrier *bar, bool wait)
+void mce_barrier_exit(struct mce_softirq_barrier *bar, bool do_wait)
 {
     int gen;
 
-    if ( !wait )
+    if ( !do_wait )
         return;
     atomic_inc(&bar->outgen);
     gen = atomic_read(&bar->ingen);
diff --git a/xen/arch/x86/cpu/mcheck/barrier.h b/xen/arch/x86/cpu/mcheck/barrier.h
index c4d52b6192..7ec483226f 100644
--- a/xen/arch/x86/cpu/mcheck/barrier.h
+++ b/xen/arch/x86/cpu/mcheck/barrier.h
@@ -20,7 +20,7 @@ struct mce_softirq_barrier {
 /*
  * Initialize a barrier. Just set it to 0.
  */
-void mce_barrier_init(struct mce_softirq_barrier *);
+void mce_barrier_init(struct mce_softirq_barrier *bar);
 
 /*
  * This function will need to be used when offlining a CPU in the
@@ -29,17 +29,17 @@ void mce_barrier_init(struct mce_softirq_barrier *);
  * Decrement a barrier only. Needed for cases where the CPU
  * in question can't do it itself (e.g. it is being offlined).
  */
-void mce_barrier_dec(struct mce_softirq_barrier *);
+void mce_barrier_dec(struct mce_softirq_barrier *bar);
 
 /*
- * If @wait is false, mce_barrier_enter/exit() will return immediately
+ * If @do_wait is false, mce_barrier_enter/exit() will return immediately
  * without touching the barrier. It's used when handling a
  * non-broadcasting MCE (e.g. MCE on some old Intel CPU, MCE on AMD
  * CPU and LMCE on Intel Skylake-server CPU) which is received on only
  * one CPU and thus does not invoke mce_barrier_enter/exit() calls on
  * all CPUs.
  *
- * If @wait is true, mce_barrier_enter/exit() will handle the given
+ * If @do_wait is true, mce_barrier_enter/exit() will handle the given
  * barrier as below.
  *
  * Increment the generation number and the value. The generation number
@@ -53,9 +53,9 @@ void mce_barrier_dec(struct mce_softirq_barrier *);
  * These barrier functions should always be paired, so that the
  * counter value will reach 0 again after all CPUs have exited.
  */
-void mce_barrier_enter(struct mce_softirq_barrier *, bool wait);
-void mce_barrier_exit(struct mce_softirq_barrier *, bool wait);
+void mce_barrier_enter(struct mce_softirq_barrier *bar, bool do_wait);
+void mce_barrier_exit(struct mce_softirq_barrier *bar, bool do_wait);
 
-void mce_barrier(struct mce_softirq_barrier *);
+void mce_barrier(struct mce_softirq_barrier *bar);
 
 #endif /* _MCHECK_BARRIER_H */
--
generated by git-patchbot for /home/xen/git/xen.git#master
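
Editorial note, not part of the commit: a minimal sketch of how the renamed
parameters are intended to be used. The enter/exit calls are paired around
the per-CPU MCE processing, and do_wait is false for non-broadcast events
(e.g. an LMCE), in which case the barrier is skipped entirely. The barrier
variable and handler names below are hypothetical and do not come from the
Xen sources; only the mce_barrier_* calls match the header shown above.

    #include "barrier.h"

    /* Hypothetical barrier instance; the real MCE code defines its own. */
    static struct mce_softirq_barrier example_barrier;

    /* One-time setup: reset the barrier counters to 0. */
    static void example_setup(void)
    {
        mce_barrier_init(&example_barrier);
    }

    /* Hypothetical per-CPU handler body illustrating the paired calls. */
    static void example_mce_handler(bool broadcast)
    {
        /*
         * For a non-broadcast event do_wait is false and the call
         * returns immediately without touching the barrier.
         */
        mce_barrier_enter(&example_barrier, broadcast);

        /* ... per-CPU error processing that must rendezvous here ... */

        /* Always paired with the enter above, so the count returns to 0. */
        mce_barrier_exit(&example_barrier, broadcast);
    }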