[Xen-changelog] [xen master] xen/x86: Replace appropriate mandatory barriers with SMP barriers
commit 971d299c04df379734d10c44d637433e9e564f36
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Wed Oct 5 12:42:15 2016 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Dec 21 11:02:00 2017 +0000

    xen/x86: Replace appropriate mandatory barriers with SMP barriers

    There is no functional change.  Xen currently assigns smp_* meaning to
    the non-smp_* barriers.

    All of these uses are just to deal with shared memory between multiple
    processors, which means that the smp_*() variants are the correct ones
    to use.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/barrier.c | 10 +++++-----
 xen/arch/x86/cpu/mcheck/mctelem.c |  4 ++--
 xen/arch/x86/genapic/x2apic.c     |  6 +++---
 xen/arch/x86/hpet.c               |  2 +-
 xen/arch/x86/hvm/ioreq.c          |  4 ++--
 xen/arch/x86/irq.c                |  4 ++--
 xen/arch/x86/smpboot.c            | 12 ++++++------
 xen/arch/x86/time.c               |  8 ++++----
 xen/include/asm-x86/desc.h        |  8 ++++----
 xen/include/asm-x86/system.h      |  2 +-
 10 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/barrier.c b/xen/arch/x86/cpu/mcheck/barrier.c
index 7de8e45..a7e5b19 100644
--- a/xen/arch/x86/cpu/mcheck/barrier.c
+++ b/xen/arch/x86/cpu/mcheck/barrier.c
@@ -12,7 +12,7 @@ void mce_barrier_init(struct mce_softirq_barrier *bar)
 void mce_barrier_dec(struct mce_softirq_barrier *bar)
 {
     atomic_inc(&bar->outgen);
-    wmb();
+    smp_wmb();
     atomic_dec(&bar->val);
 }
 
@@ -24,12 +24,12 @@ void mce_barrier_enter(struct mce_softirq_barrier *bar, bool wait)
         return;
     atomic_inc(&bar->ingen);
     gen = atomic_read(&bar->outgen);
-    mb();
+    smp_mb();
     atomic_inc(&bar->val);
     while ( atomic_read(&bar->val) != num_online_cpus() &&
             atomic_read(&bar->outgen) == gen )
     {
-        mb();
+        smp_mb();
         mce_panic_check();
     }
 }
@@ -42,12 +42,12 @@ void mce_barrier_exit(struct mce_softirq_barrier *bar, bool wait)
         return;
     atomic_inc(&bar->outgen);
     gen = atomic_read(&bar->ingen);
-    mb();
+    smp_mb();
     atomic_dec(&bar->val);
     while ( atomic_read(&bar->val) != 0 &&
             atomic_read(&bar->ingen) == gen )
     {
-        mb();
+        smp_mb();
         mce_panic_check();
     }
 }
diff --git a/xen/arch/x86/cpu/mcheck/mctelem.c b/xen/arch/x86/cpu/mcheck/mctelem.c
index 35431df..3bb13e5 100644
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -501,9 +501,9 @@ static void mctelem_append_processing(mctelem_class_t which)
 		ltep->mcte_prev = *procltp;
 		*procltp = dangling[target];
 	}
-	wmb();
+	smp_wmb();
 	dangling[target] = NULL;
-	wmb();
+	smp_wmb();
 }
 
 mctelem_cookie_t mctelem_consume_oldest_begin(mctelem_class_t which)
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index 5fffb31..4779b0d 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -106,12 +106,12 @@ static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector)
      * CPU is seen by notified remote CPUs. The WRMSR contained within
      * apic_icr_write() can otherwise be executed early.
      *
-     * The reason mb() is sufficient here is subtle: the register arguments
+     * The reason smp_mb() is sufficient here is subtle: the register arguments
      * to WRMSR must depend on a memory read executed after the barrier. This
      * is guaranteed by cpu_physical_id(), which reads from a global array (and
      * so cannot be hoisted above the barrier even by a clever compiler).
      */
-    mb();
+    smp_mb();
 
     local_irq_save(flags);
 
@@ -135,7 +135,7 @@ static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector)
     const cpumask_t *cluster_cpus;
     unsigned long flags;
 
-    mb(); /* See above for an explanation. */
+    smp_mb(); /* See above for an explanation. */
 
     local_irq_save(flags);
 
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 8229c63..bc7a851 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -608,7 +608,7 @@ void __init hpet_broadcast_init(void)
         hpet_events[i].shift = 32;
         hpet_events[i].next_event = STIME_MAX;
         spin_lock_init(&hpet_events[i].lock);
-        wmb();
+        smp_wmb();
         hpet_events[i].event_handler = handle_hpet_broadcast;
 
         hpet_events[i].msi.msi_attrib.maskbit = 1;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index ec9b31f..5aeaaac 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -91,7 +91,7 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
     {
         unsigned int state = p->state;
 
-        rmb();
+        smp_rmb();
         switch ( state )
         {
         case STATE_IOREQ_NONE:
@@ -1329,7 +1329,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     }
 
     /* Make the ioreq_t visible /before/ write_pointer. */
-    wmb();
+    smp_wmb();
     pg->ptrs.write_pointer += qw ? 2 : 1;
 
     /* Canonicalize read/write pointers to prevent their overflow. */
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index c0ab299..1c5213e 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -764,9 +764,9 @@ void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
 
     ASSERT(spin_is_locked(&desc->lock));
     desc->status &= ~IRQ_MOVE_PENDING;
-    wmb();
+    smp_wmb();
     cpumask_copy(desc->arch.pending_mask, mask);
-    wmb();
+    smp_wmb();
     desc->status |= IRQ_MOVE_PENDING;
 }
 
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 7921008..0e585b3 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -85,7 +85,7 @@ static enum cpu_state {
     CPU_STATE_CALLIN,   /* slave -> master: Completed phase 2 */
     CPU_STATE_ONLINE    /* master -> slave: Go fully online now. */
 } cpu_state;
-#define set_cpu_state(state) do { mb(); cpu_state = (state); } while (0)
+#define set_cpu_state(state) do { smp_mb(); cpu_state = (state); } while (0)
 
 void *stack_base[NR_CPUS];
 
@@ -132,7 +132,7 @@ static void synchronize_tsc_master(unsigned int slave)
     for ( i = 1; i <= 5; i++ )
     {
         tsc_value = rdtsc_ordered();
-        wmb();
+        smp_wmb();
         atomic_inc(&tsc_count);
         while ( atomic_read(&tsc_count) != (i<<1) )
             cpu_relax();
@@ -157,7 +157,7 @@ static void synchronize_tsc_slave(unsigned int slave)
     {
         while ( atomic_read(&tsc_count) != ((i<<1)-1) )
             cpu_relax();
-        rmb();
+        smp_rmb();
         /*
          * If a CPU has been physically hotplugged, we may as well write
          * to its TSC in spite of X86_FEATURE_TSC_RELIABLE. The platform does
@@ -561,13 +561,13 @@ static int do_boot_cpu(int apicid, int cpu)
     }
     else if ( cpu_state == CPU_STATE_DEAD )
     {
-        rmb();
+        smp_rmb();
         rc = cpu_error;
     }
     else
     {
         boot_error = 1;
-        mb();
+        smp_mb();
         if ( bootsym(trampoline_cpu_started) == 0xA5 )
             /* trampoline started but...? */
             printk("Stuck ??\n");
@@ -585,7 +585,7 @@ static int do_boot_cpu(int apicid, int cpu)
 
     /* mark "stuck" area as not stuck */
     bootsym(trampoline_cpu_started) = 0;
-    mb();
+    smp_mb();
 
     smpboot_restore_warm_reset_vector();
 
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index eba7aed..2a87950 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -976,10 +976,10 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
 
     /* 1. Update guest kernel version. */
     _u.version = u->version = version_update_begin(u->version);
-    wmb();
+    smp_wmb();
     /* 2. Update all other guest kernel fields. */
     *u = _u;
-    wmb();
+    smp_wmb();
     /* 3. Update guest kernel version. */
     u->version = version_update_end(u->version);
 
@@ -1006,10 +1006,10 @@ bool update_secondary_system_time(struct vcpu *v,
         update_guest_memory_policy(v, &policy);
         return false;
     }
-    wmb();
+    smp_wmb();
     /* 2. Update all other userspace fields. */
     __copy_to_guest(user_u, u, 1);
-    wmb();
+    smp_wmb();
     /* 3. Update userspace version. */
     u->version = version_update_end(u->version);
     __copy_field_to_guest(user_u, u, version);
diff --git a/xen/include/asm-x86/desc.h b/xen/include/asm-x86/desc.h
index 9778a35..4093c65 100644
--- a/xen/include/asm-x86/desc.h
+++ b/xen/include/asm-x86/desc.h
@@ -128,10 +128,10 @@ static inline void _write_gate_lower(volatile idt_entry_t *gate,
 #define _set_gate(gate_addr,type,dpl,addr)               \
 do {                                                     \
     (gate_addr)->a = 0;                                  \
-    wmb(); /* disable gate /then/ rewrite */             \
+    smp_wmb(); /* disable gate /then/ rewrite */         \
     (gate_addr)->b =                                     \
         ((unsigned long)(addr) >> 32);                   \
-    wmb(); /* rewrite /then/ enable gate */              \
+    smp_wmb(); /* rewrite /then/ enable gate */          \
     (gate_addr)->a =                                     \
         (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
         ((unsigned long)(dpl) << 45) |                   \
@@ -174,11 +174,11 @@ static inline void _update_gate_addr_lower(idt_entry_t *gate, void *addr)
 #define _set_tssldt_desc(desc,addr,limit,type)           \
 do {                                                     \
     (desc)[0].b = (desc)[1].b = 0;                       \
-    wmb(); /* disable entry /then/ rewrite */            \
+    smp_wmb(); /* disable entry /then/ rewrite */        \
     (desc)[0].a =                                        \
         ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
     (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
-    wmb(); /* rewrite /then/ enable entry */             \
+    smp_wmb(); /* rewrite /then/ enable entry */         \
     (desc)[0].b =                                        \
         ((u32)(addr) & 0xFF000000U) |                    \
         ((u32)(type) << 8) | 0x8000U |                   \
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index eb498f5..9cb6fd7 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -183,7 +183,7 @@ static always_inline unsigned long __xadd(
 #define smp_wmb()       wmb()
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
 
 #define local_irq_disable() asm volatile ( "cli" : : : "memory" )
 #define local_irq_enable()  asm volatile ( "sti" : : : "memory" )
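
A note for readers on the distinction this patch trades on: mandatory barriers (mb(), rmb(), wmb()) must order accesses against devices and DMA as well as against other CPUs, so they need real fence instructions, while the smp_*() variants only order ordinary memory accesses between CPUs and may weaken to plain compiler barriers on a strongly ordered architecture. The sketch below illustrates that split in the style of Linux's x86 definitions; it is not Xen's header, and the system.h context above shows Xen still aliasing smp_wmb() to wmb() at this commit, which is exactly why the patch is annotation only, with no functional change.

    /*
     * Illustrative only -- Linux-style x86 barrier definitions, not Xen's
     * (whose smp_*() macros still alias the mandatory ones at this point).
     */

    /* Compiler-only barrier: blocks compiler reordering, emits no insn. */
    #define barrier()   asm volatile ( "" : : : "memory" )

    /* Mandatory barriers: must also order MMIO/DMA, so real fences. */
    #define mb()        asm volatile ( "mfence" : : : "memory" )
    #define rmb()       asm volatile ( "lfence" : : : "memory" )
    #define wmb()       asm volatile ( "sfence" : : : "memory" )

    /*
     * SMP barriers: only order plain memory accesses between CPUs.  x86 is
     * TSO (loads are not reordered against older loads, nor stores against
     * older stores), so read and write ordering need only a compiler
     * barrier.  The full barrier keeps mfence, because an older store can
     * still be reordered after a younger load.
     */
    #define smp_mb()    mb()
    #define smp_rmb()   barrier()
    #define smp_wmb()   barrier()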
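
The hvm/ioreq.c hunks are the two halves of a producer/consumer pairing: hvm_send_buffered_ioreq() makes the record visible before advancing the ring pointer (smp_wmb()), and the reader samples state and pointers before the data they guard (smp_rmb()). Below is a minimal single-producer, single-consumer sketch of the same pairing, reusing the smp_*() definitions above; the struct layout and names are hypothetical, not the real buffered ioreq page, and the pointer canonicalisation is elided.

    /* Hypothetical SPSC ring in the spirit of the buffered ioreq page. */
    #define RING_SIZE 8                   /* power of two */

    struct ring {
        volatile unsigned int read_pointer;
        volatile unsigned int write_pointer;
        unsigned long slot[RING_SIZE];
    };

    /* Producer: fill the slot, then publish it by advancing the pointer. */
    static void ring_put(struct ring *r, unsigned long rec)
    {
        r->slot[r->write_pointer & (RING_SIZE - 1)] = rec;
        smp_wmb();                        /* record visible /before/ pointer */
        r->write_pointer++;
    }

    /* Consumer: sample the pointer, then read the record it guards. */
    static int ring_get(struct ring *r, unsigned long *rec)
    {
        if ( r->read_pointer == r->write_pointer )
            return 0;                     /* empty */
        smp_rmb();                        /* pointer sampled /before/ record */
        *rec = r->slot[r->read_pointer & (RING_SIZE - 1)];
        r->read_pointer++;
        return 1;
    }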
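
The time.c hunks follow a seqlock-style publication protocol: make the version odd, write the payload, make the version even again, with smp_wmb() between each step so a concurrent reader can detect a torn snapshot and retry. A standalone sketch of both sides follows, again with hypothetical names: Xen's real payload is vcpu_time_info, and the version increments live in the version_update_begin()/version_update_end() helpers rather than the open-coded ++ used here.

    /* Hypothetical payload published from a writer CPU to reader CPUs. */
    struct time_snapshot {
        volatile unsigned int version;    /* odd => writer mid-update */
        unsigned long tsc_timestamp;
        unsigned long system_time;
    };

    /* Writer, mirroring the three numbered steps in the hunks above. */
    static void publish(struct time_snapshot *u,
                        const struct time_snapshot *latest)
    {
        u->version++;                     /* 1. version becomes odd */
        smp_wmb();                        /* version /then/ payload */
        u->tsc_timestamp = latest->tsc_timestamp;
        u->system_time = latest->system_time;
        smp_wmb();                        /* payload /then/ version */
        u->version++;                     /* 3. version becomes even */
    }

    /* Reader: retry if a writer was active, or completed mid-read. */
    static void snapshot(const struct time_snapshot *u,
                         struct time_snapshot *out)
    {
        unsigned int ver;

        do {
            while ( (ver = u->version) & 1 )
                ;                         /* writer active: spin */
            smp_rmb();                    /* version /then/ payload */
            out->tsc_timestamp = u->tsc_timestamp;
            out->system_time = u->system_time;
            smp_rmb();                    /* payload /then/ version recheck */
        } while ( u->version != ver );
    }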
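
Finally, the desc.h hunks protect in-place rewrites of descriptors that another CPU may load concurrently: the word carrying the present bit is cleared first and written last, so between the two smp_wmb()s the entry is cleanly "not present" rather than half old and half new. A compressed sketch of the idea, with a hypothetical two-word layout and an assumed P-bit position (the real idt_entry_t packs type, DPL and present as in the macros above):

    /* Hypothetical 16-byte descriptor as two words; 'a' carries the P bit. */
    typedef struct { volatile unsigned long a, b; } gate_t;

    #define GATE_PRESENT (1UL << 47)      /* assumed P-bit position in 'a' */

    static void rewrite_gate(gate_t *gate, unsigned long a, unsigned long b)
    {
        gate->a = 0;                      /* disable gate */
        smp_wmb();                        /* disable /then/ rewrite */
        gate->b = b;
        smp_wmb();                        /* rewrite /then/ enable */
        gate->a = a | GATE_PRESENT;       /* new contents, gate live again */
    }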