|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen master] introduce and use relaxed cpumask bitops
commit 4aaca0e9cd09ea86df2d86a0ccbfbcdfb2c37453
Author: Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Feb 18 16:55:17 2015 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 18 16:55:17 2015 +0100
introduce and use relaxed cpumask bitops
Using atomic (LOCKed on x86) bitops for certain of the operations on
cpumask_t is overkill when the variables aren't concurrently accessible
(e.g. local function variables, or due to explicit locking). Introduce
alternatives using non-atomic bitops and use them where appropriate.
Note that this
- adds a volatile qualifier to cpumask_test_and_{clear,set}_cpu()
(should have been there from the beginning, like is the case for
cpumask_{clear,set}_cpu())
- replaces several cpumask_clear()+cpumask_set_cpu(, n) pairs by the
simpler cpumask_copy(, cpumask_of(n)) (or just cpumask_of(n) if we
can do without copying)
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
xen/arch/x86/hpet.c | 4 ++--
xen/arch/x86/irq.c | 2 +-
xen/arch/x86/mm.c | 2 +-
xen/arch/x86/platform_hypercall.c | 2 +-
xen/arch/x86/time.c | 2 +-
xen/common/core_parking.c | 20 ++++++++------------
xen/common/cpu.c | 2 +-
xen/common/page_alloc.c | 4 ++--
xen/common/sched_credit.c | 12 ++++++------
xen/common/sched_rt.c | 7 ++-----
xen/common/softirq.c | 6 +++---
xen/include/xen/cpumask.h | 24 ++++++++++++++++++++++--
12 files changed, 50 insertions(+), 37 deletions(-)
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 7aa740f..8f36f6f 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -158,7 +158,7 @@ static void evt_do_broadcast(cpumask_t *mask)
{
unsigned int cpu = smp_processor_id();
- if ( cpumask_test_and_clear_cpu(cpu, mask) )
+ if ( __cpumask_test_and_clear_cpu(cpu, mask) )
raise_softirq(TIMER_SOFTIRQ);
cpuidle_wakeup_mwait(mask);
@@ -197,7 +197,7 @@ again:
continue;
if ( deadline <= now )
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
else if ( deadline < next_event )
next_event = deadline;
}
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index f214072..92e1854 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1450,7 +1450,7 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq
*pirq)
cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
- if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
+ if ( __cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
{
__set_eoi_ready(desc);
spin_unlock(&desc->lock);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 12e5006..9ab7a26 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3218,7 +3218,7 @@ long do_mmuext_op(
for_each_online_cpu(cpu)
if ( !cpumask_intersects(&mask,
per_cpu(cpu_sibling_mask, cpu)) )
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
flush_mask(&mask, FLUSH_CACHE);
}
else
diff --git a/xen/arch/x86/platform_hypercall.c
b/xen/arch/x86/platform_hypercall.c
index b427852..c725501 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -489,7 +489,7 @@ ret_t
do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
if ( !idletime )
{
- cpumask_clear_cpu(cpu, cpumap);
+ __cpumask_clear_cpu(cpu, cpumap);
continue;
}
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index b609938..becff99 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -179,7 +179,7 @@ static void smp_send_timer_broadcast_ipi(void)
if ( cpumask_test_cpu(cpu, &mask) )
{
- cpumask_clear_cpu(cpu, &mask);
+ __cpumask_clear_cpu(cpu, &mask);
raise_softirq(TIMER_SOFTIRQ);
}
diff --git a/xen/common/core_parking.c b/xen/common/core_parking.c
index 3190fb7..de269e0 100644
--- a/xen/common/core_parking.c
+++ b/xen/common/core_parking.c
@@ -75,11 +75,10 @@ static unsigned int core_parking_performance(unsigned int
event)
if ( core_weight < core_tmp )
{
core_weight = core_tmp;
- cpumask_clear(&core_candidate_map);
- cpumask_set_cpu(cpu, &core_candidate_map);
+ cpumask_copy(&core_candidate_map, cpumask_of(cpu));
}
else if ( core_weight == core_tmp )
- cpumask_set_cpu(cpu, &core_candidate_map);
+ __cpumask_set_cpu(cpu, &core_candidate_map);
}
for_each_cpu(cpu, &core_candidate_map)
@@ -88,11 +87,10 @@ static unsigned int core_parking_performance(unsigned int
event)
if ( sibling_weight < sibling_tmp )
{
sibling_weight = sibling_tmp;
- cpumask_clear(&sibling_candidate_map);
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
}
else if ( sibling_weight == sibling_tmp )
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ __cpumask_set_cpu(cpu, &sibling_candidate_map);
}
cpu = cpumask_first(&sibling_candidate_map);
@@ -135,11 +133,10 @@ static unsigned int core_parking_power(unsigned int event)
if ( core_weight > core_tmp )
{
core_weight = core_tmp;
- cpumask_clear(&core_candidate_map);
- cpumask_set_cpu(cpu, &core_candidate_map);
+ cpumask_copy(&core_candidate_map, cpumask_of(cpu));
}
else if ( core_weight == core_tmp )
- cpumask_set_cpu(cpu, &core_candidate_map);
+ __cpumask_set_cpu(cpu, &core_candidate_map);
}
for_each_cpu(cpu, &core_candidate_map)
@@ -148,11 +145,10 @@ static unsigned int core_parking_power(unsigned int event)
if ( sibling_weight > sibling_tmp )
{
sibling_weight = sibling_tmp;
- cpumask_clear(&sibling_candidate_map);
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
}
else if ( sibling_weight == sibling_tmp )
- cpumask_set_cpu(cpu, &sibling_candidate_map);
+ __cpumask_set_cpu(cpu, &sibling_candidate_map);
}
cpu = cpumask_first(&sibling_candidate_map);
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e..47e8b5b 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -192,7 +192,7 @@ int disable_nonboot_cpus(void)
break;
}
- cpumask_set_cpu(cpu, &frozen_cpus);
+ __cpumask_set_cpu(cpu, &frozen_cpus);
}
BUG_ON(!error && (num_online_cpus() != 1));
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index be6821b..3c27af9 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1337,7 +1337,7 @@ static int __init find_non_smt(unsigned int node,
cpumask_t *dest)
if ( cpumask_intersects(dest, per_cpu(cpu_sibling_mask, i)) )
continue;
cpu = cpumask_first(per_cpu(cpu_sibling_mask, i));
- cpumask_set_cpu(cpu, dest);
+ __cpumask_set_cpu(cpu, dest);
}
return cpumask_weight(dest);
}
@@ -1449,7 +1449,7 @@ void __init scrub_heap_pages(void)
cpus = find_non_smt(best_node, &node_cpus);
if ( cpus == 0 )
{
- cpumask_set_cpu(smp_processor_id(), &node_cpus);
+ __cpumask_set_cpu(smp_processor_id(), &node_cpus);
cpus = 1;
}
/* We already have the node information from round #0. */
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 8b02b7b..156b43b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -372,7 +372,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
{
if ( cur->pri != CSCHED_PRI_IDLE )
SCHED_STAT_CRANK(tickle_idlers_none);
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
}
else if ( !idlers_empty )
{
@@ -422,7 +422,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
SCHED_VCPU_STAT_CRANK(cur, migrate_r);
SCHED_STAT_CRANK(migrate_kicked_away);
set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
- cpumask_set_cpu(cpu, &mask);
+ __cpumask_set_cpu(cpu, &mask);
}
else if ( !new_idlers_empty )
{
@@ -432,7 +432,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
{
this_cpu(last_tickle_cpu) =
cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
- cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
+ __cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
}
else
cpumask_or(&mask, &mask, &idle_mask);
@@ -675,7 +675,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu
*vc, bool_t commit)
*/
cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
if ( vc->processor == cpu && IS_RUNQ_IDLE(cpu) )
- cpumask_set_cpu(cpu, &idlers);
+ __cpumask_set_cpu(cpu, &idlers);
cpumask_and(&cpus, &cpus, &idlers);
/*
@@ -692,7 +692,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu
*vc, bool_t commit)
*/
if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
cpu = cpumask_cycle(cpu, &cpus);
- cpumask_clear_cpu(cpu, &cpus);
+ __cpumask_clear_cpu(cpu, &cpus);
while ( !cpumask_empty(&cpus) )
{
@@ -1536,7 +1536,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
/* Find out what the !idle are in this node */
cpumask_andnot(&workers, online, prv->idlers);
cpumask_and(&workers, &workers, &node_to_cpumask(peer_node));
- cpumask_clear_cpu(cpu, &workers);
+ __cpumask_clear_cpu(cpu, &workers);
peer_cpu = cpumask_first(&workers);
if ( peer_cpu >= nr_cpu_ids )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index e70d6c7..df4adac 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -663,7 +663,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu
*svc, s_time_t now)
* lock is grabbed before calling this function
*/
static struct rt_vcpu *
-__runq_pick(const struct scheduler *ops, cpumask_t *mask)
+__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
{
struct list_head *runq = rt_runq(ops);
struct list_head *iter;
@@ -780,10 +780,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now,
bool_t tasklet_work_sched
}
else
{
- cpumask_t cur_cpu;
- cpumask_clear(&cur_cpu);
- cpumask_set_cpu(cpu, &cur_cpu);
- snext = __runq_pick(ops, &cur_cpu);
+ snext = __runq_pick(ops, cpumask_of(cpu));
if ( snext == NULL )
snext = rt_vcpu(idle_vcpu[cpu]);
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 22e417a..bb79aac 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -88,7 +88,7 @@ void cpumask_raise_softirq(const cpumask_t *mask, unsigned
int nr)
if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
cpu != this_cpu &&
!arch_skip_send_event_check(cpu) )
- cpumask_set_cpu(cpu, raise_mask);
+ __cpumask_set_cpu(cpu, raise_mask);
if ( raise_mask == &send_mask )
smp_send_event_check_mask(raise_mask);
@@ -106,7 +106,7 @@ void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
if ( !per_cpu(batching, this_cpu) || in_irq() )
smp_send_event_check_cpu(cpu);
else
- set_bit(nr, &per_cpu(batch_mask, this_cpu));
+ __cpumask_set_cpu(nr, &per_cpu(batch_mask, this_cpu));
}
void cpu_raise_softirq_batch_begin(void)
@@ -122,7 +122,7 @@ void cpu_raise_softirq_batch_finish(void)
ASSERT(per_cpu(batching, this_cpu));
for_each_cpu ( cpu, mask )
if ( !softirq_pending(cpu) )
- cpumask_clear_cpu(cpu, mask);
+ __cpumask_clear_cpu(cpu, mask);
smp_send_event_check_mask(mask);
cpumask_clear(mask);
--per_cpu(batching, this_cpu);
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 850b4a2..0e7108c 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -103,11 +103,21 @@ static inline void cpumask_set_cpu(int cpu, volatile
cpumask_t *dstp)
set_bit(cpumask_check(cpu), dstp->bits);
}
+static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
+{
+ __set_bit(cpumask_check(cpu), dstp->bits);
+}
+
static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpumask_check(cpu), dstp->bits);
}
+static inline void __cpumask_clear_cpu(int cpu, cpumask_t *dstp)
+{
+ __clear_bit(cpumask_check(cpu), dstp->bits);
+}
+
static inline void cpumask_setall(cpumask_t *dstp)
{
bitmap_fill(dstp->bits, nr_cpumask_bits);
@@ -122,16 +132,26 @@ static inline void cpumask_clear(cpumask_t *dstp)
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), (cpumask)->bits)
-static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
+static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
{
return test_and_set_bit(cpumask_check(cpu), addr->bits);
}
-static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
+static inline int __cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
+{
+ return __test_and_set_bit(cpumask_check(cpu), addr->bits);
+}
+
+static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)
{
return test_and_clear_bit(cpumask_check(cpu), addr->bits);
}
+static inline int __cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
+{
+ return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
+}
+
static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p)
{
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace.