[Xen-devel] [PATCH 1/2] xen: introduce a cpumask with all bits set
There are several places in Xen that allocate a cpumask on the stack and
set all bits in it, just to use it as an initial mask allowing all cpus.

Save the stack space and avoid the need for runtime initialization by
defining a globally accessible cpumask_all variable.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/io_apic.c    |  4 +---
 xen/common/cpu.c          |  4 ++++
 xen/common/schedule.c     | 17 ++++-------------
 xen/include/xen/cpumask.h |  2 ++
 4 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index daa5e9e5ff..a5344ed727 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1881,7 +1881,6 @@ static void __init check_timer(void)
     int apic1, pin1, apic2, pin2;
     int vector, ret;
     unsigned long flags;
-    cpumask_t mask_all;
 
     local_irq_save(flags);
 
@@ -1892,8 +1891,7 @@ static void __init check_timer(void)
     vector = IRQ0_VECTOR;
     clear_irq_vector(0);
 
-    cpumask_setall(&mask_all);
-    if ((ret = bind_irq_vector(0, vector, &mask_all)))
+    if ((ret = bind_irq_vector(0, vector, &cpumask_all)))
         printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
 
     irq_desc[0].status &= ~IRQ_DISABLED;
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 653a56b840..836c62f97f 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -11,6 +11,10 @@ unsigned int __read_mostly nr_cpumask_bits
     = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
 #endif
 
+const cpumask_t cpumask_all = {
+    .bits[0 ... (BITS_TO_LONGS(NR_CPUS) - 1)] = ~0UL
+};
+
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index fd587622f4..60755a631e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -256,9 +256,6 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
 {
     struct domain *d = v->domain;
-    cpumask_t allcpus;
-
-    cpumask_setall(&allcpus);
 
     v->processor = processor;
 
@@ -280,9 +277,9 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
      * domain-0 VCPUs, are pinned onto their respective physical CPUs.
      */
     if ( is_idle_domain(d) || d->is_pinned )
-        sched_set_affinity(v, cpumask_of(processor), &allcpus);
+        sched_set_affinity(v, cpumask_of(processor), &cpumask_all);
     else
-        sched_set_affinity(v, &allcpus, &allcpus);
+        sched_set_affinity(v, &cpumask_all, &cpumask_all);
 
     /* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
     if ( is_idle_domain(d) )
@@ -361,7 +358,6 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     for_each_vcpu ( d, v )
     {
         spinlock_t *lock;
-        cpumask_t allcpus;
 
         vcpudata = v->sched_priv;
 
@@ -369,11 +365,9 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);
 
-        cpumask_setall(&allcpus);
-
         lock = vcpu_schedule_lock_irq(v);
 
-        sched_set_affinity(v, &allcpus, &allcpus);
+        sched_set_affinity(v, &cpumask_all, &cpumask_all);
         v->processor = new_p;
 
         /*
@@ -812,8 +806,6 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
             {
-                cpumask_t allcpus;
-
                 if ( v->affinity_broken )
                 {
                     /* The vcpu is temporarily pinned, can't move it. */
@@ -831,8 +823,7 @@ int cpu_disable_scheduler(unsigned int cpu)
                 else
                     printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
 
-                cpumask_setall(&allcpus);
-                sched_set_affinity(v, &allcpus, NULL);
+                sched_set_affinity(v, &cpumask_all, NULL);
             }
 
             if ( v->processor != cpu )
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index b4cc92a4f5..5a43438988 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -75,6 +75,8 @@ typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
+extern const cpumask_t cpumask_all;
+
 extern unsigned int nr_cpu_ids;
 
 #if NR_CPUS > 4 * BITS_PER_LONG
-- 
2.16.4
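
[Editorial aside, not part of the patch:] The cpumask_all definition above relies on
GCC's designated range initializer extension ("[first ... last] = value"), which fills
every element of the bits[] array with ~0UL at compile time; the const object can then
live in .rodata and no runtime cpumask_setall() is ever needed. A minimal standalone
sketch of the same technique, using illustrative names (DEMO_NR_CPUS, demo_mask_all)
that do not appear in Xen:

    /* demo.c - build with: gcc demo.c (range designators are a GNU extension) */
    #include <stdio.h>
    #include <limits.h>

    #define DEMO_NR_CPUS     256   /* illustrative stand-in for NR_CPUS */
    #define BITS_PER_LONG    (CHAR_BIT * (int)sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    typedef struct {
        unsigned long bits[BITS_TO_LONGS(DEMO_NR_CPUS)];
    } demo_cpumask_t;

    /* Every array element is initialized to all-ones at compile time,
     * mirroring what the patch does for cpumask_all. */
    static const demo_cpumask_t demo_mask_all = {
        .bits[0 ... (BITS_TO_LONGS(DEMO_NR_CPUS) - 1)] = ~0UL
    };

    int main(void)
    {
        /* Each word of the mask reads back as all-ones. */
        for (unsigned int i = 0; i < BITS_TO_LONGS(DEMO_NR_CPUS); i++)
            printf("bits[%u] = %#lx\n", i, demo_mask_all.bits[i]);
        return 0;
    }

Because the initializer is evaluated at compile time, callers simply pass &cpumask_all
instead of building an all-ones mask on the stack, which is exactly the simplification
the diff applies in io_apic.c and schedule.c.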