[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] x86/cpufreq: pass pointers to cpu masks where possible
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1278927708 -3600 # Node ID 9fbcd7c2b396f0470b0e25be6e2c80bbe462da3b # Parent a53824f4dcdf1fb4353712ea902fad45cab07271 x86/cpufreq: pass pointers to cpu masks where possible This includes replacing the bogus definition of cpumask_test_cpu() (introduced by c/s 20073) with a Linux compatible one and replacing the bad uses with cpu_isset(). Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx> --- xen/arch/x86/acpi/cpufreq/cpufreq.c | 39 ++++++++++++++++------------------- xen/arch/x86/acpi/cpufreq/powernow.c | 10 +++----- xen/arch/x86/hpet.c | 2 - xen/arch/x86/io_apic.c | 2 - xen/include/xen/cpumask.h | 17 ++++++--------- 5 files changed, 31 insertions(+), 39 deletions(-) diff -r a53824f4dcdf -r 9fbcd7c2b396 xen/arch/x86/acpi/cpufreq/cpufreq.c --- a/xen/arch/x86/acpi/cpufreq/cpufreq.c Fri Jul 09 18:49:12 2010 +0100 +++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c Mon Jul 12 10:41:48 2010 +0100 @@ -129,7 +129,7 @@ typedef union { struct drv_cmd { unsigned int type; - cpumask_t mask; + const cpumask_t *mask; drv_addr_union addr; u32 val; }; @@ -183,33 +183,32 @@ static void drv_read(struct drv_cmd *cmd ASSERT(cpus_weight(cmd->mask) == 1); /* to reduce IPI for the sake of performance */ - if (likely(cpu_isset(smp_processor_id(), cmd->mask))) + if (likely(cpumask_test_cpu(smp_processor_id(), cmd->mask))) do_drv_read((void *)cmd); else - on_selected_cpus(&cmd->mask, do_drv_read, cmd, 1); + on_selected_cpus(cmd->mask, do_drv_read, cmd, 1); } static void drv_write(struct drv_cmd *cmd) { - if ((cpus_weight(cmd->mask) == 1) && - cpu_isset(smp_processor_id(), cmd->mask)) + if (cpumask_equal(cmd->mask, cpumask_of(smp_processor_id()))) do_drv_write((void *)cmd); else - on_selected_cpus(&cmd->mask, do_drv_write, cmd, 1); -} - -static u32 get_cur_val(cpumask_t mask) + on_selected_cpus(cmd->mask, do_drv_write, cmd, 1); +} + +static u32 get_cur_val(const cpumask_t *mask) { struct cpufreq_policy *policy; struct 
processor_performance *perf; struct drv_cmd cmd; unsigned int cpu = smp_processor_id(); - if (unlikely(cpus_empty(mask))) - return 0; - - if (!cpu_isset(cpu, mask)) - cpu = first_cpu(mask); + if (unlikely(cpumask_empty(mask))) + return 0; + + if (!cpumask_test_cpu(cpu, mask)) + cpu = cpumask_first(mask); if (cpu >= NR_CPUS || !cpu_online(cpu)) return 0; @@ -232,7 +231,7 @@ static u32 get_cur_val(cpumask_t mask) return 0; } - cmd.mask = cpumask_of_cpu(cpu); + cmd.mask = cpumask_of(cpu); drv_read(&cmd); return cmd.val; @@ -378,7 +377,7 @@ static unsigned int get_cur_freq_on_cpu( data->acpi_data == NULL || data->freq_table == NULL)) return 0; - freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data); + freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); return freq; } @@ -402,7 +401,7 @@ static void feature_detect(void *info) } } -static unsigned int check_freqs(cpumask_t mask, unsigned int freq, +static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, struct acpi_cpufreq_data *data) { unsigned int cur_freq; @@ -473,12 +472,10 @@ static int acpi_cpufreq_target(struct cp return -ENODEV; } - cpus_clear(cmd.mask); - if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) - cmd.mask = online_policy_cpus; + cmd.mask = &online_policy_cpus; else - cpu_set(policy->cpu, cmd.mask); + cmd.mask = cpumask_of(policy->cpu); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; diff -r a53824f4dcdf -r 9fbcd7c2b396 xen/arch/x86/acpi/cpufreq/powernow.c --- a/xen/arch/x86/acpi/cpufreq/powernow.c Fri Jul 09 18:49:12 2010 +0100 +++ b/xen/arch/x86/acpi/cpufreq/powernow.c Mon Jul 12 10:41:48 2010 +0100 @@ -66,7 +66,7 @@ static struct cpufreq_driver powernow_cp struct drv_cmd { unsigned int type; - cpumask_t mask; + const cpumask_t *mask; u32 val; int turbo; }; @@ -124,12 +124,10 @@ static int powernow_cpufreq_target(struc return 0; } - cpus_clear(cmd.mask); - if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) 
- cmd.mask = online_policy_cpus; + cmd.mask = &online_policy_cpus; else - cpu_set(policy->cpu, cmd.mask); + cmd.mask = cpumask_of(policy->cpu); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; @@ -137,7 +135,7 @@ static int powernow_cpufreq_target(struc cmd.val = next_perf_state; cmd.turbo = policy->turbo; - on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 1); + on_selected_cpus(cmd.mask, transition_pstate, &cmd, 1); for_each_cpu_mask(j, online_policy_cpus) cpufreq_statistic_update(j, perf->state, next_perf_state); diff -r a53824f4dcdf -r 9fbcd7c2b396 xen/arch/x86/hpet.c --- a/xen/arch/x86/hpet.c Fri Jul 09 18:49:12 2010 +0100 +++ b/xen/arch/x86/hpet.c Mon Jul 12 10:41:48 2010 +0100 @@ -210,7 +210,7 @@ again: { write_lock_irq(&ch->cpumask_lock); - if ( cpumask_test_cpu(cpu, ch->cpumask) ) + if ( cpu_isset(cpu, ch->cpumask) ) { if ( per_cpu(timer_deadline_start, cpu) <= now ) cpu_set(cpu, mask); diff -r a53824f4dcdf -r 9fbcd7c2b396 xen/arch/x86/io_apic.c --- a/xen/arch/x86/io_apic.c Fri Jul 09 18:49:12 2010 +0100 +++ b/xen/arch/x86/io_apic.c Mon Jul 12 10:41:48 2010 +0100 @@ -460,7 +460,7 @@ void irq_complete_move(struct irq_desc * vector = get_irq_regs()->entry_vector; me = smp_processor_id(); - if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) + if (vector == cfg->vector && cpu_isset(me, cfg->domain)) send_cleanup_vector(cfg); } diff -r a53824f4dcdf -r 9fbcd7c2b396 xen/include/xen/cpumask.h --- a/xen/include/xen/cpumask.h Fri Jul 09 18:49:12 2010 +0100 +++ b/xen/include/xen/cpumask.h Mon Jul 12 10:41:48 2010 +0100 @@ -106,22 +106,13 @@ static inline void __cpus_clear(cpumask_ } /* No static inline type checking - see Subtlety (1) above. 
*/ +#define cpumask_test_cpu(cpu, cpumask) test_bit(cpu, (cpumask)->bits) #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) { return test_and_set_bit(cpu, addr->bits); -} - -/** - * cpumask_test_cpu - test for a cpu in a cpumask - */ -#define cpumask_test_cpu(cpu, cpumask) __cpu_test((cpu), &(cpumask)) - -static inline int __cpu_test(int cpu, cpumask_t *addr) -{ - return test_bit(cpu, addr->bits); } #define cpu_test_and_clear(cpu, cpumask) __cpu_test_and_clear((cpu), &(cpumask)) @@ -166,6 +157,7 @@ static inline void __cpus_complement(cpu bitmap_complement(dstp->bits, srcp->bits, nbits); } +#define cpumask_equal(src1, src2) __cpus_equal(src1, src2, NR_CPUS) #define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) static inline int __cpus_equal(const cpumask_t *src1p, const cpumask_t *src2p, int nbits) @@ -187,6 +179,7 @@ static inline int __cpus_subset(const cp return bitmap_subset(src1p->bits, src2p->bits, nbits); } +#define cpumask_empty(src) __cpus_empty(src, NR_CPUS) #define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) static inline int __cpus_empty(const cpumask_t *srcp, int nbits) { @@ -227,18 +220,21 @@ static inline void __cpus_shift_left(cpu bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +#define cpumask_first(src) __first_cpu(src, NR_CPUS) #define first_cpu(src) __first_cpu(&(src), NR_CPUS) static inline int __first_cpu(const cpumask_t *srcp, int nbits) { return min_t(int, nbits, find_first_bit(srcp->bits, nbits)); } +#define cpumask_next(n, src) __next_cpu(n, src, NR_CPUS) #define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS) static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits) { return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1)); } +#define cpumask_last(src) __last_cpu(src, NR_CPUS) #define last_cpu(src) __last_cpu(&(src), NR_CPUS) static inline 
int __last_cpu(const cpumask_t *srcp, int nbits) { @@ -250,6 +246,7 @@ static inline int __last_cpu(const cpuma return pcpu; } +#define cpumask_cycle(n, src) __cycle_cpu(n, src, NR_CPUS) #define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS) static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits) { _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.