[Xen-devel] [PATCH v4 07/15] xen: sched: rename v->cpu_affinity into v->cpu_hard_affinity
in order to distinguish it from the cpu_soft_affinity which will be
introduced in a later commit ("xen: sched: introduce soft-affinity
and use it instead of d->node-affinity").

This patch does not imply any functional change; it is basically the
result of something like the following:

 s/cpu_affinity/cpu_hard_affinity/g
 s/cpu_affinity_tmp/cpu_hard_affinity_tmp/g
 s/cpu_affinity_saved/cpu_hard_affinity_saved/g

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
Changes from v2:
 * patch has been moved one step up in the series.
---
 xen/arch/x86/traps.c      |   11 ++++++-----
 xen/common/domain.c       |   22 +++++++++++-----------
 xen/common/domctl.c       |    2 +-
 xen/common/keyhandler.c   |    2 +-
 xen/common/sched_credit.c |   12 ++++++------
 xen/common/sched_sedf.c   |    2 +-
 xen/common/schedule.c     |   21 +++++++++++----------
 xen/common/wait.c         |    4 ++--
 xen/include/xen/sched.h   |    8 ++++----
 9 files changed, 43 insertions(+), 41 deletions(-)

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d8b3eac..157031e 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3093,7 +3093,8 @@ static void nmi_mce_softirq(void)
     /* Set the tmp value unconditionally, so that
      * the check in the iret hypercall works. */
-    cpumask_copy(st->vcpu->cpu_affinity_tmp, st->vcpu->cpu_affinity);
+    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
+                 st->vcpu->cpu_hard_affinity);

     if ((cpu != st->processor)
        || (st->processor != st->vcpu->processor))
@@ -3128,11 +3129,11 @@ void async_exception_cleanup(struct vcpu *curr)
         return;

     /* Restore affinity. */
-    if ( !cpumask_empty(curr->cpu_affinity_tmp) &&
-         !cpumask_equal(curr->cpu_affinity_tmp, curr->cpu_affinity) )
+    if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
+         !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
     {
-        vcpu_set_affinity(curr, curr->cpu_affinity_tmp);
-        cpumask_clear(curr->cpu_affinity_tmp);
+        vcpu_set_affinity(curr, curr->cpu_hard_affinity_tmp);
+        cpumask_clear(curr->cpu_hard_affinity_tmp);
     }

     if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2cbc489..d8116c7 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -125,9 +125,9 @@ struct vcpu *alloc_vcpu(

     tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);

-    if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
-         !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
-         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
+    if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
+         !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;

@@ -156,9 +156,9 @@ struct vcpu *alloc_vcpu(
  fail_wq:
         destroy_waitqueue_vcpu(v);
  fail_free:
-        free_cpumask_var(v->cpu_affinity);
-        free_cpumask_var(v->cpu_affinity_tmp);
-        free_cpumask_var(v->cpu_affinity_saved);
+        free_cpumask_var(v->cpu_hard_affinity);
+        free_cpumask_var(v->cpu_hard_affinity_tmp);
+        free_cpumask_var(v->cpu_hard_affinity_saved);
         free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
@@ -371,7 +371,7 @@ void domain_update_node_affinity(struct domain *d)

         for_each_vcpu ( d, v )
         {
-            cpumask_and(online_affinity, v->cpu_affinity, online);
+            cpumask_and(online_affinity, v->cpu_hard_affinity, online);
             cpumask_or(cpumask, cpumask, online_affinity);
         }

@@ -734,9 +734,9 @@ static void complete_domain_destroy(struct rcu_head *head)
     for ( i = d->max_vcpus - 1; i >= 0; i-- )
         if ( (v = d->vcpu[i]) != NULL )
         {
-            free_cpumask_var(v->cpu_affinity);
-            free_cpumask_var(v->cpu_affinity_tmp);
-            free_cpumask_var(v->cpu_affinity_saved);
+            free_cpumask_var(v->cpu_hard_affinity);
+            free_cpumask_var(v->cpu_hard_affinity_tmp);
+            free_cpumask_var(v->cpu_hard_affinity_saved);
             free_cpumask_var(v->vcpu_dirty_cpumask);
             free_vcpu_struct(v);
         }
@@ -875,7 +875,7 @@ int vcpu_reset(struct vcpu *v)
     v->async_exception_mask = 0;
     memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
 #endif
-    cpumask_clear(v->cpu_affinity_tmp);
+    cpumask_clear(v->cpu_hard_affinity_tmp);
     clear_bit(_VPF_blocked, &v->pause_flags);
     clear_bit(_VPF_in_reset, &v->pause_flags);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 904d27b..5e0ac5c 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -629,7 +629,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         else
         {
             ret = cpumask_to_xenctl_bitmap(
-                &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
+                &op->u.vcpuaffinity.cpumap, v->cpu_hard_affinity);
         }
     }
     break;
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 8e4b3f8..c11f577 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -296,7 +296,7 @@ static void dump_domains(unsigned char key)
                    !vcpu_event_delivery_is_enabled(v));
             cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
             printk("dirty_cpus=%s ", tmpstr);
-            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
+            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
             printk("cpu_affinity=%s\n", tmpstr);
             printk("    pause_count=%d pause_flags=%lx\n",
                    atomic_read(&v->pause_count), v->pause_flags);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index db5512e..c6a2560 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -332,13 +332,13 @@ csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
     if ( step == CSCHED_BALANCE_NODE_AFFINITY )
     {
         cpumask_and(mask, CSCHED_DOM(vc->domain)->node_affinity_cpumask,
-                    vc->cpu_affinity);
+                    vc->cpu_hard_affinity);

         if ( unlikely(cpumask_empty(mask)) )
-            cpumask_copy(mask, vc->cpu_affinity);
+            cpumask_copy(mask, vc->cpu_hard_affinity);
     }
     else /* step == CSCHED_BALANCE_CPU_AFFINITY */
-        cpumask_copy(mask, vc->cpu_affinity);
+        cpumask_copy(mask, vc->cpu_hard_affinity);
 }

 static void burn_credits(struct csched_vcpu *svc, s_time_t now)
@@ -407,7 +407,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
             if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
                  && !__vcpu_has_node_affinity(new->vcpu,
-                                              new->vcpu->cpu_affinity) )
+                                              new->vcpu->cpu_hard_affinity) )
                 continue;

             /* Are there idlers suitable for new (for this balance step)? */
@@ -642,7 +642,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)

     /* Store in cpus the mask of online cpus on which the domain can run */
     online = cpupool_scheduler_cpumask(vc->domain->cpupool);
-    cpumask_and(&cpus, vc->cpu_affinity, online);
+    cpumask_and(&cpus, vc->cpu_hard_affinity, online);

     for_each_csched_balance_step( balance_step )
     {
@@ -1498,7 +1498,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
              * or counter.
              */
             if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
-                 && !__vcpu_has_node_affinity(vc, vc->cpu_affinity) )
+                 && !__vcpu_has_node_affinity(vc, vc->cpu_hard_affinity) )
                 continue;

             csched_balance_cpumask(vc, balance_step, csched_balance_mask);
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 7c24171..c219aed 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -396,7 +396,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
     cpumask_t *online;

     online = cpupool_scheduler_cpumask(v->domain->cpupool);
-    cpumask_and(&online_affinity, v->cpu_affinity, online);
+    cpumask_and(&online_affinity, v->cpu_hard_affinity, online);
     return cpumask_cycle(v->vcpu_id % cpumask_weight(&online_affinity) - 1,
                          &online_affinity);
 }
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 0f45f07..c4236c5 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -194,9 +194,9 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
      */
     v->processor = processor;
     if ( is_idle_domain(d) || d->is_pinned )
-        cpumask_copy(v->cpu_affinity, cpumask_of(processor));
+        cpumask_copy(v->cpu_hard_affinity, cpumask_of(processor));
     else
-        cpumask_setall(v->cpu_affinity);
+        cpumask_setall(v->cpu_hard_affinity);

     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
@@ -285,7 +285,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);

-        cpumask_setall(v->cpu_affinity);
+        cpumask_setall(v->cpu_hard_affinity);

         lock = vcpu_schedule_lock_irq(v);
         v->processor = new_p;
@@ -457,7 +457,7 @@ static void vcpu_migrate(struct vcpu *v)
          */
         if ( pick_called &&
              (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
-             cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
+             cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
              cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
             break;
@@ -561,7 +561,7 @@ void restore_vcpu_affinity(struct domain *d)
         {
             printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
                    d->domain_id, v->vcpu_id);
-            cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+            cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
             v->affinity_broken = 0;
         }

@@ -604,20 +604,21 @@ int cpu_disable_scheduler(unsigned int cpu)
             unsigned long flags;
             spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);

-            cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
+            cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
-                 cpumask_test_cpu(cpu, v->cpu_affinity) )
+                 cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
             {
                 printk(XENLOG_DEBUG "Breaking affinity for d%dv%d\n",
                        d->domain_id, v->vcpu_id);

                 if (system_state == SYS_STATE_suspend)
                 {
-                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    cpumask_copy(v->cpu_hard_affinity_saved,
+                                 v->cpu_hard_affinity);
                     v->affinity_broken = 1;
                 }

-                cpumask_setall(v->cpu_affinity);
+                cpumask_setall(v->cpu_hard_affinity);
             }

             if ( v->processor == cpu )
@@ -665,7 +666,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)

     lock = vcpu_schedule_lock_irq(v);

-    cpumask_copy(v->cpu_affinity, affinity);
+    cpumask_copy(v->cpu_hard_affinity, affinity);

     /* Always ask the scheduler to re-evaluate placement
      * when changing the affinity */
diff --git a/xen/common/wait.c b/xen/common/wait.c
index 3c9366c..3f6ff41 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -134,7 +134,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)

     /* Save current VCPU affinity; force wakeup on *this* CPU only. */
     wqv->wakeup_cpu = smp_processor_id();
-    cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
+    cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
     if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
     {
         gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
@@ -183,7 +183,7 @@ void check_wakeup_from_wait(void)
     {
         /* Re-set VCPU affinity and re-enter the scheduler. */
         struct vcpu *curr = current;
-        cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
+        cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
         if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
         {
             gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cbdf377..40e5927 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -192,11 +192,11 @@ struct vcpu
     spinlock_t       virq_lock;

     /* Bitmask of CPUs on which this VCPU may run. */
-    cpumask_var_t    cpu_affinity;
+    cpumask_var_t    cpu_hard_affinity;
     /* Used to change affinity temporarily. */
-    cpumask_var_t    cpu_affinity_tmp;
+    cpumask_var_t    cpu_hard_affinity_tmp;
     /* Used to restore affinity across S3. */
-    cpumask_var_t    cpu_affinity_saved;
+    cpumask_var_t    cpu_hard_affinity_saved;

     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -792,7 +792,7 @@ void watchdog_domain_destroy(struct domain *d);
 #define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
 #define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))
 #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
-                           cpumask_weight((v)->cpu_affinity) == 1)
+                           cpumask_weight((v)->cpu_hard_affinity) == 1)
 #ifdef HAS_PASSTHROUGH
 #define need_iommu(d)    ((d)->need_iommu)
 #else
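[Editorial note: the mechanical rename described in the changelog can be
reproduced with something like the sketch below. This is a hypothetical
reconstruction, not the command actually used for the patch; the single
substitution subsumes the _tmp and _saved variants listed above, since both
contain cpu_affinity as a substring.]

 # Hypothetical sketch: redo the rename over the files in the diffstat.
 # Caveat: a blind global substitution would also rewrite the user-visible
 # format string "cpu_affinity=%s" in keyhandler.c, which the patch
 # deliberately leaves unchanged, so that one hunk would need reverting
 # by hand afterwards.
 sed -i 's/cpu_affinity/cpu_hard_affinity/g' \
     xen/arch/x86/traps.c xen/common/domain.c xen/common/domctl.c \
     xen/common/keyhandler.c xen/common/sched_credit.c \
     xen/common/sched_sedf.c xen/common/schedule.c xen/common/wait.c \
     xen/include/xen/sched.h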