[Xen-devel] [PATCH RFC 23/49] xen/sched: move is_running indicator to struct sched_item
Move the is_running indicator from struct vcpu to struct sched_item. For
non-scheduler parts introduce a vcpu_running() access function for
obtaining the related value. At the same time introduce a state_entry_time
field in struct sched_item being updated whenever the is_running indicator
is changed. Use that new field in the schedulers instead of the similar
vcpu field.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/domain.c                |  2 +-
 xen/arch/x86/hvm/hvm.c               |  3 ++-
 xen/arch/x86/hvm/viridian/viridian.c |  1 +
 xen/arch/x86/hvm/vmx/vmcs.c          |  6 ++++--
 xen/arch/x86/hvm/vmx/vmx.c           |  5 +++--
 xen/common/domctl.c                  |  4 ++--
 xen/common/keyhandler.c              |  2 +-
 xen/common/sched_credit.c            | 10 +++++-----
 xen/common/sched_credit2.c           | 18 +++++++++---------
 xen/common/sched_rt.c                |  2 +-
 xen/common/schedule.c                | 19 +++++++++++--------
 xen/include/xen/sched-if.h           | 11 ++++++++++-
 xen/include/xen/sched.h              |  2 --
 13 files changed, 50 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 5d8f3255cb..53b8fa1c9d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2137,7 +2137,7 @@ void vcpu_kick(struct vcpu *v)
      * NB2. We save the running flag across the unblock to avoid a needless
      *      IPI for domains that we IPI'd to unblock.
      */
-    bool running = v->is_running;
+    bool running = vcpu_running(v);
 
     vcpu_unblock(v);
     if ( running && (in_irq() || (v != current)) )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8adbb61b57..f184136f81 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -23,6 +23,7 @@
 #include <xen/lib.h>
 #include <xen/trace.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
 #include <xen/domain.h>
@@ -3984,7 +3985,7 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
     /* Now that all VCPUs are signalled to deschedule, we wait... */
     for_each_vcpu ( d, v )
         if ( v != current && flush_vcpu(ctxt, v) )
-            while ( !vcpu_runnable(v) && v->is_running )
+            while ( !vcpu_runnable(v) && vcpu_running(v) )
                 cpu_relax();
 
     /* All other vcpus are paused, safe to unlock now. */
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 425af56856..5779efc81f 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -6,6 +6,7 @@
  */
 
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/version.h>
 #include <xen/hypercall.h>
 #include <xen/domain_page.h>
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 74f2a08cfd..257fb00528 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -23,6 +23,8 @@
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <xen/keyhandler.h>
+#include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/vm_event.h>
 #include <asm/current.h>
 #include <asm/cpufeature.h>
@@ -562,7 +564,7 @@ void vmx_vmcs_reload(struct vcpu *v)
      * v->arch.hvm.vmx.vmcs_lock here. However, with interrupts disabled
      * the VMCS can't be taken away from us anymore if we still own it.
      */
-    ASSERT(v->is_running || !local_irq_is_enabled());
+    ASSERT(vcpu_running(v) || !local_irq_is_enabled());
 
     if ( v->arch.hvm.vmx.vmcs_pa == this_cpu(current_vmcs) )
         return;
@@ -1576,7 +1578,7 @@ void vmx_vcpu_flush_pml_buffer(struct vcpu *v)
     uint64_t *pml_buf;
     unsigned long pml_idx;
 
-    ASSERT((v == current) || (!vcpu_runnable(v) && !v->is_running));
+    ASSERT((v == current) || (!vcpu_runnable(v) && !vcpu_running(v)));
     ASSERT(vmx_vcpu_pml_enabled(v));
 
     vmx_vmcs_enter(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 725dd88c13..0056fd0191 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -19,6 +19,7 @@
 #include <xen/lib.h>
 #include <xen/trace.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
 #include <xen/domain_page.h>
@@ -907,7 +908,7 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
     if ( unlikely(!this_cpu(vmxon)) )
         return;
 
-    if ( !v->is_running )
+    if ( !vcpu_running(v) )
     {
         /*
          * When this vCPU isn't marked as running anymore, a remote pCPU's
@@ -2004,7 +2005,7 @@ static void vmx_process_isr(int isr, struct vcpu *v)
 
 static void __vmx_deliver_posted_interrupt(struct vcpu *v)
 {
-    bool_t running = v->is_running;
+    bool_t running = vcpu_running(v);
 
     vcpu_unblock(v);
     /*
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 8464713d2b..6a9a54130d 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -173,7 +173,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
     {
         if ( !(v->pause_flags & VPF_blocked) )
             flags &= ~XEN_DOMINF_blocked;
-        if ( v->is_running )
+        if ( vcpu_running(v) )
             flags |= XEN_DOMINF_running;
         info->nr_online_vcpus++;
     }
@@ -841,7 +841,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 
         op->u.getvcpuinfo.online   = !(v->pause_flags & VPF_down);
         op->u.getvcpuinfo.blocked  = !!(v->pause_flags & VPF_blocked);
-        op->u.getvcpuinfo.running  = v->is_running;
+        op->u.getvcpuinfo.running  = vcpu_running(v);
         op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
         op->u.getvcpuinfo.cpu      = v->processor;
         ret = 0;
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index f50df5841d..0d312ff953 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -306,7 +306,7 @@ static void dump_domains(unsigned char key)
         printk("    VCPU%d: CPU%d [has=%c] poll=%d "
                "upcall_pend=%02x upcall_mask=%02x ",
                v->vcpu_id, v->processor,
-               v->is_running ? 'T':'F', v->poll_evtchn,
+               vcpu_running(v) ? 'T':'F', v->poll_evtchn,
                vcpu_info(v, evtchn_upcall_pending),
                !vcpu_event_delivery_is_enabled(v));
         if ( vcpu_cpu_dirty(v) )
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 29076e362b..6d0639109a 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -723,7 +723,7 @@ __csched_vcpu_is_migrateable(const struct csched_private *prv, struct vcpu *vc,
      * The caller is supposed to have already checked that vc is also
      * not running.
      */
-    ASSERT(!vc->is_running);
+    ASSERT(!vcpu_running(vc));
 
     return !__csched_vcpu_is_cache_hot(prv, vc) &&
            cpumask_test_cpu(dest_cpu, mask);
@@ -1047,7 +1047,7 @@ csched_item_insert(const struct scheduler *ops, struct sched_item *item)
 
     lock = item_schedule_lock_irq(item);
 
-    if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running )
+    if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vcpu_running(vc) )
         runq_insert(svc);
 
     item_schedule_unlock_irq(lock, item);
@@ -1659,8 +1659,8 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
          * vCPUs with useful soft affinities in some sort of bitmap
          * or counter.
          */
-        if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
-                                !has_soft_affinity(vc->sched_item)) )
+        if ( vcpu_running(vc) || (balance_step == BALANCE_SOFT_AFFINITY &&
+                                  !has_soft_affinity(vc->sched_item)) )
             continue;
 
         affinity_balance_cpumask(vc->sched_item, balance_step, cpumask_scratch);
@@ -1868,7 +1868,7 @@ csched_schedule(
                     (unsigned char *)&d);
     }
 
-    runtime = now - current->runstate.state_entry_time;
+    runtime = now - current->sched_item->state_entry_time;
     if ( runtime < 0 ) /* Does this ever happen? */
         runtime = 0;
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 9bf045d20f..5aa819b2c5 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1283,7 +1283,7 @@ runq_insert(const struct scheduler *ops, struct csched2_item *svc)
 
     ASSERT(&svc->rqd->runq == runq);
     ASSERT(!is_idle_vcpu(svc->vcpu));
-    ASSERT(!svc->vcpu->is_running);
+    ASSERT(!vcpu_running(svc->vcpu));
     ASSERT(!(svc->flags & CSFLAG_scheduled));
 
     list_for_each( iter, runq )
@@ -1340,8 +1340,8 @@ static inline bool is_preemptable(const struct csched2_item *svc,
     if ( ratelimit <= CSCHED2_RATELIMIT_TICKLE_TOLERANCE )
         return true;
 
-    ASSERT(svc->vcpu->is_running);
-    return now - svc->vcpu->runstate.state_entry_time >
+    ASSERT(vcpu_running(svc->vcpu));
+    return now - svc->vcpu->sched_item->state_entry_time >
            ratelimit - CSCHED2_RATELIMIT_TICKLE_TOLERANCE;
 }
 
@@ -2931,7 +2931,7 @@ csched2_dom_cntl(
         {
             svc = csched2_item(v->sched_item);
             lock = item_schedule_lock(svc->vcpu->sched_item);
-            if ( v->is_running )
+            if ( vcpu_running(v) )
             {
                 unsigned int cpu = v->processor;
                 struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
@@ -3204,8 +3204,8 @@ csched2_runtime(const struct scheduler *ops, int cpu,
     if ( prv->ratelimit_us )
     {
         s_time_t ratelimit_min = MICROSECS(prv->ratelimit_us);
-        if ( snext->vcpu->is_running )
-            ratelimit_min = snext->vcpu->runstate.state_entry_time +
+        if ( vcpu_running(snext->vcpu) )
+            ratelimit_min = snext->vcpu->sched_item->state_entry_time +
                             MICROSECS(prv->ratelimit_us) - now;
         if ( ratelimit_min > min_time )
             min_time = ratelimit_min;
@@ -3302,7 +3302,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
      * no point forcing it to do so until rate limiting expires.
      */
     if ( !yield && prv->ratelimit_us && vcpu_runnable(scurr->vcpu) &&
-         (now - scurr->vcpu->runstate.state_entry_time) <
+         (now - scurr->vcpu->sched_item->state_entry_time) <
          MICROSECS(prv->ratelimit_us) )
     {
         if ( unlikely(tb_init_done) )
@@ -3313,7 +3313,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
             } d;
             d.dom = scurr->vcpu->domain->domain_id;
             d.vcpu = scurr->vcpu->vcpu_id;
-            d.runtime = now - scurr->vcpu->runstate.state_entry_time;
+            d.runtime = now - scurr->vcpu->sched_item->state_entry_time;
             __trace_var(TRC_CSCHED2_RATELIMIT, 1, sizeof(d),
                         (unsigned char *)&d);
 
@@ -3561,7 +3561,7 @@ csched2_schedule(
     if ( snext != scurr )
     {
         ASSERT(snext->rqd == rqd);
-        ASSERT(!snext->vcpu->is_running);
+        ASSERT(!vcpu_running(snext->vcpu));
 
         runq_remove(snext);
         __set_bit(__CSFLAG_scheduled, &snext->flags);
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 374a9d2383..9efe807230 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -914,7 +914,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
     {
         replq_insert(ops, svc);
 
-        if ( !vc->is_running )
+        if ( !vcpu_running(vc) )
             runq_insert(ops, svc);
     }
     item_schedule_unlock_irq(lock, item);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index b295b0b81e..ae2a6d0323 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -356,7 +356,8 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     if ( is_idle_domain(d) )
     {
         per_cpu(sched_res, v->processor)->curr = item;
-        v->is_running = 1;
+        item->is_running = 1;
+        item->state_entry_time = NOW();
     }
     else
     {
@@ -555,7 +556,7 @@ void vcpu_sleep_sync(struct vcpu *v)
 {
     vcpu_sleep_nosync(v);
 
-    while ( !vcpu_runnable(v) && v->is_running )
+    while ( !vcpu_runnable(v) && vcpu_running(v) )
         cpu_relax();
 
     sync_vcpu_execstate(v);
@@ -680,7 +681,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
      * context_saved(); and in any case, if the bit is cleared, then
      * someone else has already done the work so we don't need to.
      */
-    if ( v->is_running || !test_bit(_VPF_migrating, &v->pause_flags) )
+    if ( vcpu_running(v) || !test_bit(_VPF_migrating, &v->pause_flags) )
         return;
 
     old_cpu = new_cpu = v->processor;
@@ -734,7 +735,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
      * because they both happen in (different) spinlock regions, and those
      * regions are strictly serialised.
      */
-    if ( v->is_running ||
+    if ( vcpu_running(v) ||
          !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
     {
         sched_spin_unlock_double(old_lock, new_lock, flags);
@@ -762,7 +763,7 @@ void vcpu_force_reschedule(struct vcpu *v)
 {
     spinlock_t *lock = item_schedule_lock_irq(v->sched_item);
 
-    if ( v->is_running )
+    if ( vcpu_running(v) )
         vcpu_migrate_start(v);
 
     item_schedule_unlock_irq(lock, v->sched_item);
@@ -1597,8 +1598,9 @@ static void schedule(void)
      * switch, else lost_records resume will not work properly.
      */
 
-    ASSERT(!next->is_running);
-    next->is_running = 1;
+    ASSERT(!vcpu_running(next));
+    next->sched_item->is_running = 1;
+    next->sched_item->state_entry_time = now;
 
     pcpu_schedule_unlock_irq(lock, cpu);
 
@@ -1619,7 +1621,8 @@ void context_saved(struct vcpu *prev)
 
     /* Clear running flag /after/ writing context to memory. */
     smp_wmb();
-    prev->is_running = 0;
+    prev->sched_item->is_running = 0;
+    prev->sched_item->state_entry_time = NOW();
 
     /* Check for migration request /after/ clearing running flag. */
     smp_mb();
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 3dcf1dca19..5cacede473 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -59,8 +59,12 @@ struct sched_item {
 
     /* Last time when item has been scheduled out. */
     uint64_t last_run_time;
+    /* Last time item got (de-)scheduled. */
+    uint64_t state_entry_time;
 
-    /* Item needs affinity restored. */
+    /* Currently running on a CPU? */
+    bool is_running;
+    /* Item needs affinity restored */
     bool affinity_broken;
     /* Does soft affinity actually play a role (given hard affinity)? */
     bool soft_aff_effective;
@@ -132,6 +136,11 @@ static inline struct sched_item *sched_idle_item(unsigned int cpu)
     return idle_vcpu[cpu]->sched_item;
 }
 
+static inline bool vcpu_running(struct vcpu *v)
+{
+    return v->sched_item->is_running;
+}
+
 /*
  * Scratch space, for avoiding having too many cpumask_t on the stack.
  * Within each scheduler, when using the scratch mask of one pCPU:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 4b59de42da..21a7fa14ce 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -181,8 +181,6 @@ struct vcpu
     bool             fpu_dirtied;
     /* Initialization completed for this VCPU? */
     bool             is_initialised;
-    /* Currently running on a CPU? */
-    bool             is_running;
     /* VCPU should wake fast (do not deep sleep the CPU). */
     bool             is_urgent;
-- 
2.16.4
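
For readers coming to this from outside the scheduler code, the sketch below
illustrates the pattern the patch introduces: the running flag and its
timestamp live in the scheduling unit, the vcpu only keeps a pointer to it,
non-scheduler code goes through an accessor, and every place that flips the
flag also refreshes the timestamp. This is a minimal, stand-alone
illustration, not Xen code: the struct layouts are heavily simplified, and
now_ns(), sched_item_set_running() and time_in_current_state() are made-up
stand-ins for NOW() and for the open-coded updates in schedule() and
context_saved().

/* Simplified, stand-alone sketch of the is_running / state_entry_time
 * split introduced by this patch.  All types and helpers below are
 * illustrative only, not the real Xen definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sched_item {
    bool is_running;            /* currently running on a CPU? */
    uint64_t state_entry_time;  /* last time is_running was changed */
};

struct vcpu {
    struct sched_item *sched_item;
};

/* Stand-in for Xen's NOW(): monotonic time in nanoseconds. */
static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Accessor used by non-scheduler code instead of reading v->is_running. */
static inline bool vcpu_running(const struct vcpu *v)
{
    return v->sched_item->is_running;
}

/* Every site that flips is_running also refreshes state_entry_time. */
static void sched_item_set_running(struct sched_item *item, bool running)
{
    item->is_running = running;
    item->state_entry_time = now_ns();
}

/* Scheduler-side use of the new field, e.g. for ratelimit decisions:
 * how long has this unit been in its current state? */
static uint64_t time_in_current_state(const struct sched_item *item)
{
    return now_ns() - item->state_entry_time;
}

int main(void)
{
    struct sched_item item = { 0 };
    struct vcpu v = { .sched_item = &item };

    sched_item_set_running(&item, true);
    printf("running: %d, in this state for %llu ns\n",
           vcpu_running(&v),
           (unsigned long long)time_in_current_state(&item));

    sched_item_set_running(&item, false);
    printf("running: %d\n", vcpu_running(&v));
    return 0;
}

Keeping the timestamp next to the flag is what lets the schedulers drop their
use of runstate.state_entry_time: the ratelimit checks in credit and credit2
only need to know how long the unit has been in its current running state,
which is exactly now minus sched_item->state_entry_time.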