[Xen-changelog] [xen master] xen: Some code motion to avoid having to do forward-declaration
commit 4f9ab4330ece38382dc01a8df84b208af515d66a
Author:     George Dunlap <george.dunlap@xxxxxxxxxx>
AuthorDate: Mon Jul 25 12:09:52 2016 +0100
Commit:     George Dunlap <george.dunlap@xxxxxxxxxx>
CommitDate: Tue Jul 26 10:42:21 2016 +0100

    xen: Some code motion to avoid having to do forward-declaration

    For sched_credit2, move the vcpu insert / remove / free functions
    near the domain insert / remove / alloc / free functions (and after
    cpu_pick).

    For sched_rt, move rt_cpu_pick() further up.

    This is pure code motion; no functional change.

    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Reviewed-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
    Acked-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
 xen/common/sched_credit2.c | 106 ++++++++++++++++++++++-----------------------
 xen/common/sched_rt.c      |  46 ++++++++++----------
 2 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 1d79de0..3d2716a 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1348,59 +1348,6 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched2_vcpu *svc = vc->sched_priv;
-    struct csched2_dom * const sdom = svc->sdom;
-    spinlock_t *lock;
-
-    ASSERT(!is_idle_vcpu(vc));
-    ASSERT(list_empty(&svc->runq_elem));
-
-    /* Add vcpu to runqueue of initial processor */
-    lock = vcpu_schedule_lock_irq(vc);
-
-    runq_assign(ops, vc);
-
-    vcpu_schedule_unlock_irq(lock, vc);
-
-    sdom->nr_vcpus++;
-
-    SCHED_STAT_CRANK(vcpu_insert);
-
-    CSCHED2_VCPU_CHECK(vc);
-}
-
-static void
-csched2_free_vdata(const struct scheduler *ops, void *priv)
-{
-    struct csched2_vcpu *svc = priv;
-
-    xfree(svc);
-}
-
-static void
-csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
-    spinlock_t *lock;
-
-    ASSERT(!is_idle_vcpu(vc));
-    ASSERT(list_empty(&svc->runq_elem));
-
-    SCHED_STAT_CRANK(vcpu_remove);
-
-    /* Remove from runqueue */
-    lock = vcpu_schedule_lock_irq(vc);
-
-    runq_deassign(ops, vc);
-
-    vcpu_schedule_unlock_irq(lock, vc);
-
-    svc->sdom->nr_vcpus--;
-}
-
-static void
 csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
@@ -2098,6 +2045,59 @@ csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
     csched2_free_domdata(ops, CSCHED2_DOM(dom));
 }
 
+static void
+csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched2_vcpu *svc = vc->sched_priv;
+    struct csched2_dom * const sdom = svc->sdom;
+    spinlock_t *lock;
+
+    ASSERT(!is_idle_vcpu(vc));
+    ASSERT(list_empty(&svc->runq_elem));
+
+    /* Add vcpu to runqueue of initial processor */
+    lock = vcpu_schedule_lock_irq(vc);
+
+    runq_assign(ops, vc);
+
+    vcpu_schedule_unlock_irq(lock, vc);
+
+    sdom->nr_vcpus++;
+
+    SCHED_STAT_CRANK(vcpu_insert);
+
+    CSCHED2_VCPU_CHECK(vc);
+}
+
+static void
+csched2_free_vdata(const struct scheduler *ops, void *priv)
+{
+    struct csched2_vcpu *svc = priv;
+
+    xfree(svc);
+}
+
+static void
+csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    spinlock_t *lock;
+
+    ASSERT(!is_idle_vcpu(vc));
+    ASSERT(list_empty(&svc->runq_elem));
+
+    SCHED_STAT_CRANK(vcpu_remove);
+
+    /* Remove from runqueue */
+    lock = vcpu_schedule_lock_irq(vc);
+
+    runq_deassign(ops, vc);
+
+    vcpu_schedule_unlock_irq(lock, vc);
+
+    svc->sdom->nr_vcpus--;
+}
+
 /* How long should we let this vcpu run for? */
 static s_time_t
 csched2_runtime(const struct scheduler *ops, int cpu, struct csched2_vcpu *snext)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 98524a6..bd3a2a0 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -582,6 +582,29 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 /*
+ * Pick a valid CPU for the vcpu vc
+ * Valid CPU of a vcpu is intesection of vcpu's affinity
+ * and available cpus
+ */
+static int
+rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+{
+    cpumask_t cpus;
+    cpumask_t *online;
+    int cpu;
+
+    online = cpupool_domain_cpumask(vc->domain);
+    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
+
+    cpu = cpumask_test_cpu(vc->processor, &cpus)
+            ? vc->processor
+            : cpumask_cycle(vc->processor, &cpus);
+    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
+
+    return cpu;
+}
+
+/*
  * Init/Free related code
  */
 static int
@@ -894,29 +917,6 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 }
 
 /*
- * Pick a valid CPU for the vcpu vc
- * Valid CPU of a vcpu is intesection of vcpu's affinity
- * and available cpus
- */
-static int
-rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
-{
-    cpumask_t cpus;
-    cpumask_t *online;
-    int cpu;
-
-    online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
-
-    cpu = cpumask_test_cpu(vc->processor, &cpus)
-            ? vc->processor
-            : cpumask_cycle(vc->processor, &cpus);
-    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
-
-    return cpu;
-}
-
-/*
  * Burn budget in nanosecond granularity
  */
 static void
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
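
For context on the motivation: C requires that a function be declared before its first use, so a caller that appears above its callee in a file needs a separately maintained prototype (a forward declaration). Moving the callee's definition above the caller, as this commit does for rt_cpu_pick() and the csched2 vcpu functions, removes that need. A minimal standalone sketch of the rule (hypothetical names, not Xen code):

    /*
     * forward_decl_demo.c -- minimal sketch of why definition order
     * matters in C (hypothetical names; not Xen code).
     */
    #include <stdio.h>

    /* Callee defined first: callers below need no prototype for it. */
    static int pick(int preferred)
    {
        return preferred >= 0 ? preferred : 0;
    }

    /* Caller defined after the callee. If pick() were instead defined
     * below this function, a forward declaration such as
     *     static int pick(int preferred);
     * would be needed above this point for the file to compile. */
    static int insert(int preferred)
    {
        return pick(preferred);
    }

    int main(void)
    {
        printf("%d\n", insert(3));   /* prints 3 */
        return 0;
    }

Grouping related functions so that callees precede callers keeps the file self-ordering and avoids prototypes that must be kept in sync with the definitions.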
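
The moved rt_cpu_pick() itself is a compact policy: intersect the online CPUs of the domain's cpupool with the vcpu's hard affinity, keep vc->processor if it is still in that set, and otherwise cycle to the next CPU that is. A toy illustration of the same selection rule using a plain bitmask rather than Xen's cpumask API (hypothetical helper names, and assuming a non-empty intersection, as the real code's ASSERT does):

    #include <stdio.h>

    /* Toy stand-in for cpumask_cycle(): return the next set bit in
     * 'mask' strictly after 'cpu', wrapping around; assumes mask != 0. */
    static int next_cpu(unsigned int mask, int cpu, int ncpus)
    {
        for (int i = 1; i <= ncpus; i++)
        {
            int c = (cpu + i) % ncpus;
            if (mask & (1u << c))
                return c;
        }
        return -1;   /* unreachable while mask is non-empty */
    }

    static int toy_cpu_pick(unsigned int online, unsigned int affinity,
                            int current, int ncpus)
    {
        unsigned int valid = online & affinity;   /* the intersection */

        /* Keep the current CPU if still valid, else cycle to the next. */
        return (valid & (1u << current)) ? current
                                         : next_cpu(valid, current, ncpus);
    }

    int main(void)
    {
        /* 4 CPUs, all online; affinity {1,3}; vcpu currently on CPU 2. */
        printf("%d\n", toy_cpu_pick(0xF, 0xA, 2, 4));   /* prints 3 */
        return 0;
    }

In the example, the intersection is {1,3}; CPU 2 is not in it, so the pick cycles forward from CPU 2 and lands on CPU 3.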