[Xen-devel] [PATCH 1/3] xen: Some code motion to avoid having to do forward-declaration
For sched_credit2, move the vcpu insert / remove / free functions near the
domain insert / remove / alloc / free functions (and after cpu_pick).

For sched_rt, move rt_cpu_pick() further up.

This is pure code motion; no functional change.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
CC: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
CC: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
CC: Meng Xu <mengxu@xxxxxxxxxxxxx>
---
 xen/common/sched_credit2.c | 118 ++++++++++++++++++++++-----------------------
 xen/common/sched_rt.c      |  46 +++++++++---------
 2 files changed, 82 insertions(+), 82 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 8b95a47..3b9aa27 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -971,65 +971,6 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 }
 
 static void
-csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched2_vcpu *svc = vc->sched_priv;
-    struct csched2_dom * const sdom = svc->sdom;
-    spinlock_t *lock;
-
-    printk("%s: Inserting %pv\n", __func__, vc);
-
-    BUG_ON(is_idle_vcpu(vc));
-
-    /* Add vcpu to runqueue of initial processor */
-    lock = vcpu_schedule_lock_irq(vc);
-
-    runq_assign(ops, vc);
-
-    vcpu_schedule_unlock_irq(lock, vc);
-
-    sdom->nr_vcpus++;
-
-    SCHED_STAT_CRANK(vcpu_insert);
-
-    CSCHED2_VCPU_CHECK(vc);
-}
-
-static void
-csched2_free_vdata(const struct scheduler *ops, void *priv)
-{
-    struct csched2_vcpu *svc = priv;
-
-    xfree(svc);
-}
-
-static void
-csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
-    struct csched2_dom * const sdom = svc->sdom;
-
-    BUG_ON( sdom == NULL );
-    BUG_ON( !list_empty(&svc->runq_elem) );
-
-    if ( ! is_idle_vcpu(vc) )
-    {
-        spinlock_t *lock;
-
-        SCHED_STAT_CRANK(vcpu_remove);
-
-        /* Remove from runqueue */
-        lock = vcpu_schedule_lock_irq(vc);
-
-        runq_deassign(ops, vc);
-
-        vcpu_schedule_unlock_irq(lock, vc);
-
-        svc->sdom->nr_vcpus--;
-    }
-}
-
-static void
 csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
@@ -1668,6 +1609,65 @@ csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
     csched2_free_domdata(ops, CSCHED2_DOM(dom));
 }
 
+static void
+csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched2_vcpu *svc = vc->sched_priv;
+    struct csched2_dom * const sdom = svc->sdom;
+    spinlock_t *lock;
+
+    printk("%s: Inserting %pv\n", __func__, vc);
+
+    BUG_ON(is_idle_vcpu(vc));
+
+    /* Add vcpu to runqueue of initial processor */
+    lock = vcpu_schedule_lock_irq(vc);
+
+    runq_assign(ops, vc);
+
+    vcpu_schedule_unlock_irq(lock, vc);
+
+    sdom->nr_vcpus++;
+
+    SCHED_STAT_CRANK(vcpu_insert);
+
+    CSCHED2_VCPU_CHECK(vc);
+}
+
+static void
+csched2_free_vdata(const struct scheduler *ops, void *priv)
+{
+    struct csched2_vcpu *svc = priv;
+
+    xfree(svc);
+}
+
+static void
+csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_dom * const sdom = svc->sdom;
+
+    BUG_ON( sdom == NULL );
+    BUG_ON( !list_empty(&svc->runq_elem) );
+
+    if ( ! is_idle_vcpu(vc) )
+    {
+        spinlock_t *lock;
+
+        SCHED_STAT_CRANK(vcpu_remove);
+
+        /* Remove from runqueue */
+        lock = vcpu_schedule_lock_irq(vc);
+
+        runq_deassign(ops, vc);
+
+        vcpu_schedule_unlock_irq(lock, vc);
+
+        svc->sdom->nr_vcpus--;
+    }
+}
+
 /* How long should we let this vcpu run for? */
 static s_time_t
 csched2_runtime(const struct scheduler *ops, int cpu, struct csched2_vcpu *snext)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 98524a6..bd3a2a0 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -582,6 +582,29 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
 }
 
 /*
+ * Pick a valid CPU for the vcpu vc
+ * Valid CPU of a vcpu is intesection of vcpu's affinity
+ * and available cpus
+ */
+static int
+rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+{
+    cpumask_t cpus;
+    cpumask_t *online;
+    int cpu;
+
+    online = cpupool_domain_cpumask(vc->domain);
+    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
+
+    cpu = cpumask_test_cpu(vc->processor, &cpus)
+            ? vc->processor
+            : cpumask_cycle(vc->processor, &cpus);
+    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
+
+    return cpu;
+}
+
+/*
  * Init/Free related code
  */
 static int
@@ -894,29 +917,6 @@ rt_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 }
 
 /*
- * Pick a valid CPU for the vcpu vc
- * Valid CPU of a vcpu is intesection of vcpu's affinity
- * and available cpus
- */
-static int
-rt_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
-{
-    cpumask_t cpus;
-    cpumask_t *online;
-    int cpu;
-
-    online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
-
-    cpu = cpumask_test_cpu(vc->processor, &cpus)
-            ? vc->processor
-            : cpumask_cycle(vc->processor, &cpus);
-    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
-
-    return cpu;
-}
-
-/*
  * Burn budget in nanosecond granularity
  */
 static void
-- 
2.1.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
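
For context on why the ordering matters: in C, a static function must be visible
(declared or defined) before it is called, so keeping definitions above their
callers avoids forward declarations entirely. Below is a minimal, self-contained
sketch of that pattern; it is not Xen code, and the names scale / scale_plus_one
are made up for illustration only.

#include <stdio.h>

/*
 * Without code motion, the caller below would need a forward declaration:
 *
 *     static int scale(int x);                    -- forward declaration
 *     static int scale_plus_one(int x) { return scale(x) + 1; }
 *     static int scale(int x) { return x * 2; }
 *
 * With the definition moved above its caller, the declaration goes away:
 */
static int scale(int x)
{
    return x * 2;
}

static int scale_plus_one(int x)
{
    return scale(x) + 1;    /* scale() is already visible at this point */
}

int main(void)
{
    printf("%d\n", scale_plus_one(20));    /* prints 41 */
    return 0;
}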