[Xen-devel] [RFC Patch 2/3] Remove extra queues, latency scaling, and weight support from sedf
From: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>

The extra queues and latency scaling are meant to make the sedf scheduler
work conserving.  While this was useful in the past, with the advent of the
credit scheduler and cpupools it is no longer especially useful.

Also remove weight support, which adds extra complexity to the scheduling
code solely to make configuring the scheduler easier.

Signed-off-by: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>
Signed-off-by: Joshua Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
---
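For reference, the policy that is left after this change is plain EDF.  The
sketch below is illustrative only: it is standalone C, not part of the patch,
and the names edf_vcpu, edf_pick_next and wake_at are made up for this note
rather than taken from the Xen code.  It shows the decision the simplified
sedf_do_schedule() reduces to: run the head of the deadline-sorted run queue,
otherwise idle until the head of the wait queue reaches the start of its next
period.

#include <stdint.h>
#include <stddef.h>

typedef int64_t s_time_t;

struct edf_vcpu {
    s_time_t deadl_abs;     /* absolute deadline of the current period */
    s_time_t period;        /* relative deadline */
    s_time_t slice;         /* worst-case execution time per period */
    s_time_t cputime;       /* time consumed in the current period */
    struct edf_vcpu *next;  /* next entry in the queue */
};

/*
 * Pick the next VCPU to run, earliest deadline first.  'runq' is assumed to
 * be sorted by deadl_abs and 'waitq' by the start of each VCPU's next period
 * (deadl_abs - period), mirroring the runnableq/waitq handling in the patch.
 * Returns NULL when the CPU should idle; *wake_at is when the scheduler
 * wants to be invoked again.
 */
struct edf_vcpu *edf_pick_next(struct edf_vcpu *runq, struct edf_vcpu *waitq,
                               s_time_t now, s_time_t *wake_at)
{
    if ( runq != NULL )
    {
        /* Head of the run queue has the earliest deadline: run it for the
         * remainder of its slice. */
        *wake_at = now + (runq->slice - runq->cputime);
        return runq;
    }

    if ( waitq != NULL )
    {
        /* Nothing runnable: idle until the first waiting VCPU's period
         * begins, as the patched sedf_do_schedule() does. */
        *wake_at = waitq->deadl_abs - waitq->period;
        return NULL;
    }

    /* Completely idle; pick an arbitrary re-check interval (20ms here). */
    *wake_at = now + 20000000;
    return NULL;
}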
 xen/common/sched_sedf.c |  573 +++--------------------------------------------
 1 file changed, 34 insertions(+), 539 deletions(-)

diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 6ebf72b..7a827c8 100755
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -25,19 +25,8 @@
 #define CHECK(_p) ((void)0)
 #endif
 
-#define EXTRA_NONE (0)
-#define EXTRA_AWARE (1)
-#define EXTRA_RUN_PEN (2)
-#define EXTRA_RUN_UTIL (4)
-#define EXTRA_WANT_PEN_Q (8)
-#define EXTRA_PEN_Q (0)
-#define EXTRA_UTIL_Q (1)
 #define SEDF_ASLEEP (16)
 
-#define EXTRA_QUANTUM (MICROSECS(500))
-#define WEIGHT_PERIOD (MILLISECS(100))
-#define WEIGHT_SAFETY (MILLISECS(5))
-
 #define DEFAULT_PERIOD (MILLISECS(20))
 #define DEFAULT_SLICE (MILLISECS(10))
 
@@ -61,24 +50,13 @@ struct sedf_priv_info {
 struct sedf_vcpu_info {
     struct vcpu *vcpu;
     struct list_head list;
-    struct list_head extralist[2];
 
     /* Parameters for EDF */
     s_time_t period;  /* = relative deadline */
    s_time_t slice;   /* = worst case execution time */
 
-    /* Advaced Parameters */
-
-    /* Latency Scaling */
-    s_time_t period_orig;
-    s_time_t slice_orig;
-    s_time_t latency;
-
     /* Status of domain */
     int status;
-    /* Weights for "Scheduling for beginners/ lazy/ etc." ;) */
-    short weight;
-    short extraweight;
     /* Bookkeeping */
     s_time_t deadl_abs;
     s_time_t sched_start_abs;
@@ -87,28 +65,18 @@ struct sedf_vcpu_info {
     s_time_t block_abs;
     s_time_t unblock_abs;
 
-    /* Scores for {util, block penalty}-weighted extratime distribution */
-    int score[2];
-    s_time_t short_block_lost_tot;
-
-    /* Statistics */
-    s_time_t extra_time_tot;
-
 #ifdef SEDF_STATS
     s_time_t block_time_tot;
     s_time_t penalty_time_tot;
     int block_tot;
     int short_block_tot;
     int long_block_tot;
-    int pen_extra_blocks;
-    int pen_extra_slices;
 #endif
 };
 
 struct sedf_cpu_info {
     struct list_head runnableq;
     struct list_head waitq;
-    struct list_head extraq[2];
     s_time_t current_slice_expires;
 };
 
@@ -118,115 +86,19 @@ struct sedf_cpu_info {
 #define CPU_INFO(cpu)   \
     ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
 #define LIST(d)         (&EDOM_INFO(d)->list)
-#define EXTRALIST(d,i)  (&(EDOM_INFO(d)->extralist[i]))
 #define RUNQ(cpu)       (&CPU_INFO(cpu)->runnableq)
 #define WAITQ(cpu)      (&CPU_INFO(cpu)->waitq)
-#define EXTRAQ(cpu,i)   (&(CPU_INFO(cpu)->extraq[i]))
 #define IDLETASK(cpu)   (idle_vcpu[cpu])
 
 #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
 
 #define DIV_UP(x,y) (((x) + (y) - 1) / y)
 
-#define extra_runs(inf)      ((inf->status) & 6)
-#define extra_get_cur_q(inf) (((inf->status & 6) >> 1)-1)
 #define sedf_runnable(edom)  (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
 
 
 static void sedf_dump_cpu_state(const struct scheduler *ops, int i);
 
-static inline int extraq_on(struct vcpu *d, int i)
-{
-    return ((EXTRALIST(d,i)->next != NULL) &&
-            (EXTRALIST(d,i)->next != EXTRALIST(d,i)));
-}
-
-static inline void extraq_add_head(struct vcpu *d, int i)
-{
-    list_add(EXTRALIST(d,i), EXTRAQ(d->processor,i));
-    ASSERT(extraq_on(d, i));
-}
-
-static inline void extraq_add_tail(struct vcpu *d, int i)
-{
-    list_add_tail(EXTRALIST(d,i), EXTRAQ(d->processor,i));
-    ASSERT(extraq_on(d, i));
-}
-
-static inline void extraq_del(struct vcpu *d, int i)
-{
-    struct list_head *list = EXTRALIST(d,i);
-    ASSERT(extraq_on(d,i));
-    list_del(list);
-    list->next = NULL;
-    ASSERT(!extraq_on(d, i));
-}
-
-/*
- * Adds a domain to the queue of processes which are aware of extra time. List
- * is sorted by score, where a lower score means higher priority for an extra
- * slice. It also updates the score, by simply subtracting a fixed value from
- * each entry, in order to avoid overflow. The algorithm works by simply
- * charging each domain that recieved extratime with an inverse of its weight.
- */
-static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub)
-{
-    struct list_head *cur;
-    struct sedf_vcpu_info *curinf;
-
-    ASSERT(!extraq_on(d,i));
-
-    /*
-     * Iterate through all elements to find our "hole" and on our way
-     * update all the other scores.
-     */
-    list_for_each ( cur, EXTRAQ(d->processor, i) )
-    {
-        curinf = list_entry(cur,struct sedf_vcpu_info,extralist[i]);
-        curinf->score[i] -= sub;
-        if ( EDOM_INFO(d)->score[i] < curinf->score[i] )
-            break;
-    }
-
-    /* cur now contains the element, before which we'll enqueue */
-    list_add(EXTRALIST(d,i),cur->prev);
-
-    /* Continue updating the extraq */
-    if ( (cur != EXTRAQ(d->processor,i)) && sub )
-    {
-        for ( cur = cur->next; cur != EXTRAQ(d->processor,i); cur = cur->next )
-        {
-            curinf = list_entry(cur,struct sedf_vcpu_info, extralist[i]);
-            curinf->score[i] -= sub;
-        }
-    }
-
-    ASSERT(extraq_on(d,i));
-}
-static inline void extraq_check(struct vcpu *d)
-{
-    if ( extraq_on(d, EXTRA_UTIL_Q) )
-    {
-        if ( !(EDOM_INFO(d)->status & EXTRA_AWARE) &&
-             !extra_runs(EDOM_INFO(d)) )
-            extraq_del(d, EXTRA_UTIL_Q);
-    }
-    else
-    {
-        if ( (EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d) )
-            extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
-    }
-}
-
-static inline void extraq_check_add_unblocked(struct vcpu *d, int priority)
-{
-    struct sedf_vcpu_info *inf = EDOM_INFO(d);
-
-    if ( inf->status & EXTRA_AWARE )
-        /* Put on the weighted extraq without updating any scores */
-        extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
-}
-
 static inline int __task_on_queue(struct vcpu *d)
 {
     return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
@@ -299,11 +171,7 @@ static inline void __add_to_runqueue_sort(struct vcpu *v)
 
 static void sedf_insert_vcpu(const struct scheduler *ops, struct vcpu *v)
 {
-    if ( !is_idle_vcpu(v) )
-    {
-        extraq_check(v);
-    }
-    else
+    if ( is_idle_vcpu(v) )
     {
         EDOM_INFO(v)->deadl_abs = 0;
         EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
     }
@@ -320,11 +188,8 @@ static void *sedf_alloc_vdata(const struct scheduler *ops, struct vcpu *v, void
 
     inf->vcpu = v;
 
-    /* Every VCPU gets an equal share of extratime by default */
     inf->deadl_abs = 0;
-    inf->latency = 0;
     inf->status = SEDF_ASLEEP;
-    inf->extraweight = 0;
 
     if (v->domain->domain_id == 0)
     {
@@ -338,10 +203,7 @@ static void *sedf_alloc_vdata(const struct scheduler *ops, struct vcpu *v, void
         inf->slice = 0;
     }
 
-    inf->period_orig = inf->period; inf->slice_orig = inf->slice;
     INIT_LIST_HEAD(&(inf->list));
-    INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
-    INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
 
     SCHED_STAT_CRANK(vcpu_init);
 
@@ -357,8 +219,6 @@ sedf_alloc_pdata(const struct scheduler *ops, int cpu)
     BUG_ON(spc == NULL);
     INIT_LIST_HEAD(&spc->waitq);
     INIT_LIST_HEAD(&spc->runnableq);
-    INIT_LIST_HEAD(&spc->extraq[EXTRA_PEN_Q]);
-    INIT_LIST_HEAD(&spc->extraq[EXTRA_UTIL_Q]);
 
     return (void *)spc;
 }
@@ -441,20 +301,6 @@ static void desched_edf_dom(s_time_t now, struct vcpu* d)
     if ( inf->cputime >= inf->slice )
     {
         inf->cputime -= inf->slice;
-
-        if ( inf->period < inf->period_orig )
-        {
-            /* This domain runs in latency scaling or burst mode */
-            inf->period *= 2;
-            inf->slice *= 2;
-            if ( (inf->period > inf->period_orig) ||
-                 (inf->slice > inf->slice_orig) )
-            {
-                /* Reset slice and period */
-                inf->period = inf->period_orig;
-                inf->slice = inf->slice_orig;
-            }
-        }
 
         /* Set next deadline */
         inf->deadl_abs += inf->period;
@@ -465,18 +311,8 @@ static void desched_edf_dom(s_time_t now, struct vcpu* d)
     {
         __add_to_waitqueue_sort(d);
     }
-    else
-    {
-        /* We have a blocked realtime task -> remove it from exqs too */
-        if ( extraq_on(d, EXTRA_PEN_Q) )
-            extraq_del(d, EXTRA_PEN_Q);
-        if ( extraq_on(d, EXTRA_UTIL_Q) )
-            extraq_del(d, EXTRA_UTIL_Q);
-    }
 
     ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
-    ASSERT(IMPLY(extraq_on(d, EXTRA_UTIL_Q) || extraq_on(d, EXTRA_PEN_Q),
-                 sedf_runnable(d)));
 }
 
 
@@ -564,175 +400,6 @@ static void update_queues(
 }
 
 
-/*
- * removes a domain from the head of the according extraQ and
- * requeues it at a specified position:
- *   round-robin extratime: end of extraQ
- *   weighted ext.: insert in sorted list by score
- * if the domain is blocked / has regained its short-block-loss
- * time it is not put on any queue.
- */
-static void desched_extra_dom(s_time_t now, struct vcpu *d)
-{
-    struct sedf_vcpu_info *inf = EDOM_INFO(d);
-    int i = extra_get_cur_q(inf);
-    unsigned long oldscore;
-
-    ASSERT(extraq_on(d, i));
-
-    /* Unset all running flags */
-    inf->status &= ~(EXTRA_RUN_PEN | EXTRA_RUN_UTIL);
-    /* Fresh slice for the next run */
-    inf->cputime = 0;
-    /* Accumulate total extratime */
-    inf->extra_time_tot += now - inf->sched_start_abs;
-    /* Remove extradomain from head of the queue. */
-    extraq_del(d, i);
-
-    /* Update the score */
-    oldscore = inf->score[i];
-    if ( i == EXTRA_PEN_Q )
-    {
-        /* Domain was running in L0 extraq */
-        /* reduce block lost, probably more sophistication here!*/
-        /*inf->short_block_lost_tot -= EXTRA_QUANTUM;*/
-        inf->short_block_lost_tot -= now - inf->sched_start_abs;
-#if 0
-        /* KAF: If we don't exit short-blocking state at this point
-         * domain0 can steal all CPU for up to 10 seconds before
-         * scheduling settles down (when competing against another
-         * CPU-bound domain). Doing this seems to make things behave
-         * nicely. Noone gets starved by default.
-         */
-        if ( inf->short_block_lost_tot <= 0 )
-#endif
-        {
-            /* We have (over-)compensated our block penalty */
-            inf->short_block_lost_tot = 0;
-            /* We don't want a place on the penalty queue anymore! */
-            inf->status &= ~EXTRA_WANT_PEN_Q;
-            goto check_extra_queues;
-        }
-
-        /*
-         * We have to go again for another try in the block-extraq,
-         * the score is not used incremantally here, as this is
-         * already done by recalculating the block_lost
-         */
-        inf->score[EXTRA_PEN_Q] = (inf->period << 10) /
-                                  inf->short_block_lost_tot;
-        oldscore = 0;
-    }
-    else
-    {
-        /*
-         * Domain was running in L1 extraq => score is inverse of
-         * utilization and is used somewhat incremental!
-         */
-        if ( !inf->extraweight )
-        {
-            /* NB: use fixed point arithmetic with 10 bits */
-            inf->score[EXTRA_UTIL_Q] = (inf->period << 10) /
-                                       inf->slice;
-        }
-        else
-        {
-            /*
-             * Conversion between realtime utilisation and extrawieght:
-             * full (ie 100%) utilization is equivalent to 128 extraweight
-             */
-            inf->score[EXTRA_UTIL_Q] = (1<<17) / inf->extraweight;
-        }
-    }
-
- check_extra_queues:
-    /* Adding a runnable domain to the right queue and removing blocked ones */
-    if ( sedf_runnable(d) )
-    {
-        /* Add according to score: weighted round robin */
-        if (((inf->status & EXTRA_AWARE) && (i == EXTRA_UTIL_Q)) ||
-            ((inf->status & EXTRA_WANT_PEN_Q) && (i == EXTRA_PEN_Q)))
-            extraq_add_sort_update(d, i, oldscore);
-    }
-    else
-    {
-        /* Remove this blocked domain from the waitq! */
-        __del_from_queue(d);
-        /* Make sure that we remove a blocked domain from the other
-         * extraq too. */
-        if ( i == EXTRA_PEN_Q )
-        {
-            if ( extraq_on(d, EXTRA_UTIL_Q) )
-                extraq_del(d, EXTRA_UTIL_Q);
-        }
-        else
-        {
-            if ( extraq_on(d, EXTRA_PEN_Q) )
-                extraq_del(d, EXTRA_PEN_Q);
-        }
-    }
-
-    ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
-    ASSERT(IMPLY(extraq_on(d, EXTRA_UTIL_Q) || extraq_on(d, EXTRA_PEN_Q),
-                 sedf_runnable(d)));
-}
-
-
-static struct task_slice sedf_do_extra_schedule(
-    s_time_t now, s_time_t end_xt, struct list_head *extraq[], int cpu)
-{
-    struct task_slice ret = { 0 };
-    struct sedf_vcpu_info *runinf;
-    ASSERT(end_xt > now);
-
-    /* Enough time left to use for extratime? */
-    if ( end_xt - now < EXTRA_QUANTUM )
-        goto return_idle;
-
-    if ( !list_empty(extraq[EXTRA_PEN_Q]) )
-    {
-        /*
-         * We still have elements on the level 0 extraq
-         * => let those run first!
-         */
-        runinf = list_entry(extraq[EXTRA_PEN_Q]->next,
-                            struct sedf_vcpu_info, extralist[EXTRA_PEN_Q]);
-        runinf->status |= EXTRA_RUN_PEN;
-        ret.task = runinf->vcpu;
-        ret.time = EXTRA_QUANTUM;
-#ifdef SEDF_STATS
-        runinf->pen_extra_slices++;
-#endif
-    }
-    else
-    {
-        if ( !list_empty(extraq[EXTRA_UTIL_Q]) )
-        {
-            /* Use elements from the normal extraqueue */
-            runinf = list_entry(extraq[EXTRA_UTIL_Q]->next,
-                                struct sedf_vcpu_info,
-                                extralist[EXTRA_UTIL_Q]);
-            runinf->status |= EXTRA_RUN_UTIL;
-            ret.task = runinf->vcpu;
-            ret.time = EXTRA_QUANTUM;
-        }
-        else
-            goto return_idle;
-    }
-
-    ASSERT(ret.time > 0);
-    ASSERT(sedf_runnable(ret.task));
-    return ret;
-
- return_idle:
-    ret.task = IDLETASK(cpu);
-    ret.time = end_xt - now;
-    ASSERT(ret.time > 0);
-    ASSERT(sedf_runnable(ret.task));
-    return ret;
-}
-
-
 static int sedf_init(struct scheduler *ops)
 {
     struct sedf_priv_info *prv;
@@ -772,8 +439,6 @@ static struct task_slice sedf_do_schedule(
     struct list_head      *runq     = RUNQ(cpu);
     struct list_head      *waitq    = WAITQ(cpu);
     struct sedf_vcpu_info *inf      = EDOM_INFO(current);
-    struct list_head      *extraq[] = {
-        EXTRAQ(cpu, EXTRA_PEN_Q), EXTRAQ(cpu, EXTRA_UTIL_Q)};
     struct sedf_vcpu_info *runinf, *waitinf;
     struct task_slice      ret;
 
@@ -794,15 +459,7 @@ static struct task_slice sedf_do_schedule(
     if ( inf->status & SEDF_ASLEEP )
         inf->block_abs = now;
 
-    if ( unlikely(extra_runs(inf)) )
-    {
-        /* Special treatment of domains running in extra time */
-        desched_extra_dom(now, current);
-    }
-    else
-    {
-        desched_edf_dom(now, current);
-    }
+    desched_edf_dom(now, current);
 
  check_waitq:
     update_queues(now, runq, waitq);
 
@@ -844,12 +501,9 @@ static struct task_slice sedf_do_schedule(
     else
     {
         waitinf  = list_entry(waitq->next,struct sedf_vcpu_info, list);
-        /*
-         * We could not find any suitable domain
-         * => look for domains that are aware of extratime
-         */
-        ret = sedf_do_extra_schedule(now, PERIOD_BEGIN(waitinf),
-                                     extraq, cpu);
+
+        ret.task = IDLETASK(cpu);
+        ret.time = PERIOD_BEGIN(waitinf) - now;
     }
 
     /*
@@ -857,11 +511,8 @@
      * still can happen!!!
      */
     if ( ret.time < 0)
-    {
         printk("Ouch! We are seriously BEHIND schedule! %"PRIi64"\n",
                ret.time);
-        ret.time = EXTRA_QUANTUM;
-    }
 
     ret.migrated = 0;
 
@@ -872,7 +523,6 @@ static struct task_slice sedf_do_schedule(
     return ret;
 }
 
-
 static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
 {
     if ( is_idle_vcpu(d) )
@@ -888,14 +538,9 @@ static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
     {
         if ( __task_on_queue(d) )
             __del_from_queue(d);
-        if ( extraq_on(d, EXTRA_UTIL_Q) )
-            extraq_del(d, EXTRA_UTIL_Q);
-        if ( extraq_on(d, EXTRA_PEN_Q) )
-            extraq_del(d, EXTRA_PEN_Q);
     }
 }
 
-
 /*
  * This function wakes up a domain, i.e. moves them into the waitqueue
  * things to mention are: admission control is taking place nowhere at
@@ -928,8 +573,6 @@ static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
  *
  *     -this also doesn't disturb scheduling, but might lead to the fact, that
  *      the domain can't finish it's workload in the period
- *     -in addition to that the domain can be treated prioritised when
- *      extratime is available
  *     -addition: experiments have shown that this may have a HUGE impact on
  *      performance of other domains, becaus it can lead to excessive context
  *      switches
@@ -955,10 +598,6 @@ static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
  *      DRB______D___URRRR___D...
  *                       (D) <- old deadline was here
  *     -problem: deadlines don't occur isochronous anymore
- *     Part 2c (Improved Atropos design)
- *     -when a domain unblocks it is given a very short period (=latency hint)
- *      and slice length scaled accordingly
- *     -both rise again to the original value (e.g. get doubled every period)
 *
 *     3. Unconservative (i.e. incorrect)
 *     -to boost the performance of I/O dependent domains it would be possible
@@ -968,59 +607,11 @@ static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
 *     -either behaviour can lead to missed deadlines in other domains as
 *      opposed to approaches 1,2a,2b
 */
-static void unblock_short_extra_support(
+static void unblock_short_very_cons(
     struct sedf_vcpu_info* inf, s_time_t now)
 {
-    /*
-     * This unblocking scheme tries to support the domain, by assigning it
-     * a priority in extratime distribution according to the loss of time
-     * in this slice due to blocking
-     */
-    s_time_t pen;
-
-    /* No more realtime execution in this period! */
+    /* Run at the next period. */
     inf->deadl_abs += inf->period;
-    if ( likely(inf->block_abs) )
-    {
-        /* Treat blocked time as consumed by the domain */
-        /*inf->cputime += now - inf->block_abs;*/
-        /*
-         * Penalty is time the domain would have
-         * had if it continued to run.
-         */
-        pen = (inf->slice - inf->cputime);
-        if ( pen < 0 )
-            pen = 0;
-        /* Accumulate all penalties over the periods */
-        /*inf->short_block_lost_tot += pen;*/
-        /* Set penalty to the current value */
-        inf->short_block_lost_tot = pen;
-        /* Not sure which one is better.. but seems to work well... */
-
-        if ( inf->short_block_lost_tot )
-        {
-            inf->score[0] = (inf->period << 10) /
-                            inf->short_block_lost_tot;
-#ifdef SEDF_STATS
-            inf->pen_extra_blocks++;
-#endif
-            if ( extraq_on(inf->vcpu, EXTRA_PEN_Q) )
-                /* Remove domain for possible resorting! */
-                extraq_del(inf->vcpu, EXTRA_PEN_Q);
-            else
-                /*
-                 * Remember that we want to be on the penalty q
-                 * so that we can continue when we (un-)block
-                 * in penalty-extratime
-                 */
-                inf->status |= EXTRA_WANT_PEN_Q;
-
-            /* (re-)add domain to the penalty extraq */
-            extraq_add_sort_update(inf->vcpu, EXTRA_PEN_Q, 0);
-        }
-    }
-
-    /* Give it a fresh slice in the next period! */
     inf->cputime = 0;
 }
 
@@ -1034,34 +625,12 @@ static void unblock_long_cons_b(struct sedf_vcpu_info* inf,s_time_t now)
     inf->cputime = 0;
 }
 
-
-#define DOMAIN_EDF         1
-#define DOMAIN_EXTRA_PEN   2
-#define DOMAIN_EXTRA_UTIL  3
-#define DOMAIN_IDLE        4
-static inline int get_run_type(struct vcpu* d)
-{
-    struct sedf_vcpu_info* inf = EDOM_INFO(d);
-    if (is_idle_vcpu(d))
-        return DOMAIN_IDLE;
-    if (inf->status & EXTRA_RUN_PEN)
-        return DOMAIN_EXTRA_PEN;
-    if (inf->status & EXTRA_RUN_UTIL)
-        return DOMAIN_EXTRA_UTIL;
-    return DOMAIN_EDF;
-}
-
-
 /*
  * Compares two domains in the relation of whether the one is allowed to
  * interrupt the others execution.
  * It returns true (!=0) if a switch to the other domain is good.
- * Current Priority scheme is as follows:
- *  EDF > L0 (penalty based) extra-time >
- *  L1 (utilization) extra-time > idle-domain
- * In the same class priorities are assigned as following:
+ * Priority scheme is as follows:
  *  EDF: early deadline > late deadline
- *  L0 extra-time: lower score > higher score
  */
 static inline int should_switch(struct vcpu *cur,
                                 struct vcpu *other,
@@ -1070,32 +639,17 @@ static inline int should_switch(struct vcpu *cur,
     struct sedf_vcpu_info *cur_inf, *other_inf;
     cur_inf   = EDOM_INFO(cur);
     other_inf = EDOM_INFO(other);
-
+
+    /* Always interrupt idle domain. */
+    if ( is_idle_vcpu(cur) )
+        return 1;
+
     /* Check whether we need to make an earlier scheduling decision */
     if ( PERIOD_BEGIN(other_inf) <
          CPU_INFO(other->processor)->current_slice_expires )
         return 1;
 
-    /* No timing-based switches need to be taken into account here */
-    switch ( get_run_type(cur) )
-    {
-    case DOMAIN_EDF:
-        /* Do not interrupt a running EDF domain */
-        return 0;
-    case DOMAIN_EXTRA_PEN:
-        /* Check whether we also want the L0 ex-q with lower score */
-        return ((other_inf->status & EXTRA_WANT_PEN_Q) &&
-                (other_inf->score[EXTRA_PEN_Q] <
-                 cur_inf->score[EXTRA_PEN_Q]));
-    case DOMAIN_EXTRA_UTIL:
-        /* Check whether we want the L0 extraq. Don't
-         * switch if both domains want L1 extraq. */
-        return !!(other_inf->status & EXTRA_WANT_PEN_Q);
-    case DOMAIN_IDLE:
-        return 1;
-    }
-
-    return 1;
+    return 0;
 }
 
 static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
@@ -1111,8 +665,6 @@ static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
     ASSERT(!sedf_runnable(d));
     inf->status &= ~SEDF_ASLEEP;
 
-    ASSERT(!extraq_on(d, EXTRA_UTIL_Q));
-    ASSERT(!extraq_on(d, EXTRA_PEN_Q));
 
     if ( unlikely(inf->deadl_abs == 0) )
     {
@@ -1124,43 +676,21 @@ static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
     inf->block_tot++;
 #endif
 
-    if ( unlikely(now < PERIOD_BEGIN(inf)) )
+    if ( now < inf->deadl_abs )
     {
-        /* Unblocking in extra-time! */
-        if ( inf->status & EXTRA_WANT_PEN_Q )
-        {
-            /*
-             * We have a domain that wants compensation
-             * for block penalty and did just block in
-             * its compensation time. Give it another
-             * chance!
-             */
-            extraq_add_sort_update(d, EXTRA_PEN_Q, 0);
-        }
-        extraq_check_add_unblocked(d, 0);
-    }
-    else
-    {
-        if ( now < inf->deadl_abs )
-        {
-            /* Short blocking */
+        /* Short blocking */
 #ifdef SEDF_STATS
-            inf->short_block_tot++;
+        inf->short_block_tot++;
 #endif
-            unblock_short_extra_support(inf, now);
-
-            extraq_check_add_unblocked(d, 1);
-        }
-        else
-        {
+        unblock_short_very_cons(inf, now);
+    }
+    else
+    {
             /* Long unblocking */
 #ifdef SEDF_STATS
-            inf->long_block_tot++;
+        inf->long_block_tot++;
 #endif
-            unblock_long_cons_b(inf, now);
-
-            extraq_check_add_unblocked(d, 1);
-        }
+        unblock_long_cons_b(inf, now);
     }
 
     if ( PERIOD_BEGIN(inf) > now )
@@ -1178,8 +708,6 @@ static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
     }
 #endif
 
-    /* Sanity check: make sure each extra-aware domain IS on the util-q! */
-    ASSERT(IMPLY(inf->status & EXTRA_AWARE, extraq_on(d, EXTRA_UTIL_Q)));
     ASSERT(__task_on_queue(d));
     /*
      * Check whether the awakened task needs to invoke the do_schedule
@@ -1200,25 +728,18 @@ static void sedf_dump_domain(struct vcpu *d)
 {
     printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
            d->is_running ? 'T':'F');
-    printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu"
-           " sc=%i xtr(%s)=%"PRIu64" ew=%hu",
-           EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,
-           EDOM_INFO(d)->weight,
-           EDOM_INFO(d)->score[EXTRA_UTIL_Q],
-           (EDOM_INFO(d)->status & EXTRA_AWARE) ? "yes" : "no",
-           EDOM_INFO(d)->extra_time_tot, EDOM_INFO(d)->extraweight);
+    printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64,
+           EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs);
 #ifdef SEDF_STATS
     if ( EDOM_INFO(d)->block_time_tot != 0 )
         printk(" pen=%"PRIu64"%%", (EDOM_INFO(d)->penalty_time_tot * 100) /
                EDOM_INFO(d)->block_time_tot);
     if ( EDOM_INFO(d)->block_tot != 0 )
-        printk("\n blks=%u sh=%u (%u%%) (shex=%i "\
-               "shexsl=%i) l=%u (%u%%) avg: b=%"PRIu64" p=%"PRIu64"",
+        printk("\n blks=%u sh=%u (%u%%) "\
+               "l=%u (%u%%) avg: b=%"PRIu64" p=%"PRIu64"",
                EDOM_INFO(d)->block_tot, EDOM_INFO(d)->short_block_tot,
                (EDOM_INFO(d)->short_block_tot * 100) / EDOM_INFO(d)->block_tot,
-               EDOM_INFO(d)->pen_extra_blocks,
-               EDOM_INFO(d)->pen_extra_slices,
               EDOM_INFO(d)->long_block_tot,
               (EDOM_INFO(d)->long_block_tot * 100) / EDOM_INFO(d)->block_tot,
               (EDOM_INFO(d)->block_time_tot) / EDOM_INFO(d)->block_tot,
@@ -1258,30 +779,6 @@ static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
         sedf_dump_domain(d_inf->vcpu);
     }
 
-    queue = EXTRAQ(i,EXTRA_PEN_Q); loop = 0;
-    printk("\nEXTRAQ (penalty) rq %lx n: %lx, p: %lx\n",
-           (unsigned long)queue, (unsigned long) queue->next,
-           (unsigned long) queue->prev);
-    list_for_each_safe ( list, tmp, queue )
-    {
-        d_inf = list_entry(list, struct sedf_vcpu_info,
-                           extralist[EXTRA_PEN_Q]);
-        printk("%3d: ",loop++);
-        sedf_dump_domain(d_inf->vcpu);
-    }
-
-    queue = EXTRAQ(i,EXTRA_UTIL_Q); loop = 0;
-    printk("\nEXTRAQ (utilization) rq %lx n: %lx, p: %lx\n",
-           (unsigned long)queue, (unsigned long) queue->next,
-           (unsigned long) queue->prev);
-    list_for_each_safe ( list, tmp, queue )
-    {
-        d_inf = list_entry(list, struct sedf_vcpu_info,
-                           extralist[EXTRA_UTIL_Q]);
-        printk("%3d: ",loop++);
-        sedf_dump_domain(d_inf->vcpu);
-    }
-
     loop = 0;
     printk("\nnot on Q\n");
 
@@ -1314,9 +811,10 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
     /*
     * Serialize against the pluggable scheduler lock to protect from
     * concurrent updates. We need to take the runq lock for the VCPUs
-    * as well, since we are touching extraweight, weight, slice and
-    * period. As in sched_credit2.c, runq locks nest inside the
-    * pluggable scheduler lock.
+    * as well, since we are touching slice and period.
+    *
+    * As in sched_credit2.c, runq locks nest inside the pluggable scheduler
+    * lock.
     */
    spin_lock_irqsave(&prv->lock, flags);
 
@@ -1330,8 +828,7 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
     }
 
     /*
-     * Sanity checking: note that disabling extra weight requires
-     * that we set a non-zero slice.
+     * Sanity checking
     */
     if ( (op->u.sedf.period > PERIOD_MAX) ||
          (op->u.sedf.period < PERIOD_MIN) ||
@@ -1347,10 +844,8 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
         {
             spinlock_t *lock = vcpu_schedule_lock(v);
 
-            EDOM_INFO(v)->period_orig =
-                EDOM_INFO(v)->period = op->u.sedf.period;
-            EDOM_INFO(v)->slice_orig =
-                EDOM_INFO(v)->slice = op->u.sedf.slice;
+            EDOM_INFO(v)->period = op->u.sedf.period;
+            EDOM_INFO(v)->slice  = op->u.sedf.slice;
             vcpu_schedule_unlock(lock, v);
         }
     }
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel