diff -r bb9a7d40d0b5 xen/common/sched_credit.c --- a/xen/common/sched_credit.c Mon Sep 01 13:01:23 2008 +0100 +++ b/xen/common/sched_credit.c Mon Sep 01 13:01:37 2008 +0100 @@ -61,7 +61,9 @@ /* * Flags */ -#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */ +#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */ +#define CSCHED_FLAG_VCPU_YIELD 0x0002 /* VCPU yielding */ +#define CSCHED_FLAG_VCPU_YIELD_PRI 0x0004 /* VCPU's priority reduced to force a yield */ /* @@ -711,6 +713,15 @@ csched_vcpu_wake(struct vcpu *vc) __runq_tickle(cpu, svc); } +static void +csched_vcpu_yield(struct vcpu *v) +{ + struct csched_vcpu * const sv = CSCHED_VCPU(v); + + /* Let the scheduler know that this vcpu is trying to yield */ + sv->flags |= CSCHED_FLAG_VCPU_YIELD; +} + static int csched_dom_cntl( struct domain *d, @@ -1231,12 +1242,38 @@ csched_schedule(s_time_t now) /* * Select next runnable local VCPU (ie top of local runq) */ +retry: if ( vcpu_runnable(current) ) __runq_insert(cpu, scurr); else BUG_ON( is_idle_vcpu(current) || list_empty(runq) ); snext = __runq_elem(runq->next); + + /* + * Check to see if yield worked; if not, temporarily reduce priority + */ + if ( scurr->flags & CSCHED_FLAG_VCPU_YIELD ) + { + scurr->flags &= ~(CSCHED_FLAG_VCPU_YIELD); + /* Yield had no effect: drop this vcpu to CSCHED_PRI_TS_OVER and retry. */ + if ( (snext == scurr) + && (scurr->pri > CSCHED_PRI_TS_OVER) ) + { + __runq_remove(scurr); + scurr->flags |= CSCHED_FLAG_VCPU_YIELD_PRI; + scurr->pri = CSCHED_PRI_TS_OVER; + goto retry; + } + } + + /* If we reduced priority due to yielding, reinstate it. */
+ if ( snext->flags & CSCHED_FLAG_VCPU_YIELD_PRI ) + { + snext->flags &= ~CSCHED_FLAG_VCPU_YIELD_PRI; + if ( snext->pri == CSCHED_PRI_TS_OVER ) + snext->pri = CSCHED_PRI_TS_UNDER; + } /* * SMP Load balance: @@ -1448,6 +1485,7 @@ struct scheduler sched_credit_def = { .sleep = csched_vcpu_sleep, .wake = csched_vcpu_wake, + .yield = csched_vcpu_yield, .adjust = csched_dom_cntl, diff -r bb9a7d40d0b5 xen/common/schedule.c --- a/xen/common/schedule.c Mon Sep 01 13:01:23 2008 +0100 +++ b/xen/common/schedule.c Mon Sep 01 13:01:25 2008 +0100 @@ -386,6 +386,12 @@ static long do_poll(struct sched_poll *s /* Voluntarily yield the processor for this allocation. */ static long do_yield(void) { + struct vcpu * v=current; + + vcpu_schedule_lock_irq(v); + SCHED_OP(yield, current); + vcpu_schedule_unlock_irq(v); + TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id); raise_softirq(SCHEDULE_SOFTIRQ); return 0; diff -r bb9a7d40d0b5 xen/include/xen/sched-if.h --- a/xen/include/xen/sched-if.h Mon Sep 01 13:01:23 2008 +0100 +++ b/xen/include/xen/sched-if.h Mon Sep 01 13:01:25 2008 +0100 @@ -69,6 +69,7 @@ struct scheduler { void (*sleep) (struct vcpu *); void (*wake) (struct vcpu *); + void (*yield) (struct vcpu *); struct task_slice (*do_schedule) (s_time_t);