[Xen-changelog] [xen stable-4.2] credit1: Use atomic bit operations for the flags structure
commit 48892d024a219e8a8bcc09f82046c454169bae96
Author:     George Dunlap <george.dunlap@xxxxxxxxxxxxx>
AuthorDate: Tue Mar 12 16:17:23 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Mar 12 16:17:23 2013 +0100

    credit1: Use atomic bit operations for the flags structure

    The flags structure is not protected by locks (or more precisely, it
    is protected using an inconsistent set of locks); we therefore need
    to make sure that all accesses are atomic-safe.  This is particularly
    important in the case of the PARKED flag, which if clobbered while
    changing the YIELD bit will leave a vcpu wedged in an offline state.

    Using the atomic bitops also requires us to change the size of the
    "flags" element.

    Spotted-by: Igor Pavlikevich <ipavlikevich@xxxxxxxxx>
    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
    master changeset: be6507509454adf3bb5a50b9406c88504e996d5a
    master date: 2013-03-04 13:37:39 +0100
---
 xen/common/sched_credit.c | 23 ++++++++++-------------
 1 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 7df2699..68dc80b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -58,8 +58,8 @@
 /*
  * Flags
  */
-#define CSCHED_FLAG_VCPU_PARKED    0x0001  /* VCPU over capped credits */
-#define CSCHED_FLAG_VCPU_YIELD     0x0002  /* VCPU yielding */
+#define CSCHED_FLAG_VCPU_PARKED    0x0   /* VCPU over capped credits */
+#define CSCHED_FLAG_VCPU_YIELD     0x1   /* VCPU yielding */
 
 
 /*
@@ -132,7 +132,7 @@ struct csched_vcpu {
     struct vcpu *vcpu;
     atomic_t credit;
     s_time_t start_time;   /* When we were scheduled (used for credit) */
-    uint16_t flags;
+    unsigned flags;
     int16_t pri;
 #ifdef CSCHED_STATS
     struct {
@@ -214,7 +214,7 @@ __runq_insert(unsigned int cpu, struct csched_vcpu *svc)
     /* If the vcpu yielded, try to put it behind one lower-priority
      * runnable vcpu if we can.  The next runq_sort will bring it forward
      * within 30ms if the queue too long. */
-    if ( svc->flags & CSCHED_FLAG_VCPU_YIELD
+    if ( test_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags)
          && __runq_elem(iter)->pri > CSCHED_PRI_IDLE )
     {
         iter=iter->next;
@@ -776,7 +776,7 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
      * those.
      */
     if ( svc->pri == CSCHED_PRI_TS_UNDER &&
-         !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
+         !test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
     {
         svc->pri = CSCHED_PRI_TS_BOOST;
     }
@@ -789,12 +789,12 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
 static void
 csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const sv = CSCHED_VCPU(vc);
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
 
     if ( !sched_credit_default_yield )
     {
         /* Let the scheduler know that this vcpu is trying to yield */
-        sv->flags |= CSCHED_FLAG_VCPU_YIELD;
+        set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags);
     }
 }
 
@@ -1122,11 +1122,10 @@ csched_acct(void* dummy)
             /* Park running VCPUs of capped-out domains */
             if ( sdom->cap != 0U &&
                  credit < -credit_cap &&
-                 !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
+                 !test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
             {
                 CSCHED_STAT_CRANK(vcpu_park);
                 vcpu_pause_nosync(svc->vcpu);
-                svc->flags |= CSCHED_FLAG_VCPU_PARKED;
             }
 
             /* Lower bound on credits */
@@ -1142,7 +1141,7 @@ csched_acct(void* dummy)
                 svc->pri = CSCHED_PRI_TS_UNDER;
 
                 /* Unpark any capped domains whose credits go positive */
-                if ( svc->flags & CSCHED_FLAG_VCPU_PARKED)
+                if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) )
                 {
                     /*
                      * It's important to unset the flag AFTER the unpause()
@@ -1151,7 +1150,6 @@ csched_acct(void* dummy)
                      */
                     CSCHED_STAT_CRANK(vcpu_unpark);
                     vcpu_unpause(svc->vcpu);
-                    svc->flags &= ~CSCHED_FLAG_VCPU_PARKED;
                 }
 
                 /* Upper bound on credits means VCPU stops earning */
@@ -1410,8 +1408,7 @@ csched_schedule(
     /*
      * Clear YIELD flag before scheduling out
      */
-    if ( scurr->flags & CSCHED_FLAG_VCPU_YIELD )
-        scurr->flags &= ~(CSCHED_FLAG_VCPU_YIELD);
+    clear_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags);
 
     /*
      * SMP Load balance:
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
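
For readers less familiar with the kernel-style bitop helpers the patch switches to, here is a minimal userspace sketch of the before/after behaviour. It is an illustration only, not Xen code: the helpers are modelled with C11 <stdatomic.h> rather than Xen's arch-specific set_bit()/test_bit()/test_and_set_bit()/test_and_clear_bit(), and the *_atomic names and main() driver are invented for the example; only the flag values mirror the patch.

/*
 * Minimal userspace sketch (not Xen code) of the pattern the patch adopts.
 * C11 <stdatomic.h> stands in for Xen's arch-specific bitops; only the
 * flag values mirror the patch, everything else is illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CSCHED_FLAG_VCPU_PARKED 0x0  /* bit number, not a mask */
#define CSCHED_FLAG_VCPU_YIELD  0x1

static void set_bit_atomic(int nr, atomic_uint *flags)
{
    /* Atomic OR touches only bit 'nr'; concurrent updates are not lost. */
    atomic_fetch_or(flags, 1u << nr);
}

static void clear_bit_atomic(int nr, atomic_uint *flags)
{
    atomic_fetch_and(flags, ~(1u << nr));
}

static bool test_bit_atomic(int nr, atomic_uint *flags)
{
    return (atomic_load(flags) >> nr) & 1u;
}

static bool test_and_set_bit_atomic(int nr, atomic_uint *flags)
{
    /* Returns the previous value of the bit; the set happens atomically. */
    return (atomic_fetch_or(flags, 1u << nr) >> nr) & 1u;
}

static bool test_and_clear_bit_atomic(int nr, atomic_uint *flags)
{
    return (atomic_fetch_and(flags, ~(1u << nr)) >> nr) & 1u;
}

int main(void)
{
    atomic_uint flags = 0;

    /*
     * The old code did "flags |= CSCHED_FLAG_VCPU_YIELD", a plain
     * read-modify-write; another CPU setting or clearing PARKED between
     * the read and the write would have its update overwritten.
     */
    set_bit_atomic(CSCHED_FLAG_VCPU_YIELD, &flags);

    /* Park only if not already parked: check and set in one atomic step. */
    if ( !test_and_set_bit_atomic(CSCHED_FLAG_VCPU_PARKED, &flags) )
        printf("vcpu parked\n");

    /* Unpark only if the flag was actually set, clearing it atomically. */
    if ( test_and_clear_bit_atomic(CSCHED_FLAG_VCPU_PARKED, &flags) )
        printf("vcpu unparked\n");

    printf("YIELD still set: %d\n",
           test_bit_atomic(CSCHED_FLAG_VCPU_YIELD, &flags));

    clear_bit_atomic(CSCHED_FLAG_VCPU_YIELD, &flags);
    return 0;
}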
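Two details of the patch are worth spelling out. First, the flag constants change meaning: the bitop helpers take a bit number rather than a mask, so CSCHED_FLAG_VCPU_PARKED moves from the mask 0x0001 to bit 0 and CSCHED_FLAG_VCPU_YIELD from 0x0002 to bit 1, and the flags field is widened from uint16_t so the bit operations have a suitably sized word to act on. Second, test_and_set_bit() and test_and_clear_bit() fold the previous "check the flag, then update it" sequence into a single atomic read-modify-write, so the paired vcpu_pause_nosync()/vcpu_unpause() calls run at most once per park/unpark even if two code paths race on the same flag.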