|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [RFC Patch 3/3] Fix formatting and misleading comments/variables in sedf
From: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>
Update the sedf scheduler to correct some of the more egregious formatting
issues. Also update some of the misleading comments/variable names.
Specifically the sedf scheduler still implies that a domain and a vcpu
are the same thing, which while true in the past is no longer the case.
Signed-off-by: Nathan Studer <nate.studer@xxxxxxxxxxxxxxx>
Signed-off-by: Joshua Whitehead <josh.whitehead@xxxxxxxxxxxxxxx>
---
xen/common/sched_sedf.c | 278 +++++++++++++++++++++++------------------------
1 file changed, 139 insertions(+), 139 deletions(-)
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 7a827c8..16fa9f9 100755
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -34,8 +34,7 @@
#define PERIOD_MIN (MICROSECS(10)) /* 10us */
#define SLICE_MIN (MICROSECS(5)) /* 5us */
-#define IMPLY(a, b) (!(a) || (b))
-#define EQ(a, b) ((!!(a)) == (!!(b)))
+#define EQ(_A, _B) ((!!(_A)) == (!!(_B)))
struct sedf_dom_info {
@@ -55,13 +54,13 @@ struct sedf_vcpu_info {
s_time_t period; /* = relative deadline */
s_time_t slice; /* = worst case execution time */
- /* Status of domain */
+ /* Status of vcpu */
int status;
/* Bookkeeping */
s_time_t deadl_abs;
s_time_t sched_start_abs;
s_time_t cputime;
- /* Times the domain un-/blocked */
+ /* Times the vcpu un-/blocked */
s_time_t block_abs;
s_time_t unblock_abs;
@@ -82,35 +81,35 @@ struct sedf_cpu_info {
#define SEDF_PRIV(_ops) \
((struct sedf_priv_info *)((_ops)->sched_data))
-#define EDOM_INFO(d) ((struct sedf_vcpu_info *)((d)->sched_priv))
-#define CPU_INFO(cpu) \
- ((struct sedf_cpu_info *)per_cpu(schedule_data, cpu).sched_priv)
-#define LIST(d) (&EDOM_INFO(d)->list)
-#define RUNQ(cpu) (&CPU_INFO(cpu)->runnableq)
-#define WAITQ(cpu) (&CPU_INFO(cpu)->waitq)
-#define IDLETASK(cpu) (idle_vcpu[cpu])
+#define SEDF_VCPU(_vcpu) ((struct sedf_vcpu_info *)((_vcpu)->sched_priv))
+#define SEDF_PCPU(_cpu) \
+ ((struct sedf_cpu_info *)per_cpu(schedule_data, _cpu).sched_priv)
+#define LIST(_vcpu) (&SEDF_VCPU(_vcpu)->list)
+#define RUNQ(_cpu) (&SEDF_PCPU(_cpu)->runnableq)
+#define WAITQ(_cpu) (&SEDF_PCPU(_cpu)->waitq)
+#define IDLETASK(_cpu) (idle_vcpu[_cpu])
#define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
-#define DIV_UP(x,y) (((x) + (y) - 1) / y)
+#define DIV_UP(_X, _Y) (((_X) + (_Y) - 1) / _Y)
-#define sedf_runnable(edom) (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
+#define sedf_runnable(edom) (!(SEDF_VCPU(edom)->status & SEDF_ASLEEP))
-static void sedf_dump_cpu_state(const struct scheduler *ops, int i);
+static void sedf_dump_cpu_state(const struct scheduler *ops, int cpu);
-static inline int __task_on_queue(struct vcpu *d)
+static inline int __task_on_queue(struct vcpu *v)
{
- return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
+ return (((LIST(v))->next != NULL) && (LIST(v)->next != LIST(v)));
}
-static inline void __del_from_queue(struct vcpu *d)
+static inline void __del_from_queue(struct vcpu *v)
{
- struct list_head *list = LIST(d);
- ASSERT(__task_on_queue(d));
+ struct list_head *list = LIST(v);
+ ASSERT(__task_on_queue(v));
list_del(list);
list->next = NULL;
- ASSERT(!__task_on_queue(d));
+ ASSERT(!__task_on_queue(v));
}
typedef int(*list_comparer)(struct list_head* el1, struct list_head* el2);
@@ -129,12 +128,12 @@ static inline void list_insert_sort(
list_add(element, cur->prev);
}
-#define DOMAIN_COMPARER(name, field, comp1, comp2) \
+#define VCPU_COMPARER(name, field, comp1, comp2) \
static int name##_comp(struct list_head* el1, struct list_head* el2) \
{ \
- struct sedf_vcpu_info *d1, *d2; \
- d1 = list_entry(el1,struct sedf_vcpu_info, field); \
- d2 = list_entry(el2,struct sedf_vcpu_info, field); \
+ struct sedf_vcpu_info *v1, *v2; \
+ v1 = list_entry(el1, struct sedf_vcpu_info, field); \
+ v2 = list_entry(el2, struct sedf_vcpu_info, field); \
if ( (comp1) == (comp2) ) \
return 0; \
if ( (comp1) < (comp2) ) \
@@ -144,11 +143,11 @@ static int name##_comp(struct list_head* el1, struct
list_head* el2) \
}
/*
- * Adds a domain to the queue of processes which wait for the beginning of the
+ * Adds a vcpu to the queue of processes which wait for the beginning of the
* next period; this list is therefore sortet by this time, which is simply
* absol. deadline - period.
*/
-DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2));
+VCPU_COMPARER(waitq, list, PERIOD_BEGIN(v1), PERIOD_BEGIN(v2));
static inline void __add_to_waitqueue_sort(struct vcpu *v)
{
ASSERT(!__task_on_queue(v));
@@ -157,12 +156,12 @@ static inline void __add_to_waitqueue_sort(struct vcpu *v)
}
/*
- * Adds a domain to the queue of processes which have started their current
+ * Adds a vcpu to the queue of processes which have started their current
* period and are runnable (i.e. not blocked, dieing,...). The first element
* on this list is running on the processor, if the list is empty the idle
* task will run. As we are implementing EDF, this list is sorted by deadlines.
*/
-DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs);
+VCPU_COMPARER(runq, list, v1->deadl_abs, v2->deadl_abs);
static inline void __add_to_runqueue_sort(struct vcpu *v)
{
list_insert_sort(RUNQ(v->processor), LIST(v), runq_comp);
@@ -173,8 +172,8 @@ static void sedf_insert_vcpu(const struct scheduler *ops,
struct vcpu *v)
{
if ( is_idle_vcpu(v) )
{
- EDOM_INFO(v)->deadl_abs = 0;
- EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
+ SEDF_VCPU(v)->deadl_abs = 0;
+ SEDF_VCPU(v)->status &= ~SEDF_ASLEEP;
}
}
@@ -274,29 +273,29 @@ static int sedf_pick_cpu(const struct scheduler *ops,
struct vcpu *v)
}
/*
- * Handles the rescheduling & bookkeeping of domains running in their
+ * Handles the rescheduling & bookkeeping of vcpus running in their
* guaranteed timeslice.
*/
-static void desched_edf_dom(s_time_t now, struct vcpu* d)
+static void desched_edf_vcpu(s_time_t now, struct vcpu *v)
{
- struct sedf_vcpu_info* inf = EDOM_INFO(d);
+ struct sedf_vcpu_info* inf = SEDF_VCPU(v);
- /* Current domain is running in real time mode */
- ASSERT(__task_on_queue(d));
+ /* Current vcpu is running in real time mode */
+ ASSERT(__task_on_queue(v));
- /* Update the domain's cputime */
+ /* Update the vcpu's cputime */
inf->cputime += now - inf->sched_start_abs;
- /* Scheduling decisions which don't remove the running domain from
+ /* Scheduling decisions which don't remove the running vcpu from
* the runq */
- if ( (inf->cputime < inf->slice) && sedf_runnable(d) )
+ if ( (inf->cputime < inf->slice) && sedf_runnable(v) )
return;
- __del_from_queue(d);
+ __del_from_queue(v);
/*
* Manage bookkeeping (i.e. calculate next deadline, memorise
- * overrun-time of slice) of finished domains.
+ * overrun-time of slice) of finished vcpus.
*/
if ( inf->cputime >= inf->slice )
{
@@ -306,13 +305,13 @@ static void desched_edf_dom(s_time_t now, struct vcpu* d)
inf->deadl_abs += inf->period;
}
- /* Add a runnable domain to the waitqueue */
- if ( sedf_runnable(d) )
+ /* Add a runnable vcpu to the waitqueue */
+ if ( sedf_runnable(v) )
{
- __add_to_waitqueue_sort(d);
+ __add_to_waitqueue_sort(v);
}
- ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
+ ASSERT(EQ(sedf_runnable(v), __task_on_queue(v)));
}
@@ -336,14 +335,14 @@ static void update_queues(
__add_to_runqueue_sort(curinf->vcpu);
}
- /* Process the runq, find domains that are on the runq that shouldn't */
+ /* Process the runq, find vcpus that are on the runq that shouldn't */
list_for_each_safe ( cur, tmp, runq )
{
- curinf = list_entry(cur,struct sedf_vcpu_info,list);
+ curinf = list_entry(cur, struct sedf_vcpu_info, list);
if ( unlikely(curinf->slice == 0) )
{
- /* Ignore domains with empty slice */
+ /* Ignore vcpus with empty slice */
__del_from_queue(curinf->vcpu);
/* Move them to their next period */
@@ -429,8 +428,8 @@ static void sedf_deinit(const struct scheduler *ops)
* Main scheduling function
* Reasons for calling this function are:
* -timeslice for the current period used up
- * -domain on waitqueue has started it's period
- * -and various others ;) in general: determine which domain to run next
+ * -vcpu on waitqueue has started it's period
+ * -and various others ;) in general: determine which vcpu to run next
*/
static struct task_slice sedf_do_schedule(
const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
@@ -438,7 +437,7 @@ static struct task_slice sedf_do_schedule(
int cpu = smp_processor_id();
struct list_head *runq = RUNQ(cpu);
struct list_head *waitq = WAITQ(cpu);
- struct sedf_vcpu_info *inf = EDOM_INFO(current);
+ struct sedf_vcpu_info *inf = SEDF_VCPU(current);
struct sedf_vcpu_info *runinf, *waitinf;
struct task_slice ret;
@@ -449,7 +448,7 @@ static struct task_slice sedf_do_schedule(
goto check_waitq;
/*
- * Create local state of the status of the domain, in order to avoid
+ * Create local state of the status of the vcpu, in order to avoid
* inconsistent state during scheduling decisions, because data for
* vcpu_runnable is not protected by the scheduling lock!
*/
@@ -459,12 +458,12 @@ static struct task_slice sedf_do_schedule(
if ( inf->status & SEDF_ASLEEP )
inf->block_abs = now;
- desched_edf_dom(now, current);
+ desched_edf_vcpu(now, current);
check_waitq:
update_queues(now, runq, waitq);
/*
- * Now simply pick the first domain from the runqueue, which has the
+ * Now simply pick the first vcpu from the runqueue, which has the
* earliest deadline, because the list is sorted
*
* Tasklet work (which runs in idle VCPU context) overrides all else.
@@ -479,15 +478,15 @@ static struct task_slice sedf_do_schedule(
}
else if ( !list_empty(runq) )
{
- runinf = list_entry(runq->next,struct sedf_vcpu_info,list);
+ runinf = list_entry(runq->next, struct sedf_vcpu_info, list);
ret.task = runinf->vcpu;
if ( !list_empty(waitq) )
{
waitinf = list_entry(waitq->next,
- struct sedf_vcpu_info,list);
+ struct sedf_vcpu_info, list);
/*
- * Rerun scheduler, when scheduled domain reaches it's
- * end of slice or the first domain from the waitqueue
+ * Rerun scheduler, when scheduled vcpu reaches it's
+ * end of slice or the first vcpu from the waitqueue
* gets ready.
*/
ret.time = MIN(now + runinf->slice - runinf->cputime,
@@ -500,7 +499,7 @@ static struct task_slice sedf_do_schedule(
}
else
{
- waitinf = list_entry(waitq->next,struct sedf_vcpu_info, list);
+ waitinf = list_entry(waitq->next, struct sedf_vcpu_info, list);
ret.task = IDLETASK(cpu);
ret.time = PERIOD_BEGIN(waitinf) - now;
@@ -516,55 +515,55 @@ static struct task_slice sedf_do_schedule(
ret.migrated = 0;
- EDOM_INFO(ret.task)->sched_start_abs = now;
+ SEDF_VCPU(ret.task)->sched_start_abs = now;
CHECK(ret.time > 0);
ASSERT(sedf_runnable(ret.task));
- CPU_INFO(cpu)->current_slice_expires = now + ret.time;
+ SEDF_PCPU(cpu)->current_slice_expires = now + ret.time;
return ret;
}
-static void sedf_sleep(const struct scheduler *ops, struct vcpu *d)
+static void sedf_sleep(const struct scheduler *ops, struct vcpu *v)
{
- if ( is_idle_vcpu(d) )
+ if ( is_idle_vcpu(v) )
return;
- EDOM_INFO(d)->status |= SEDF_ASLEEP;
+ SEDF_VCPU(v)->status |= SEDF_ASLEEP;
- if ( per_cpu(schedule_data, d->processor).curr == d )
+ if ( per_cpu(schedule_data, v->processor).curr == v )
{
- cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
+ cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
}
else
{
- if ( __task_on_queue(d) )
- __del_from_queue(d);
+ if ( __task_on_queue(v) )
+ __del_from_queue(v);
}
}
/*
- * This function wakes up a domain, i.e. moves them into the waitqueue
+ * This function wakes up a vcpu, i.e. moves them into the waitqueue
* things to mention are: admission control is taking place nowhere at
- * the moment, so we can't be sure, whether it is safe to wake the domain
+ * the moment, so we can't be sure, whether it is safe to wake the vcpu
* up at all. Anyway, even if it is safe (total cpu usage <=100%) there are
- * some considerations on when to allow the domain to wake up and have it's
+ * some considerations on when to allow the vcpu to wake up and have it's
* first deadline...
* I detected 3 cases, which could describe the possible behaviour of the
* scheduler,
* and I'll try to make them more clear:
*
* 1. Very conservative
- * -when a blocked domain unblocks, it is allowed to start execution at
+ * -when a blocked vcpu unblocks, it is allowed to start execution at
* the beginning of the next complete period
* (D..deadline, R..running, B..blocking/sleeping, U..unblocking/waking up
*
* DRRB_____D__U_____DRRRRR___D________ ...
*
- * -this causes the domain to miss a period (and a deadlline)
+ * -this causes the vcpu to miss a period (and a deadlline)
* -doesn't disturb the schedule at all
* -deadlines keep occuring isochronous
*
* 2. Conservative Part 1: Short Unblocking
- * -when a domain unblocks in the same period as it was blocked it
+ * -when a vcpu unblocks in the same period as it was blocked it
* unblocks and may consume the rest of it's original time-slice minus
* the time it was blocked
* (assume period=9, slice=5)
@@ -572,16 +571,16 @@ static void sedf_sleep(const struct scheduler *ops,
struct vcpu *d)
* DRB_UR___DRRRRR___D...
*
* -this also doesn't disturb scheduling, but might lead to the fact, that
- * the domain can't finish it's workload in the period
+ * the vcpu can't finish it's workload in the period
* -addition: experiments have shown that this may have a HUGE impact on
- * performance of other domains, becaus it can lead to excessive context
+ * performance of other vcpus, becaus it can lead to excessive context
* switches
*
* Part2: Long Unblocking
* Part 2a
* -it is obvious that such accounting of block time, applied when
* unblocking is happening in later periods, works fine aswell
- * -the domain is treated as if it would have been running since the start
+ * -the vcpu is treated as if it would have been running since the start
* of its new period
*
* DRB______D___UR___D...
@@ -600,11 +599,11 @@ static void sedf_sleep(const struct scheduler *ops,
struct vcpu *d)
* -problem: deadlines don't occur isochronous anymore
*
* 3. Unconservative (i.e. incorrect)
- * -to boost the performance of I/O dependent domains it would be possible
- * to put the domain into the runnable queue immediately, and let it run
+ * -to boost the performance of I/O dependent vcpus it would be possible
+ * to put the vcpu into the runnable queue immediately, and let it run
* for the remainder of the slice of the current period
- * (or even worse: allocate a new full slice for the domain)
- * -either behaviour can lead to missed deadlines in other domains as
+ * (or even worse: allocate a new full slice for the vcpu)
+ * -either behaviour can lead to missed deadlines in other vcpus as
* opposed to approaches 1,2a,2b
*/
static void unblock_short_very_cons(
@@ -616,7 +615,7 @@ static void unblock_short_very_cons(
}
-static void unblock_long_cons_b(struct sedf_vcpu_info* inf,s_time_t now)
+static void unblock_long_cons_b(struct sedf_vcpu_info* inf, s_time_t now)
{
/* Conservative 2b */
@@ -626,9 +625,9 @@ static void unblock_long_cons_b(struct sedf_vcpu_info*
inf,s_time_t now)
}
/*
- * Compares two domains in the relation of whether the one is allowed to
+ * Compares two vcpus in the relation of whether the one is allowed to
* interrupt the others execution.
- * It returns true (!=0) if a switch to the other domain is good.
+ * It returns true (!=0) if a switch to the other vcpu is good.
* Priority scheme is as follows:
* EDF: early deadline > late deadline
*/
@@ -637,33 +636,33 @@ static inline int should_switch(struct vcpu *cur,
s_time_t now)
{
struct sedf_vcpu_info *cur_inf, *other_inf;
- cur_inf = EDOM_INFO(cur);
- other_inf = EDOM_INFO(other);
+ cur_inf = SEDF_VCPU(cur);
+ other_inf = SEDF_VCPU(other);
- /* Always interrupt idle domain. */
+ /* Always interrupt idle vcpu. */
if ( is_idle_vcpu(cur) )
return 1;
/* Check whether we need to make an earlier scheduling decision */
if ( PERIOD_BEGIN(other_inf) <
- CPU_INFO(other->processor)->current_slice_expires )
+ SEDF_PCPU(other->processor)->current_slice_expires )
return 1;
return 0;
}
-static void sedf_wake(const struct scheduler *ops, struct vcpu *d)
+static void sedf_wake(const struct scheduler *ops, struct vcpu *v)
{
s_time_t now = NOW();
- struct sedf_vcpu_info* inf = EDOM_INFO(d);
+ struct sedf_vcpu_info* inf = SEDF_VCPU(v);
- if ( unlikely(is_idle_vcpu(d)) )
+ if ( unlikely(is_idle_vcpu(v)) )
return;
- if ( unlikely(__task_on_queue(d)) )
+ if ( unlikely(__task_on_queue(v)) )
return;
- ASSERT(!sedf_runnable(d));
+ ASSERT(!sedf_runnable(v));
inf->status &= ~SEDF_ASLEEP;
if ( unlikely(inf->deadl_abs == 0) )
@@ -694,9 +693,9 @@ static void sedf_wake(const struct scheduler *ops, struct
vcpu *d)
}
if ( PERIOD_BEGIN(inf) > now )
- __add_to_waitqueue_sort(d);
+ __add_to_waitqueue_sort(v);
else
- __add_to_runqueue_sort(d);
+ __add_to_runqueue_sort(v);
#ifdef SEDF_STATS
/* Do some statistics here... */
@@ -708,75 +707,76 @@ static void sedf_wake(const struct scheduler *ops, struct
vcpu *d)
}
#endif
- ASSERT(__task_on_queue(d));
+ ASSERT(__task_on_queue(v));
/*
* Check whether the awakened task needs to invoke the do_schedule
* routine. Try to avoid unnecessary runs but:
* Save approximation: Always switch to scheduler!
*/
- ASSERT(d->processor >= 0);
- ASSERT(d->processor < nr_cpu_ids);
- ASSERT(per_cpu(schedule_data, d->processor).curr);
+ ASSERT(v->processor >= 0);
+ ASSERT(v->processor < nr_cpu_ids);
+ ASSERT(per_cpu(schedule_data, v->processor).curr);
- if ( should_switch(per_cpu(schedule_data, d->processor).curr, d, now) )
- cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
+ if ( should_switch(per_cpu(schedule_data, v->processor).curr, v, now) )
+ cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
}
-/* Print a lot of useful information about a domains in the system */
-static void sedf_dump_domain(struct vcpu *d)
+/* Print a lot of useful information about a vcpus in the system */
+static void sedf_dump_vcpu(struct vcpu *v)
{
- printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
- d->is_running ? 'T':'F');
+ printk("%i.%i has=%c ", v->domain->domain_id, v->vcpu_id,
+ v->is_running ? 'T':'F');
printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64,
- EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs);
+ SEDF_VCPU(v)->period, SEDF_VCPU(v)->slice, SEDF_VCPU(v)->deadl_abs);
#ifdef SEDF_STATS
- if ( EDOM_INFO(d)->block_time_tot != 0 )
- printk(" pen=%"PRIu64"%%", (EDOM_INFO(d)->penalty_time_tot * 100) /
- EDOM_INFO(d)->block_time_tot);
- if ( EDOM_INFO(d)->block_tot != 0 )
+ if ( SEDF_VCPU(v)->block_time_tot != 0 )
+ printk(" pen=%"PRIu64"%%", (SEDF_VCPU(v)->penalty_time_tot * 100) /
+ SEDF_VCPU(v)->block_time_tot);
+ if ( SEDF_VCPU(v)->block_tot != 0 )
printk("\n blks=%u sh=%u (%u%%) "\
"l=%u (%u%%) avg: b=%"PRIu64" p=%"PRIu64"",
- EDOM_INFO(d)->block_tot, EDOM_INFO(d)->short_block_tot,
- (EDOM_INFO(d)->short_block_tot * 100) / EDOM_INFO(d)->block_tot,
- EDOM_INFO(d)->long_block_tot,
- (EDOM_INFO(d)->long_block_tot * 100) / EDOM_INFO(d)->block_tot,
- (EDOM_INFO(d)->block_time_tot) / EDOM_INFO(d)->block_tot,
- (EDOM_INFO(d)->penalty_time_tot) / EDOM_INFO(d)->block_tot);
+ SEDF_VCPU(v)->block_tot, SEDF_VCPU(v)->short_block_tot,
+ (SEDF_VCPU(v)->short_block_tot * 100) / SEDF_VCPU(v)->block_tot,
+ SEDF_VCPU(v)->long_block_tot,
+ (SEDF_VCPU(v)->long_block_tot * 100) / SEDF_VCPU(v)->block_tot,
+ (SEDF_VCPU(v)->block_time_tot) / SEDF_VCPU(v)->block_tot,
+ (SEDF_VCPU(v)->penalty_time_tot) / SEDF_VCPU(v)->block_tot);
#endif
printk("\n");
}
-/* Dumps all domains on the specified cpu */
-static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
+/* Dumps all vcpus on the specified cpu */
+static void sedf_dump_cpu_state(const struct scheduler *ops, int cpu)
{
struct list_head *list, *queue, *tmp;
- struct sedf_vcpu_info *d_inf;
+ struct sedf_vcpu_info *v_inf;
struct domain *d;
- struct vcpu *ed;
+ struct vcpu *v;
int loop = 0;
- printk("now=%"PRIu64"\n",NOW());
- queue = RUNQ(i);
+ printk("now=%"PRIu64"\n", NOW());
+ queue = RUNQ(cpu);
printk("RUNQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
(unsigned long) queue->next, (unsigned long) queue->prev);
list_for_each_safe ( list, tmp, queue )
{
- printk("%3d: ",loop++);
- d_inf = list_entry(list, struct sedf_vcpu_info, list);
- sedf_dump_domain(d_inf->vcpu);
+ printk("%3d: ", loop++);
+ v_inf = list_entry(list, struct sedf_vcpu_info, list);
+ sedf_dump_vcpu(v_inf->vcpu);
}
- queue = WAITQ(i); loop = 0;
+ queue = WAITQ(cpu);
+ loop = 0;
printk("\nWAITQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
(unsigned long) queue->next, (unsigned long) queue->prev);
list_for_each_safe ( list, tmp, queue )
{
- printk("%3d: ",loop++);
- d_inf = list_entry(list, struct sedf_vcpu_info, list);
- sedf_dump_domain(d_inf->vcpu);
+ printk("%3d: ", loop++);
+ v_inf = list_entry(list, struct sedf_vcpu_info, list);
+ sedf_dump_vcpu(v_inf->vcpu);
}
loop = 0;
@@ -787,12 +787,12 @@ static void sedf_dump_cpu_state(const struct scheduler
*ops, int i)
{
if ( (d->cpupool ? d->cpupool->sched : &sched_sedf_def) != ops )
continue;
- for_each_vcpu(d, ed)
+ for_each_vcpu(d, v)
{
- if ( !__task_on_queue(ed) && (ed->processor == i) )
+ if ( !__task_on_queue(v) && (v->processor == cpu) )
{
- printk("%3d: ",loop++);
- sedf_dump_domain(ed);
+ printk("%3d: ", loop++);
+ sedf_dump_vcpu(v);
}
}
}
@@ -801,7 +801,7 @@ static void sedf_dump_cpu_state(const struct scheduler
*ops, int i)
/* Set or fetch domain scheduling parameters */
-static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct
xen_domctl_scheduler_op *op)
+static int sedf_adjust(const struct scheduler *ops, struct domain *d, struct
xen_domctl_scheduler_op *op)
{
struct sedf_priv_info *prv = SEDF_PRIV(ops);
unsigned long flags;
@@ -840,25 +840,25 @@ static int sedf_adjust(const struct scheduler *ops,
struct domain *p, struct xen
}
/* Time-driven domains */
- for_each_vcpu ( p, v )
+ for_each_vcpu ( d, v )
{
spinlock_t *lock = vcpu_schedule_lock(v);
- EDOM_INFO(v)->period = op->u.sedf.period;
- EDOM_INFO(v)->slice = op->u.sedf.slice;
+ SEDF_VCPU(v)->period = op->u.sedf.period;
+ SEDF_VCPU(v)->slice = op->u.sedf.slice;
vcpu_schedule_unlock(lock, v);
}
}
else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
{
- if ( p->vcpu[0] == NULL )
+ if ( d->vcpu[0] == NULL )
{
rc = -EINVAL;
goto out;
}
- op->u.sedf.period = EDOM_INFO(p->vcpu[0])->period;
- op->u.sedf.slice = EDOM_INFO(p->vcpu[0])->slice;
+ op->u.sedf.period = SEDF_VCPU(d->vcpu[0])->period;
+ op->u.sedf.slice = SEDF_VCPU(d->vcpu[0])->slice;
}
out:
--
1.7.9.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |