[Xen-devel] [PATCH 2/9] xen: credit2: make accessor helpers inline functions instead of macros



There isn't any particular reason for the accessor helpers
to be macros, so turn them into 'static inline' functions,
which are preferable (they get proper type checking of their
arguments, for instance).
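
To illustrate the kind of conversion being done (a generic sketch only,
not code taken from the patch below; 'example_private' and
'example_priv' are made-up names), a cast-based macro accessor becomes
a type checked helper:

    /* Macro form: the argument is substituted textually, so a pointer
     * of the wrong type is accepted silently. */
    #define EXAMPLE_PRIV(_ops) \
        ((struct example_private *)((_ops)->sched_data))

    /* Inline function form: the compiler checks that 'ops' really is a
     * 'const struct scheduler *', and the cast goes away because
     * sched_data is a void pointer. */
    static inline struct example_private *
    example_priv(const struct scheduler *ops)
    {
        return ops->sched_data;
    }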

Note that it is necessary to move the function definitions
below the definitions of the structures they dereference.
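
The reason, in a tiny generic sketch (made-up names, not from this
patch): an inline function body is compiled where it is defined, so the
structures it dereferences must be complete types by that point, while
a macro is only expanded at its use sites:

    /* The complete definition must come first ... */
    struct foo { int count; };

    /* ... because the inline function dereferences it right here, at
     * the point of definition.  (A macro, by contrast, is only expanded
     * where it is used, so it could have been written before the
     * struct.) */
    #define FOO_COUNT(_f)  ((_f)->count)

    static inline int foo_count(struct foo *f)
    {
        return f->count;
    }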

No functional change intended.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
---
 xen/common/sched_credit2.c |  158 +++++++++++++++++++++++++-------------------
 1 file changed, 90 insertions(+), 68 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 322cf6b..29973cf 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -209,18 +209,6 @@ static unsigned int __read_mostly opt_migrate_resist = 500;
 integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
 
 /*
- * Useful macros
- */
-#define CSCHED2_PRIV(_ops)   \
-    ((struct csched2_private *)((_ops)->sched_data))
-#define CSCHED2_VCPU(_vcpu)  ((struct csched2_vcpu *) (_vcpu)->sched_priv)
-#define CSCHED2_DOM(_dom)    ((struct csched2_dom *) (_dom)->sched_priv)
-/* CPU to runq_id macro */
-#define c2r(_ops, _cpu)     (CSCHED2_PRIV(_ops)->runq_map[(_cpu)])
-/* CPU to runqueue struct macro */
-#define RQD(_ops, _cpu)     (&CSCHED2_PRIV(_ops)->rqd[c2r(_ops, _cpu)])
-
-/*
  * Load tracking and load balancing
  *
  * Load history of runqueues and vcpus is accounted for by using an
@@ -441,6 +429,40 @@ struct csched2_dom {
 };
 
 /*
+ * Accessor helper functions.
+ */
+static always_inline
+struct csched2_private *csched2_priv(const struct scheduler *ops)
+{
+    return ops->sched_data;
+}
+
+static always_inline
+struct csched2_vcpu *csched2_vcpu(struct vcpu *v)
+{
+    return v->sched_priv;
+}
+
+static always_inline
+struct csched2_dom *csched2_dom(struct domain *d)
+{
+    return d->sched_priv;
+}
+
+/* CPU to runq_id function */
+static always_inline int c2r(const struct scheduler *ops, unsigned cpu)
+{
+    return csched2_priv(ops)->runq_map[cpu];
+}
+
+/* CPU to runqueue struct function */
+static always_inline
+struct csched2_runqueue_data *c2rqd(const struct scheduler *ops, unsigned cpu)
+{
+    return &csched2_priv(ops)->rqd[c2r(ops, cpu)];
+}
+
+/*
  * Hyperthreading (SMT) support.
  *
  * We use a special per-runq mask (smt_idle) and update it according to the
@@ -694,7 +716,7 @@ static void
 __update_runq_load(const struct scheduler *ops,
                   struct csched2_runqueue_data *rqd, int change, s_time_t now)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, load = rqd->load;
     unsigned int P, W;
 
@@ -781,7 +803,7 @@ static void
 __update_svc_load(const struct scheduler *ops,
                   struct csched2_vcpu *svc, int change, s_time_t now)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, vcpu_load;
     unsigned int P, W;
 
@@ -878,7 +900,7 @@ static void
 runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 {
     unsigned int cpu = svc->vcpu->processor;
-    struct list_head * runq = &RQD(ops, cpu)->runq;
+    struct list_head * runq = &c2rqd(ops, cpu)->runq;
     int pos = 0;
 
     ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
@@ -936,7 +958,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
     int i, ipid = -1;
     s_time_t lowest = (1<<30);
     unsigned int cpu = new->vcpu->processor;
-    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     cpumask_t mask;
     struct csched2_vcpu * cur;
 
@@ -1006,7 +1028,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
     cpumask_and(&mask, &mask, cpumask_scratch_cpu(cpu));
     if ( cpumask_test_cpu(cpu, &mask) )
     {
-        cur = CSCHED2_VCPU(curr_on_cpu(cpu));
+        cur = csched2_vcpu(curr_on_cpu(cpu));
         burn_credits(rqd, cur, now);
 
         if ( cur->credit < new->credit )
@@ -1023,7 +1045,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
         if ( i == cpu )
             continue;
 
-        cur = CSCHED2_VCPU(curr_on_cpu(i));
+        cur = csched2_vcpu(curr_on_cpu(i));
 
         /*
          * Even if the cpu is not in rqd->idle, it may be running the
@@ -1096,7 +1118,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now,
                          struct csched2_vcpu *snext)
 {
-    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct list_head *iter;
     int m;
 
@@ -1174,7 +1196,7 @@ void burn_credits(struct csched2_runqueue_data *rqd,
 {
     s_time_t delta;
 
-    ASSERT(svc == CSCHED2_VCPU(curr_on_cpu(svc->vcpu->processor)));
+    ASSERT(svc == csched2_vcpu(curr_on_cpu(svc->vcpu->processor)));
 
     if ( unlikely(is_idle_vcpu(svc->vcpu)) )
     {
@@ -1262,11 +1284,11 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 static /*inline*/ void
 __csched2_vcpu_check(struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu * const svc = csched2_vcpu(vc);
     struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
-    BUG_ON( sdom != CSCHED2_DOM(vc->domain) );
+    BUG_ON( sdom != csched2_dom(vc->domain) );
     if ( sdom )
     {
         BUG_ON( is_idle_vcpu(vc) );
@@ -1306,7 +1328,7 @@ csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
         svc->credit = CSCHED2_CREDIT_INIT;
         svc->weight = svc->sdom->weight;
         /* Starting load of 50% */
-        svc->avgload = 1ULL << (CSCHED2_PRIV(ops)->load_precision_shift - 1);
+        svc->avgload = 1ULL << (csched2_priv(ops)->load_precision_shift - 1);
         svc->load_last_update = NOW() >> LOADAVG_GRANULARITY_SHIFT;
     }
     else
@@ -1358,7 +1380,7 @@ runq_assign(const struct scheduler *ops, struct vcpu *vc)
 
     ASSERT(svc->rqd == NULL);
 
-    __runq_assign(svc, RQD(ops, vc->processor));
+    __runq_assign(svc, c2rqd(ops, vc->processor));
 }
 
 static void
@@ -1383,7 +1405,7 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched2_vcpu *svc = vc->sched_priv;
 
-    ASSERT(svc->rqd == RQD(ops, vc->processor));
+    ASSERT(svc->rqd == c2rqd(ops, vc->processor));
 
     __runq_deassign(svc);
 }
@@ -1391,7 +1413,7 @@ runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 static void
 csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu * const svc = csched2_vcpu(vc);
 
     ASSERT(!is_idle_vcpu(vc));
     SCHED_STAT_CRANK(vcpu_sleep);
@@ -1400,7 +1422,7 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
         cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
     else if ( __vcpu_on_runq(svc) )
     {
-        ASSERT(svc->rqd == RQD(ops, vc->processor));
+        ASSERT(svc->rqd == c2rqd(ops, vc->processor));
         update_load(ops, svc->rqd, svc, -1, NOW());
         __runq_remove(svc);
     }
@@ -1411,7 +1433,7 @@ csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 static void
 csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu * const svc = csched2_vcpu(vc);
     unsigned int cpu = vc->processor;
     s_time_t now;
 
@@ -1449,7 +1471,7 @@ csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
     if ( svc->rqd == NULL )
         runq_assign(ops, vc);
     else
-        ASSERT(RQD(ops, vc->processor) == svc->rqd );
+        ASSERT(c2rqd(ops, vc->processor) == svc->rqd );
 
     now = NOW();
 
@@ -1466,7 +1488,7 @@ out:
 static void
 csched2_vcpu_yield(const struct scheduler *ops, struct vcpu *v)
 {
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(v);
+    struct csched2_vcpu * const svc = csched2_vcpu(v);
 
     __set_bit(__CSFLAG_vcpu_yield, &svc->flags);
 }
@@ -1474,12 +1496,12 @@ csched2_vcpu_yield(const struct scheduler *ops, struct vcpu *v)
 static void
 csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu * const svc = csched2_vcpu(vc);
     spinlock_t *lock = vcpu_schedule_lock_irq(vc);
     s_time_t now = NOW();
 
-    BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
-    ASSERT(is_idle_vcpu(vc) || svc->rqd == RQD(ops, vc->processor));
+    BUG_ON( !is_idle_vcpu(vc) && svc->rqd != c2rqd(ops, vc->processor));
+    ASSERT(is_idle_vcpu(vc) || svc->rqd == c2rqd(ops, vc->processor));
 
     /* This vcpu is now eligible to be put on the runqueue again */
     __clear_bit(__CSFLAG_scheduled, &svc->flags);
@@ -1510,9 +1532,9 @@ csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
 static int
 csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     int i, min_rqi = -1, new_cpu, cpu = vc->processor;
-    struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu *svc = csched2_vcpu(vc);
     s_time_t min_avgload = MAX_LOAD;
 
     ASSERT(!cpumask_empty(&prv->active_queues));
@@ -1775,7 +1797,7 @@ static bool_t vcpu_is_migrateable(struct csched2_vcpu *svc,
 
 static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     int i, max_delta_rqi = -1;
     struct list_head *push_iter, *pull_iter;
     bool_t inner_load_updated = 0;
@@ -1790,7 +1812,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
      */
 
     ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
-    st.lrqd = RQD(ops, cpu);
+    st.lrqd = c2rqd(ops, cpu);
 
     __update_runq_load(ops, st.lrqd, 0, now);
 
@@ -1961,12 +1983,12 @@ csched2_vcpu_migrate(
     const struct scheduler *ops, struct vcpu *vc, unsigned int new_cpu)
 {
     struct domain *d = vc->domain;
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu * const svc = csched2_vcpu(vc);
     struct csched2_runqueue_data *trqd;
     s_time_t now = NOW();
 
     /* Check if new_cpu is valid */
-    ASSERT(cpumask_test_cpu(new_cpu, &CSCHED2_PRIV(ops)->initialized));
+    ASSERT(cpumask_test_cpu(new_cpu, &csched2_priv(ops)->initialized));
     ASSERT(cpumask_test_cpu(new_cpu, vc->cpu_hard_affinity));
 
     /*
@@ -1997,7 +2019,7 @@ csched2_vcpu_migrate(
         return;
     }
 
-    trqd = RQD(ops, new_cpu);
+    trqd = c2rqd(ops, new_cpu);
 
     /*
      * Do the actual movement toward new_cpu, and update vc->processor.
@@ -2019,8 +2041,8 @@ csched2_dom_cntl(
     struct domain *d,
     struct xen_domctl_scheduler_op *op)
 {
-    struct csched2_dom * const sdom = CSCHED2_DOM(d);
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_dom * const sdom = csched2_dom(d);
+    struct csched2_private *prv = csched2_priv(ops);
     unsigned long flags;
     int rc = 0;
 
@@ -2053,10 +2075,10 @@ csched2_dom_cntl(
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             for_each_vcpu ( d, v )
             {
-                struct csched2_vcpu *svc = CSCHED2_VCPU(v);
+                struct csched2_vcpu *svc = csched2_vcpu(v);
                 spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
-                ASSERT(svc->rqd == RQD(ops, svc->vcpu->processor));
+                ASSERT(svc->rqd == c2rqd(ops, svc->vcpu->processor));
 
                 svc->weight = sdom->weight;
                 update_max_weight(svc->rqd, svc->weight, old_weight);
@@ -2080,7 +2102,7 @@ static int csched2_sys_cntl(const struct scheduler *ops,
                             struct xen_sysctl_scheduler_op *sc)
 {
     xen_sysctl_credit2_schedule_t *params = &sc->u.sched_credit2;
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     unsigned long flags;
 
     switch (sc->cmd )
@@ -2111,7 +2133,7 @@ static int csched2_sys_cntl(const struct scheduler *ops,
 static void *
 csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     struct csched2_dom *sdom;
     unsigned long flags;
 
@@ -2127,7 +2149,7 @@ csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 
     write_lock_irqsave(&prv->lock, flags);
 
-    list_add_tail(&sdom->sdom_elem, &CSCHED2_PRIV(ops)->sdom);
+    list_add_tail(&sdom->sdom_elem, &csched2_priv(ops)->sdom);
 
     write_unlock_irqrestore(&prv->lock, flags);
 
@@ -2156,7 +2178,7 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
 {
     unsigned long flags;
     struct csched2_dom *sdom = data;
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
 
     write_lock_irqsave(&prv->lock, flags);
 
@@ -2170,9 +2192,9 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
 static void
 csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
 {
-    ASSERT(CSCHED2_DOM(dom)->nr_vcpus == 0);
+    ASSERT(csched2_dom(dom)->nr_vcpus == 0);
 
-    csched2_free_domdata(ops, CSCHED2_DOM(dom));
+    csched2_free_domdata(ops, csched2_dom(dom));
 }
 
 static void
@@ -2217,7 +2239,7 @@ csched2_free_vdata(const struct scheduler *ops, void *priv)
 static void
 csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_vcpu * const svc = csched2_vcpu(vc);
     spinlock_t *lock;
 
     ASSERT(!is_idle_vcpu(vc));
@@ -2242,9 +2264,9 @@ csched2_runtime(const struct scheduler *ops, int cpu,
 {
     s_time_t time, min_time;
     int rt_credit; /* Proposed runtime measured in credits */
-    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct list_head *runq = &rqd->runq;
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
 
     /*
      * If we're idle, just stay so. Others (or external events)
@@ -2333,7 +2355,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
 {
     struct list_head *iter;
     struct csched2_vcpu *snext = NULL;
-    struct csched2_private *prv = CSCHED2_PRIV(per_cpu(scheduler, cpu));
+    struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
     bool yield = __test_and_clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
 
     *skipped = 0;
@@ -2372,7 +2394,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     if ( vcpu_runnable(scurr->vcpu) )
         snext = scurr;
     else
-        snext = CSCHED2_VCPU(idle_vcpu[cpu]);
+        snext = csched2_vcpu(idle_vcpu[cpu]);
 
     list_for_each( iter, &rqd->runq )
     {
@@ -2452,7 +2474,7 @@ csched2_schedule(
 {
     const int cpu = smp_processor_id();
     struct csched2_runqueue_data *rqd;
-    struct csched2_vcpu * const scurr = CSCHED2_VCPU(current);
+    struct csched2_vcpu * const scurr = csched2_vcpu(current);
     struct csched2_vcpu *snext = NULL;
     unsigned int skipped_vcpus = 0;
     struct task_slice ret;
@@ -2461,9 +2483,9 @@ csched2_schedule(
     SCHED_STAT_CRANK(schedule);
     CSCHED2_VCPU_CHECK(current);
 
-    BUG_ON(!cpumask_test_cpu(cpu, &CSCHED2_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(cpu, &csched2_priv(ops)->initialized));
 
-    rqd = RQD(ops, cpu);
+    rqd = c2rqd(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
 
     ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
@@ -2521,7 +2543,7 @@ csched2_schedule(
     {
         __clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
         trace_var(TRC_CSCHED2_SCHED_TASKLET, 1, 0, NULL);
-        snext = CSCHED2_VCPU(idle_vcpu[cpu]);
+        snext = csched2_vcpu(idle_vcpu[cpu]);
     }
     else
         snext = runq_candidate(rqd, scurr, cpu, now, &skipped_vcpus);
@@ -2642,7 +2664,7 @@ csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
 static void
 csched2_dump_pcpu(const struct scheduler *ops, int cpu)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     struct list_head *runq, *iter;
     struct csched2_vcpu *svc;
     unsigned long flags;
@@ -2661,7 +2683,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
     lock = per_cpu(schedule_data, cpu).schedule_lock;
     spin_lock(lock);
 
-    runq = &RQD(ops, cpu)->runq;
+    runq = &c2rqd(ops, cpu)->runq;
 
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
     printk(" sibling=%s, ", cpustr);
@@ -2669,7 +2691,7 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
-    svc = CSCHED2_VCPU(curr_on_cpu(cpu));
+    svc = csched2_vcpu(curr_on_cpu(cpu));
     if ( svc )
     {
         printk("\trun: ");
@@ -2696,7 +2718,7 @@ static void
 csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom;
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     unsigned long flags;
     int i, loop;
 #define cpustr keyhandler_scratch
@@ -2756,7 +2778,7 @@ csched2_dump(const struct scheduler *ops)
 
         for_each_vcpu( sdom->dom, v )
         {
-            struct csched2_vcpu * const svc = CSCHED2_VCPU(v);
+            struct csched2_vcpu * const svc = csched2_vcpu(v);
             spinlock_t *lock;
 
             lock = vcpu_schedule_lock(svc->vcpu);
@@ -2899,7 +2921,7 @@ init_pdata(struct csched2_private *prv, unsigned int cpu)
 static void
 csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     spinlock_t *old_lock;
     unsigned long flags;
     unsigned rqi;
@@ -2927,7 +2949,7 @@ static void
 csched2_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                      void *pdata, void *vdata)
 {
-    struct csched2_private *prv = CSCHED2_PRIV(new_ops);
+    struct csched2_private *prv = csched2_priv(new_ops);
     struct csched2_vcpu *svc = vdata;
     unsigned rqi;
 
@@ -2974,7 +2996,7 @@ static void
 csched2_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     unsigned long flags;
-    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_private *prv = csched2_priv(ops);
     struct csched2_runqueue_data *rqd;
     int rqi;
 
@@ -3083,7 +3105,7 @@ csched2_deinit(struct scheduler *ops)
 {
     struct csched2_private *prv;
 
-    prv = CSCHED2_PRIV(ops);
+    prv = csched2_priv(ops);
     ops->sched_data = NULL;
     xfree(prv);
 }

