
[Xen-devel] [PATCH RFC 17/49] xen/sched: move some per-vcpu items to struct sched_item



Affinities are scheduler-specific attributes, so they should be per
scheduling item. Move all affinity-related fields from struct vcpu to
struct sched_item. While at it, switch the affinity-related functions in
sched-if.h to take a pointer to sched_item instead of to a vcpu.

vcpu->last_run_time is primarily used by sched_credit, so move it to
struct sched_item, too.
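
For illustration, a minimal sketch of how call sites change (not part of
this patch; the wrapper below is hypothetical, only the field and
function names match what the patch introduces):

    /* Hypothetical helper, for illustration only: affinity is now
     * reached through the vcpu's scheduling item instead of being
     * stored in struct vcpu itself. */
    static inline const cpumask_t *vcpu_hard_affinity(const struct vcpu *v)
    {
        return v->sched_item->cpu_hard_affinity;
    }

Scheduler-internal helpers such as affinity_balance_cpumask() and
has_soft_affinity() now take a struct sched_item * rather than a
struct vcpu *, as can be seen in the sched-if.h hunk below.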

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/domain.c          |   1 +
 xen/arch/x86/pv/emul-priv-op.c |   2 +
 xen/arch/x86/pv/traps.c        |   6 ++-
 xen/arch/x86/traps.c           |  10 ++--
 xen/common/domain.c            |  19 ++-----
 xen/common/domctl.c            |  13 +++--
 xen/common/keyhandler.c        |   5 +-
 xen/common/sched_credit.c      |  20 ++++----
 xen/common/sched_credit2.c     |  42 ++++++++--------
 xen/common/sched_null.c        |  16 +++---
 xen/common/sched_rt.c          |   9 ++--
 xen/common/schedule.c          | 110 ++++++++++++++++++++++++-----------------
 xen/common/wait.c              |   5 +-
 xen/include/xen/sched-if.h     |  33 ++++++++++---
 xen/include/xen/sched.h        |  20 +-------
 15 files changed, 168 insertions(+), 143 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 8d579e2cf9..5d8f3255cb 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -15,6 +15,7 @@
 #include <xen/lib.h>
 #include <xen/errno.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/domain.h>
 #include <xen/smp.h>
 #include <xen/delay.h>
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 3746e2ad54..f7d98c28f1 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -23,6 +23,8 @@
 #include <xen/event.h>
 #include <xen/guest_access.h>
 #include <xen/iocap.h>
+#include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/spinlock.h>
 #include <xen/trace.h>
 
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 1740784ff2..f586d486fc 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -22,6 +22,8 @@
 #include <xen/event.h>
 #include <xen/hypercall.h>
 #include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/trace.h>
 #include <xen/softirq.h>
 
@@ -155,8 +157,8 @@ static void nmi_mce_softirq(void)
      * Set the tmp value unconditionally, so that the check in the iret
      * hypercall works.
      */
-    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
-                 st->vcpu->cpu_hard_affinity);
+    cpumask_copy(st->vcpu->sched_item->cpu_hard_affinity_tmp,
+                 st->vcpu->sched_item->cpu_hard_affinity);
 
     if ( (cpu != st->processor) ||
          (st->processor != st->vcpu->processor) )
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 05ddc39bfe..481d0b1c37 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -26,6 +26,7 @@
 
 #include <xen/init.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/lib.h>
 #include <xen/err.h>
 #include <xen/errno.h>
@@ -1594,16 +1595,17 @@ static void pci_serr_softirq(void)
 void async_exception_cleanup(struct vcpu *curr)
 {
     int trap;
+    struct sched_item *item = curr->sched_item;
 
     if ( !curr->async_exception_mask )
         return;
 
     /* Restore affinity.  */
-    if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
-         !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
+    if ( !cpumask_empty(item->cpu_hard_affinity_tmp) &&
+         !cpumask_equal(item->cpu_hard_affinity_tmp, item->cpu_hard_affinity) )
     {
-        vcpu_set_hard_affinity(curr, curr->cpu_hard_affinity_tmp);
-        cpumask_clear(curr->cpu_hard_affinity_tmp);
+        vcpu_set_hard_affinity(curr, item->cpu_hard_affinity_tmp);
+        cpumask_clear(item->cpu_hard_affinity_tmp);
     }
 
     if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3b18f11f12..2045e762ac 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,11 +126,6 @@ static void vcpu_info_reset(struct vcpu *v)
 
 static void vcpu_destroy(struct vcpu *v)
 {
-    free_cpumask_var(v->cpu_hard_affinity);
-    free_cpumask_var(v->cpu_hard_affinity_tmp);
-    free_cpumask_var(v->cpu_hard_affinity_saved);
-    free_cpumask_var(v->cpu_soft_affinity);
-
     free_vcpu_struct(v);
 }
 
@@ -154,12 +149,6 @@ struct vcpu *vcpu_create(
 
     grant_table_init_vcpu(v);
 
-    if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
-         !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
-         !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
-         !zalloc_cpumask_var(&v->cpu_soft_affinity) )
-        goto fail;
-
     if ( is_idle_domain(d) )
     {
         v->runstate.state = RUNSTATE_running;
@@ -199,7 +188,6 @@ struct vcpu *vcpu_create(
     sched_destroy_vcpu(v);
  fail_wq:
     destroy_waitqueue_vcpu(v);
- fail:
     vcpu_destroy(v);
 
     return NULL;
@@ -559,9 +547,10 @@ void domain_update_node_affinity(struct domain *d)
          */
         for_each_vcpu ( d, v )
         {
-            cpumask_or(dom_cpumask, dom_cpumask, v->cpu_hard_affinity);
+            cpumask_or(dom_cpumask, dom_cpumask,
+                       v->sched_item->cpu_hard_affinity);
             cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
-                       v->cpu_soft_affinity);
+                       v->sched_item->cpu_soft_affinity);
         }
         /* Filter out non-online cpus */
         cpumask_and(dom_cpumask, dom_cpumask, online);
@@ -1230,7 +1219,7 @@ int vcpu_reset(struct vcpu *v)
     v->async_exception_mask = 0;
     memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
 #endif
-    cpumask_clear(v->cpu_hard_affinity_tmp);
+    cpumask_clear(v->sched_item->cpu_hard_affinity_tmp);
     clear_bit(_VPF_blocked, &v->pause_flags);
     clear_bit(_VPF_in_reset, &v->pause_flags);
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index bade9a63b1..8464713d2b 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -614,6 +614,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     case XEN_DOMCTL_getvcpuaffinity:
     {
         struct vcpu *v;
+        struct sched_item *item;
         struct xen_domctl_vcpuaffinity *vcpuaff = &op->u.vcpuaffinity;
 
         ret = -EINVAL;
@@ -624,6 +625,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         if ( (v = d->vcpu[vcpuaff->vcpu]) == NULL )
             break;
 
+        item = v->sched_item;
         ret = -EINVAL;
         if ( vcpuaffinity_params_invalid(vcpuaff) )
             break;
@@ -643,7 +645,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                 ret = -ENOMEM;
                 break;
             }
-            cpumask_copy(old_affinity, v->cpu_hard_affinity);
+            cpumask_copy(old_affinity, item->cpu_hard_affinity);
 
             if ( !alloc_cpumask_var(&new_affinity) )
             {
@@ -676,7 +678,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                  * For hard affinity, what we return is the intersection of
                  * cpupool's online mask and the new hard affinity.
                  */
-                cpumask_and(new_affinity, online, v->cpu_hard_affinity);
+                cpumask_and(new_affinity, online, item->cpu_hard_affinity);
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
                                                new_affinity);
             }
@@ -705,7 +707,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                  * hard affinity.
                  */
                 cpumask_and(new_affinity, new_affinity, online);
-                cpumask_and(new_affinity, new_affinity, v->cpu_hard_affinity);
+                cpumask_and(new_affinity, new_affinity,
+                            item->cpu_hard_affinity);
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
                                                new_affinity);
             }
@@ -718,10 +721,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         {
             if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
-                                               v->cpu_hard_affinity);
+                                               item->cpu_hard_affinity);
             if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
-                                               v->cpu_soft_affinity);
+                                               item->cpu_soft_affinity);
         }
         break;
     }
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 4f4a660b0c..f50df5841d 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -9,6 +9,7 @@
 #include <xen/console.h>
 #include <xen/serial.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/tasklet.h>
 #include <xen/domain.h>
 #include <xen/rangeset.h>
@@ -312,8 +313,8 @@ static void dump_domains(unsigned char key)
                 printk("dirty_cpu=%u", v->dirty_cpu);
             printk("\n");
             printk("    cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
-                   nr_cpu_ids, cpumask_bits(v->cpu_hard_affinity),
-                   nr_cpu_ids, cpumask_bits(v->cpu_soft_affinity));
+                   nr_cpu_ids, cpumask_bits(v->sched_item->cpu_hard_affinity),
+                   nr_cpu_ids, cpumask_bits(v->sched_item->cpu_soft_affinity));
             printk("    pause_count=%d pause_flags=%lx\n",
                    atomic_read(&v->pause_count), v->pause_flags);
             arch_dump_vcpu_info(v);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index de4face2bc..9e7c849b94 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -350,6 +350,7 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 static inline void __runq_tickle(struct csched_item *new)
 {
     unsigned int cpu = new->vcpu->processor;
+    struct sched_item *item = new->vcpu->sched_item;
     struct csched_item * const cur = CSCHED_ITEM(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask, idle_mask, *online;
@@ -375,7 +376,7 @@ static inline void __runq_tickle(struct csched_item *new)
     if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
                   cpumask_test_cpu(cpu, &idle_mask)) )
     {
-        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        ASSERT(cpumask_cycle(cpu, item->cpu_hard_affinity) == cpu);
         SCHED_STAT_CRANK(tickled_idle_cpu_excl);
         __cpumask_set_cpu(cpu, &mask);
         goto tickle;
@@ -410,11 +411,11 @@ static inline void __runq_tickle(struct csched_item *new)
             int new_idlers_empty;
 
             if ( balance_step == BALANCE_SOFT_AFFINITY
-                 && !has_soft_affinity(new->vcpu) )
+                 && !has_soft_affinity(item) )
                 continue;
 
             /* Are there idlers suitable for new (for this balance step)? */
-            affinity_balance_cpumask(new->vcpu, balance_step,
+            affinity_balance_cpumask(item, balance_step,
                                      cpumask_scratch_cpu(cpu));
             cpumask_and(cpumask_scratch_cpu(cpu),
                         cpumask_scratch_cpu(cpu), &idle_mask);
@@ -443,8 +444,7 @@ static inline void __runq_tickle(struct csched_item *new)
              */
             if ( new_idlers_empty && new->pri > cur->pri )
             {
-                if ( cpumask_intersects(cur->vcpu->cpu_hard_affinity,
-                                        &idle_mask) )
+                if ( cpumask_intersects(item->cpu_hard_affinity, &idle_mask) )
                 {
                     SCHED_VCPU_STAT_CRANK(cur, kicked_away);
                     SCHED_VCPU_STAT_CRANK(cur, migrate_r);
@@ -704,7 +704,7 @@ static inline bool
 __csched_vcpu_is_cache_hot(const struct csched_private *prv, struct vcpu *v)
 {
     bool hot = prv->vcpu_migr_delay &&
-               (NOW() - v->last_run_time) < prv->vcpu_migr_delay;
+               (NOW() - v->sched_item->last_run_time) < prv->vcpu_migr_delay;
 
     if ( hot )
         SCHED_STAT_CRANK(vcpu_hot);
@@ -742,7 +742,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
 
     for_each_affinity_balance_step( balance_step )
     {
-        affinity_balance_cpumask(vc, balance_step, cpus);
+        affinity_balance_cpumask(vc->sched_item, balance_step, cpus);
         cpumask_and(cpus, online, cpus);
         /*
          * We want to pick up a pcpu among the ones that are online and
@@ -761,7 +761,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * balancing step all together.
          */
         if ( balance_step == BALANCE_SOFT_AFFINITY &&
-             (!has_soft_affinity(vc) || cpumask_empty(cpus)) )
+             (!has_soft_affinity(vc->sched_item) || cpumask_empty(cpus)) )
             continue;
 
         /* If present, prefer vc's current processor */
@@ -1660,10 +1660,10 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
          * or counter.
          */
         if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
-                                !has_soft_affinity(vc)) )
+                                !has_soft_affinity(vc->sched_item)) )
             continue;
 
-        affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
+        affinity_balance_cpumask(vc->sched_item, balance_step, cpumask_scratch);
         if ( __csched_vcpu_is_migrateable(prv, vc, cpu, cpumask_scratch) )
         {
             /* We got a candidate. Grab it! */
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 6106293b3f..5c1794db61 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -699,10 +699,10 @@ static int get_fallback_cpu(struct csched2_item *svc)
     {
         int cpu = v->processor;
 
-        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v->sched_item) )
             continue;
 
-        affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(v->sched_item, bs, cpumask_scratch_cpu(cpu));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     cpupool_domain_cpumask(v->domain));
 
@@ -1390,10 +1390,10 @@ static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
      */
     if ( score > 0 )
     {
-        if ( cpumask_test_cpu(cpu, new->vcpu->cpu_soft_affinity) )
+        if ( cpumask_test_cpu(cpu, new->vcpu->sched_item->cpu_soft_affinity) )
             score += CSCHED2_CREDIT_INIT;
 
-        if ( !cpumask_test_cpu(cpu, cur->vcpu->cpu_soft_affinity) )
+        if ( !cpumask_test_cpu(cpu, cur->vcpu->sched_item->cpu_soft_affinity) )
             score += CSCHED2_CREDIT_INIT;
     }
 
@@ -1436,6 +1436,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_item *new, s_time_t now)
 {
     int i, ipid = -1;
     s_time_t max = 0;
+    struct sched_item *item = new->vcpu->sched_item;
     unsigned int bs, cpu = new->vcpu->processor;
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     cpumask_t *online = cpupool_domain_cpumask(new->vcpu->domain);
@@ -1473,7 +1474,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_item *new, s_time_t now)
                   cpumask_test_cpu(cpu, &rqd->idle) &&
                   !cpumask_test_cpu(cpu, &rqd->tickled)) )
     {
-        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        ASSERT(cpumask_cycle(cpu, item->cpu_hard_affinity) == cpu);
         SCHED_STAT_CRANK(tickled_idle_cpu_excl);
         ipid = cpu;
         goto tickle;
@@ -1482,10 +1483,10 @@ runq_tickle(const struct scheduler *ops, struct csched2_item *new, s_time_t now)
     for_each_affinity_balance_step( bs )
     {
         /* Just skip first step, if we don't have a soft affinity */
-        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(item) )
             continue;
 
-        affinity_balance_cpumask(new->vcpu, bs, cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(item, bs, cpumask_scratch_cpu(cpu));
 
         /*
          * First of all, consider idle cpus, checking if we can just
@@ -1557,7 +1558,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_item *new, s_time_t now)
             ipid = cpu;
 
             /* If this is in new's soft affinity, just take it */
-            if ( cpumask_test_cpu(cpu, new->vcpu->cpu_soft_affinity) )
+            if ( cpumask_test_cpu(cpu, item->cpu_soft_affinity) )
             {
                 SCHED_STAT_CRANK(tickled_busy_cpu);
                 goto tickle;
@@ -2243,7 +2244,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
         goto out;
     }
 
-    cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                 cpupool_domain_cpumask(vc->domain));
 
     /*
@@ -2288,7 +2289,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
      *
      * Find both runqueues in one pass.
      */
-    has_soft = has_soft_affinity(vc);
+    has_soft = has_soft_affinity(item);
     for_each_cpu(i, &prv->active_queues)
     {
         struct csched2_runqueue_data *rqd;
@@ -2335,7 +2336,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
             cpumask_t mask;
 
             cpumask_and(&mask, cpumask_scratch_cpu(cpu), &rqd->active);
-            if ( cpumask_intersects(&mask, svc->vcpu->cpu_soft_affinity) )
+            if ( cpumask_intersects(&mask, item->cpu_soft_affinity) )
             {
                 min_s_avgload = rqd_avgload;
                 min_s_rqi = i;
@@ -2357,9 +2358,9 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
          * Note that, to obtain the soft-affinity mask, we "just" put what we
          * have in cpumask_scratch in && with vc->cpu_soft_affinity. This is
          * ok because:
-         * - we know that vc->cpu_hard_affinity and vc->cpu_soft_affinity have
+         * - we know that item->cpu_hard_affinity and ->cpu_soft_affinity have
          *   a non-empty intersection (because has_soft is true);
-         * - we have vc->cpu_hard_affinity & cpupool_domain_cpumask() already
+         * - we have item->cpu_hard_affinity & cpupool_domain_cpumask() already
          *   in cpumask_scratch, we do save a lot doing like this.
          *
          * It's kind of like open coding affinity_balance_cpumask() but, in
@@ -2367,7 +2368,7 @@ csched2_res_pick(const struct scheduler *ops, struct sched_item *item)
          * cpumask operations.
          */
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
-                    vc->cpu_soft_affinity);
+                    item->cpu_soft_affinity);
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     &prv->rqd[min_s_rqi].active);
     }
@@ -2475,6 +2476,7 @@ static void migrate(const struct scheduler *ops,
                     s_time_t now)
 {
     int cpu = svc->vcpu->processor;
+    struct sched_item *item = svc->vcpu->sched_item;
 
     if ( unlikely(tb_init_done) )
     {
@@ -2512,7 +2514,7 @@ static void migrate(const struct scheduler *ops,
         }
         _runq_deassign(svc);
 
-        cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
+        cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                     cpupool_domain_cpumask(svc->vcpu->domain));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     &trqd->active);
@@ -2546,7 +2548,7 @@ static bool vcpu_is_migrateable(struct csched2_item *svc,
     struct vcpu *v = svc->vcpu;
     int cpu = svc->vcpu->processor;
 
-    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), v->sched_item->cpu_hard_affinity,
                 cpupool_domain_cpumask(v->domain));
 
     return !(svc->flags & CSFLAG_runq_migrate_request) &&
@@ -2780,7 +2782,7 @@ csched2_item_migrate(
 
     /* If here, new_cpu must be a valid Credit2 pCPU, and in our affinity. */
     ASSERT(cpumask_test_cpu(new_cpu, &csched2_priv(ops)->initialized));
-    ASSERT(cpumask_test_cpu(new_cpu, vc->cpu_hard_affinity));
+    ASSERT(cpumask_test_cpu(new_cpu, item->cpu_hard_affinity));
 
     trqd = c2rqd(ops, new_cpu);
 
@@ -3320,9 +3322,9 @@ runq_candidate(struct csched2_runqueue_data *rqd,
     }
 
     /* If scurr has a soft-affinity, let's check whether cpu is part of it */
-    if ( has_soft_affinity(scurr->vcpu) )
+    if ( has_soft_affinity(scurr->vcpu->sched_item) )
     {
-        affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
+        affinity_balance_cpumask(scurr->vcpu->sched_item, BALANCE_SOFT_AFFINITY,
                                  cpumask_scratch);
         if ( unlikely(!cpumask_test_cpu(cpu, cpumask_scratch)) )
         {
@@ -3377,7 +3379,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
         }
 
         /* Only consider vcpus that are allowed to run on this processor. */
-        if ( !cpumask_test_cpu(cpu, svc->vcpu->cpu_hard_affinity) )
+        if ( !cpumask_test_cpu(cpu, svc->vcpu->sched_item->cpu_hard_affinity) )
         {
             (*skipped)++;
             continue;
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index 620925e8ce..c45af9f8ee 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -123,7 +123,8 @@ static inline struct null_item *null_item(const struct sched_item *item)
 static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
                                        unsigned int balance_step)
 {
-    affinity_balance_cpumask(v, balance_step, cpumask_scratch_cpu(cpu));
+    affinity_balance_cpumask(v->sched_item, balance_step,
+                             cpumask_scratch_cpu(cpu));
     cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                 cpupool_domain_cpumask(v->domain));
 
@@ -281,10 +282,10 @@ pick_res(struct null_private *prv, struct sched_item *item)
 
     for_each_affinity_balance_step( bs )
     {
-        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(item) )
             continue;
 
-        affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(item, bs, cpumask_scratch_cpu(cpu));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu), cpus);
 
         /*
@@ -321,7 +322,7 @@ pick_res(struct null_private *prv, struct sched_item *item)
      * as we will actually assign the vCPU to the pCPU we return from here,
      * only if the pCPU is free.
      */
-    cpumask_and(cpumask_scratch_cpu(cpu), cpus, v->cpu_hard_affinity);
+    cpumask_and(cpumask_scratch_cpu(cpu), cpus, item->cpu_hard_affinity);
     new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
 
  out:
@@ -438,7 +439,7 @@ static void null_item_insert(const struct scheduler *ops,
 
     lock = item_schedule_lock(item);
 
-    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                 cpupool_domain_cpumask(v->domain));
 
     /* If the pCPU is free, we assign v to it */
@@ -496,7 +497,8 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
     {
         list_for_each_entry( wvc, &prv->waitq, waitq_elem )
         {
-            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
+            if ( bs == BALANCE_SOFT_AFFINITY &&
+                 !has_soft_affinity(wvc->vcpu->sched_item) )
                 continue;
 
             if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
@@ -775,7 +777,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
             list_for_each_entry( wvc, &prv->waitq, waitq_elem )
             {
                 if ( bs == BALANCE_SOFT_AFFINITY &&
-                     !has_soft_affinity(wvc->vcpu) )
+                     !has_soft_affinity(wvc->vcpu->sched_item) )
                     continue;
 
                 if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index a604a0d5a6..58560d086b 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -327,7 +327,7 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_item *svc)
     mask = cpumask_scratch_cpu(svc->vcpu->processor);
 
     cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
-    cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
+    cpumask_and(mask, cpupool_mask, svc->vcpu->sched_item->cpu_hard_affinity);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
            " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
            " \t\t priority_level=%d has_extratime=%d\n"
@@ -645,7 +645,7 @@ rt_res_pick(const struct scheduler *ops, struct sched_item *item)
     int cpu;
 
     online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
+    cpumask_and(&cpus, online, item->cpu_hard_affinity);
 
     cpu = cpumask_test_cpu(vc->processor, &cpus)
             ? vc->processor
@@ -1030,7 +1030,8 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 
         /* mask cpu_hard_affinity & cpupool & mask */
         online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
-        cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
+        cpumask_and(&cpu_common, online,
+                    iter_svc->vcpu->sched_item->cpu_hard_affinity);
         cpumask_and(&cpu_common, mask, &cpu_common);
         if ( cpumask_empty(&cpu_common) )
             continue;
@@ -1199,7 +1200,7 @@ runq_tickle(const struct scheduler *ops, struct rt_item *new)
         return;
 
     online = cpupool_domain_cpumask(new->vcpu->domain);
-    cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
+    cpumask_and(&not_tickled, online, new->vcpu->sched_item->cpu_hard_affinity);
     cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);
 
     /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index a8382d9812..be85fb8000 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -274,6 +274,12 @@ static void sched_free_item(struct sched_item *item)
     }
 
     item->vcpu->sched_item = NULL;
+
+    free_cpumask_var(item->cpu_hard_affinity);
+    free_cpumask_var(item->cpu_hard_affinity_tmp);
+    free_cpumask_var(item->cpu_hard_affinity_saved);
+    free_cpumask_var(item->cpu_soft_affinity);
+
     xfree(item);
 }
 
@@ -297,7 +303,17 @@ static struct sched_item *sched_alloc_item(struct vcpu *v)
     item->next_in_list = *prev_item;
     *prev_item = item;
 
+    if ( !zalloc_cpumask_var(&item->cpu_hard_affinity) ||
+         !zalloc_cpumask_var(&item->cpu_hard_affinity_tmp) ||
+         !zalloc_cpumask_var(&item->cpu_hard_affinity_saved) ||
+         !zalloc_cpumask_var(&item->cpu_soft_affinity) )
+        goto fail;
+
     return item;
+
+ fail:
+    sched_free_item(item);
+    return NULL;
 }
 
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
@@ -367,7 +383,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
 
     for_each_vcpu ( d, v )
     {
-        if ( v->affinity_broken )
+        if ( v->sched_item->affinity_broken )
             return -EBUSY;
     }
 
@@ -692,7 +708,7 @@ static void vcpu_migrate_finish(struct vcpu *v)
              */
             if ( pick_called &&
                  (new_lock == per_cpu(sched_res, new_cpu)->schedule_lock) &&
-                 cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
+                 cpumask_test_cpu(new_cpu, v->sched_item->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
 
@@ -768,6 +784,7 @@ void restore_vcpu_affinity(struct domain *d)
     {
         spinlock_t *lock;
         unsigned int old_cpu = v->processor;
+        struct sched_item *item = v->sched_item;
 
         ASSERT(!vcpu_runnable(v));
 
@@ -779,15 +796,15 @@ void restore_vcpu_affinity(struct domain *d)
          * set v->processor of each of their vCPUs to something that will
          * make sense for the scheduler of the cpupool in which they are in.
          */
-        cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+        cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                     cpupool_domain_cpumask(d));
         if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
         {
-            if ( v->affinity_broken )
+            if ( item->affinity_broken )
             {
-                sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
-                v->affinity_broken = 0;
-                cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                sched_set_affinity(v, item->cpu_hard_affinity_saved, NULL);
+                item->affinity_broken = 0;
+                cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
 
@@ -795,18 +812,17 @@ void restore_vcpu_affinity(struct domain *d)
             {
                 printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
                 sched_set_affinity(v, &cpumask_all, NULL);
-                cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                cpumask_and(cpumask_scratch_cpu(cpu), item->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
         }
 
         v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
-        v->sched_item->res = per_cpu(sched_res, v->processor);
+        item->res = per_cpu(sched_res, v->processor);
 
-        lock = item_schedule_lock_irq(v->sched_item);
-        v->sched_item->res = SCHED_OP(vcpu_scheduler(v), pick_resource,
-                                      v->sched_item);
-        v->processor = v->sched_item->res->processor;
+        lock = item_schedule_lock_irq(item);
+        item->res = SCHED_OP(vcpu_scheduler(v), pick_resource, item);
+        v->processor = item->res->processor;
         spin_unlock_irq(lock);
 
         if ( old_cpu != v->processor )
@@ -838,16 +854,17 @@ int cpu_disable_scheduler(unsigned int cpu)
         for_each_vcpu ( d, v )
         {
             unsigned long flags;
-            spinlock_t *lock = item_schedule_lock_irqsave(v->sched_item, &flags);
+            struct sched_item *item = v->sched_item;
+            spinlock_t *lock = item_schedule_lock_irqsave(item, &flags);
 
-            cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
+            cpumask_and(&online_affinity, item->cpu_hard_affinity, 
c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
-                 cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
+                 cpumask_test_cpu(cpu, item->cpu_hard_affinity) )
             {
-                if ( v->affinity_broken )
+                if ( item->affinity_broken )
                 {
                     /* The vcpu is temporarily pinned, can't move it. */
-                    item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
+                    item_schedule_unlock_irqrestore(lock, flags, item);
                     ret = -EADDRINUSE;
                     break;
                 }
@@ -860,7 +877,7 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor != cpu )
             {
                 /* The vcpu is not on this cpu, so we can move on. */
-                item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
+                item_schedule_unlock_irqrestore(lock, flags, item);
                 continue;
             }
 
@@ -873,7 +890,7 @@ int cpu_disable_scheduler(unsigned int cpu)
              *    things would have failed before getting in here.
              */
             vcpu_migrate_start(v);
-            item_schedule_unlock_irqrestore(lock, flags, v->sched_item);
+            item_schedule_unlock_irqrestore(lock, flags, item);
 
             vcpu_migrate_finish(v);
 
@@ -904,7 +921,7 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
     {
         for_each_vcpu ( d, v )
         {
-            if ( v->affinity_broken )
+            if ( v->sched_item->affinity_broken )
                 return -EADDRINUSE;
             if ( system_state != SYS_STATE_suspend && v->processor == cpu )
                 return -EAGAIN;
@@ -924,29 +941,30 @@ static int cpu_disable_scheduler_check(unsigned int cpu)
 void sched_set_affinity(
     struct vcpu *v, const cpumask_t *hard, const cpumask_t *soft)
 {
-    SCHED_OP(dom_scheduler(v->domain), adjust_affinity, v->sched_item,
-             hard, soft);
+    struct sched_item *item = v->sched_item;
+    SCHED_OP(dom_scheduler(v->domain), adjust_affinity, item, hard, soft);
 
     if ( hard )
-        cpumask_copy(v->cpu_hard_affinity, hard);
+        cpumask_copy(item->cpu_hard_affinity, hard);
     if ( soft )
-        cpumask_copy(v->cpu_soft_affinity, soft);
+        cpumask_copy(item->cpu_soft_affinity, soft);
 
-    v->soft_aff_effective = !cpumask_subset(v->cpu_hard_affinity,
-                                            v->cpu_soft_affinity) &&
-                            cpumask_intersects(v->cpu_soft_affinity,
-                                               v->cpu_hard_affinity);
+    item->soft_aff_effective = !cpumask_subset(item->cpu_hard_affinity,
+                                               item->cpu_soft_affinity) &&
+                               cpumask_intersects(item->cpu_soft_affinity,
+                                                  item->cpu_hard_affinity);
 }
 
 static int vcpu_set_affinity(
     struct vcpu *v, const cpumask_t *affinity, const cpumask_t *which)
 {
+    struct sched_item *item = v->sched_item;
     spinlock_t *lock;
     int ret = 0;
 
-    lock = item_schedule_lock_irq(v->sched_item);
+    lock = item_schedule_lock_irq(item);
 
-    if ( v->affinity_broken )
+    if ( item->affinity_broken )
         ret = -EBUSY;
     else
     {
@@ -954,19 +972,19 @@ static int vcpu_set_affinity(
          * Tell the scheduler we changes something about affinity,
          * and ask to re-evaluate vcpu placement.
          */
-        if ( which == v->cpu_hard_affinity )
+        if ( which == item->cpu_hard_affinity )
         {
             sched_set_affinity(v, affinity, NULL);
         }
         else
         {
-            ASSERT(which == v->cpu_soft_affinity);
+            ASSERT(which == item->cpu_soft_affinity);
             sched_set_affinity(v, NULL, affinity);
         }
         vcpu_migrate_start(v);
     }
 
-    item_schedule_unlock_irq(lock, v->sched_item);
+    item_schedule_unlock_irq(lock, item);
 
     domain_update_node_affinity(v->domain);
 
@@ -988,12 +1006,12 @@ int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
     if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
-    return vcpu_set_affinity(v, affinity, v->cpu_hard_affinity);
+    return vcpu_set_affinity(v, affinity, v->sched_item->cpu_hard_affinity);
 }
 
 int vcpu_set_soft_affinity(struct vcpu *v, const cpumask_t *affinity)
 {
-    return vcpu_set_affinity(v, affinity, v->cpu_soft_affinity);
+    return vcpu_set_affinity(v, affinity, v->sched_item->cpu_soft_affinity);
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -1187,28 +1205,30 @@ void watchdog_domain_destroy(struct domain *d)
 
 int vcpu_pin_override(struct vcpu *v, int cpu)
 {
+    struct sched_item *item = v->sched_item;
     spinlock_t *lock;
     int ret = -EINVAL;
 
-    lock = item_schedule_lock_irq(v->sched_item);
+    lock = item_schedule_lock_irq(item);
 
     if ( cpu < 0 )
     {
-        if ( v->affinity_broken )
+        if ( item->affinity_broken )
         {
-            sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
-            v->affinity_broken = 0;
+            sched_set_affinity(v, item->cpu_hard_affinity_saved, NULL);
+            item->affinity_broken = 0;
             ret = 0;
         }
     }
     else if ( cpu < nr_cpu_ids )
     {
-        if ( v->affinity_broken )
+        if ( item->affinity_broken )
             ret = -EBUSY;
         else if ( cpumask_test_cpu(cpu, VCPU2ONLINE(v)) )
         {
-            cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity);
-            v->affinity_broken = 1;
+            cpumask_copy(item->cpu_hard_affinity_saved,
+                         item->cpu_hard_affinity);
+            item->affinity_broken = 1;
             sched_set_affinity(v, cpumask_of(cpu), NULL);
             ret = 0;
         }
@@ -1217,7 +1237,7 @@ int vcpu_pin_override(struct vcpu *v, int cpu)
     if ( ret == 0 )
         vcpu_migrate_start(v);
 
-    item_schedule_unlock_irq(lock, v->sched_item);
+    item_schedule_unlock_irq(lock, item);
 
     domain_update_node_affinity(v->domain);
 
@@ -1569,7 +1589,7 @@ static void schedule(void)
         ((prev->pause_flags & VPF_blocked) ? RUNSTATE_blocked :
          (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
         now);
-    prev->last_run_time = now;
+    prev->sched_item->last_run_time = now;
 
     ASSERT(next->runstate.state != RUNSTATE_running);
     vcpu_runstate_change(next, RUNSTATE_running, now);
diff --git a/xen/common/wait.c b/xen/common/wait.c
index 4f830a14e8..6b91092c71 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -20,6 +20,7 @@
  */
 
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/softirq.h>
 #include <xen/wait.h>
 #include <xen/errno.h>
@@ -132,7 +133,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
 
     /* Save current VCPU affinity; force wakeup on *this* CPU only. */
     wqv->wakeup_cpu = smp_processor_id();
-    cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
+    cpumask_copy(&wqv->saved_affinity, curr->sched_item->cpu_hard_affinity);
     if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
     {
         gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
@@ -199,7 +200,7 @@ void check_wakeup_from_wait(void)
     {
         /* Re-set VCPU affinity and re-enter the scheduler. */
         struct vcpu *curr = current;
-        cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
+        cpumask_copy(&wqv->saved_affinity, curr->sched_item->cpu_hard_affinity);
         if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
         {
             gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index d549ef696e..577015b868 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -54,6 +54,22 @@ struct sched_item {
     void                  *priv;      /* scheduler private data */
     struct sched_item     *next_in_list;
     struct sched_resource *res;
+
+    /* Last time when item has been scheduled out. */
+    uint64_t               last_run_time;
+
+    /* Item needs affinity restored. */
+    bool                   affinity_broken;
+    /* Does soft affinity actually play a role (given hard affinity)? */
+    bool                   soft_aff_effective;
+    /* Bitmask of CPUs on which this VCPU may run. */
+    cpumask_var_t          cpu_hard_affinity;
+    /* Used to change affinity temporarily. */
+    cpumask_var_t          cpu_hard_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t          cpu_hard_affinity_saved;
+    /* Bitmask of CPUs on which this VCPU prefers to run. */
+    cpumask_var_t          cpu_soft_affinity;
 };
 
 #define for_each_sched_item(d, e)                                         \
@@ -290,11 +306,11 @@ static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
  * * The hard affinity is not a subset of soft affinity
  * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct vcpu *v)
+static inline int has_soft_affinity(const struct sched_item *item)
 {
-    return v->soft_aff_effective &&
-           !cpumask_subset(cpupool_domain_cpumask(v->domain),
-                           v->cpu_soft_affinity);
+    return item->soft_aff_effective &&
+           !cpumask_subset(cpupool_domain_cpumask(item->vcpu->domain),
+                           item->cpu_soft_affinity);
 }
 
 /*
@@ -304,17 +320,18 @@ static inline int has_soft_affinity(const struct vcpu *v)
  * to avoid running a vcpu where it would like, but is not allowed to!
  */
 static inline void
-affinity_balance_cpumask(const struct vcpu *v, int step, cpumask_t *mask)
+affinity_balance_cpumask(const struct sched_item *item, int step,
+                         cpumask_t *mask)
 {
     if ( step == BALANCE_SOFT_AFFINITY )
     {
-        cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
+        cpumask_and(mask, item->cpu_soft_affinity, item->cpu_hard_affinity);
 
         if ( unlikely(cpumask_empty(mask)) )
-            cpumask_copy(mask, v->cpu_hard_affinity);
+            cpumask_copy(mask, item->cpu_hard_affinity);
     }
     else /* step == BALANCE_HARD_AFFINITY */
-        cpumask_copy(mask, v->cpu_hard_affinity);
+        cpumask_copy(mask, item->cpu_hard_affinity);
 }
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2e9ced29a8..4b59de42da 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -175,9 +175,6 @@ struct vcpu
     } runstate_guest; /* guest address */
 #endif
 
-    /* last time when vCPU is scheduled out */
-    uint64_t last_run_time;
-
     /* Has the FPU been initialised? */
     bool             fpu_initialised;
     /* Has the FPU been used since it was last saved? */
@@ -203,8 +200,6 @@ struct vcpu
     bool             defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool             paused_for_shutdown;
-    /* VCPU need affinity restored */
-    bool             affinity_broken;
 
     /* A hypercall has been preempted. */
     bool             hcall_preempted;
@@ -213,9 +208,6 @@ struct vcpu
     bool             hcall_compat;
 #endif
 
-    /* Does soft affinity actually play a role (given hard affinity)? */
-    bool             soft_aff_effective;
-
     /* The CPU, if any, which is holding onto this VCPU's state. */
 #define VCPU_CPU_CLEAN (~0u)
     unsigned int     dirty_cpu;
@@ -247,16 +239,6 @@ struct vcpu
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
 
-    /* Bitmask of CPUs on which this VCPU may run. */
-    cpumask_var_t    cpu_hard_affinity;
-    /* Used to change affinity temporarily. */
-    cpumask_var_t    cpu_hard_affinity_tmp;
-    /* Used to restore affinity across S3. */
-    cpumask_var_t    cpu_hard_affinity_saved;
-
-    /* Bitmask of CPUs on which this VCPU prefers to run. */
-    cpumask_var_t    cpu_soft_affinity;
-
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
 
@@ -964,7 +946,7 @@ static inline bool is_hvm_vcpu(const struct vcpu *v)
 }
 
 #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
-                           cpumask_weight((v)->cpu_hard_affinity) == 1)
+             cpumask_weight((v)->sched_item->cpu_hard_affinity) == 1)
 #ifdef CONFIG_HAS_PASSTHROUGH
 #define has_iommu_pt(d) (dom_iommu(d)->status != IOMMU_STATUS_disabled)
 #define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
