[Xen-devel] [PATCH 2/4] xen: sched: optimize exclusive pinning case (Credit1 & 2)



Exclusive pinning of vCPUs is sometimes used to
achieve the highest possible level of determinism,
and the least possible overhead, for the vCPUs
in question.

Although static 1:1 pinning is not recommended for
general use cases, optimizing the tickling code (of
Credit1 and Credit2) for it is easy and cheap
enough, so go for it.
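
For context, exclusive pinning is typically established
from the toolstack. E.g. (the guest name and pcpu number
below are just examples), "xl vcpu-pin vm1 0 3" gives
vcpu 0 of vm1 hard-affinity with pcpu 3 only; the pinning
becomes exclusive when no other domain's vcpu has pcpu 3
in its hard-affinity.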

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Anshul Makkar <anshulmakkar@xxxxxxxxx>
---
Changes from v1:
- use a flag during runtime, as suggested during review;
- make use of the affinity-change hook, introduced in previous patch.
---
 xen/common/sched_credit.c    |   35 +++++++++++++++++++++++++++++++++++
 xen/common/sched_credit2.c   |   40 ++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/perfc_defn.h |    1 +
 3 files changed, 76 insertions(+)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4fdaa08..3efbfc8 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -73,6 +73,7 @@
 #define CSCHED_FLAG_VCPU_PARKED    0x0  /* VCPU over capped credits */
 #define CSCHED_FLAG_VCPU_YIELD     0x1  /* VCPU yielding */
 #define CSCHED_FLAG_VCPU_MIGRATING 0x2  /* VCPU may have moved to a new pcpu */
+#define CSCHED_FLAG_VCPU_PINNED    0x4  /* VCPU can run only on 1 pcpu */
 
 
 /*
@@ -362,6 +363,25 @@ static inline void __runq_tickle(struct csched_vcpu *new)
     idlers_empty = cpumask_empty(&idle_mask);
 
     /*
+     * Exclusive pinning is when a vcpu has hard-affinity with only one
+     * cpu, and there is no other vcpu that has hard-affinity with that
+     * same cpu. This is infrequent, but when it happens, it is for
+     * achieving the highest possible determinism, and the least possible
+     * overhead, for the vcpus in question.
+     *
+     * Try to identify the vast majority of these situations, and deal
+     * with them quickly.
+     */
+    if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
+                  cpumask_test_cpu(cpu, &idle_mask)) )
+    {
+        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+        __cpumask_set_cpu(cpu, &mask);
+        goto tickle;
+    }
+
+    /*
      * If the pcpu is idle, or there are no idlers and the new
      * vcpu is a higher priority than the old vcpu, run it here.
      *
@@ -457,6 +477,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
         }
     }
 
+ tickle:
     if ( !cpumask_empty(&mask) )
     {
         if ( unlikely(tb_init_done) )
@@ -1223,6 +1244,19 @@ csched_dom_cntl(
     return rc;
 }
 
+static void
+csched_aff_cntl(const struct scheduler *ops, struct vcpu *v,
+                const cpumask_t *hard, const cpumask_t *soft)
+{
+    struct csched_vcpu *svc = CSCHED_VCPU(v);
+
+    /* Are we becoming exclusively pinned? */
+    if ( cpumask_weight(hard) == 1 )
+        set_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+    else
+        clear_bit(CSCHED_FLAG_VCPU_PINNED, &svc->flags);
+}
+
 static inline void
 __csched_set_tslice(struct csched_private *prv, unsigned timeslice)
 {
@@ -2270,6 +2304,7 @@ static const struct scheduler sched_credit_def = {
     .yield          = csched_vcpu_yield,
 
     .adjust         = csched_dom_cntl,
+    .adjust_affinity= csched_aff_cntl,
     .adjust_global  = csched_sys_cntl,
 
     .pick_cpu       = csched_cpu_pick,
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 32234ac..e1985fb 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -299,6 +299,12 @@
  */
 #define __CSFLAG_vcpu_yield 4
 #define CSFLAG_vcpu_yield (1U<<__CSFLAG_vcpu_yield)
+/*
+ * CSFLAG_pinned: this vcpu is currently 'pinned', i.e., has its hard
+ * affinity set to one and only one cpu (and, hence, can only run there).
+ */
+#define __CSFLAG_pinned 5
+#define CSFLAG_pinned (1U<<__CSFLAG_pinned)
 
 static unsigned int __read_mostly opt_migrate_resist = 500;
 integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
@@ -1453,6 +1459,26 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
                     (unsigned char *)&d);
     }
 
+    /*
+     * Exclusive pinning is when a vcpu has hard-affinity with only one
+     * cpu, and there is no other vcpu that has hard-affinity with that
+     * same cpu. This is infrequent, but when it happens, it is for
+     * achieving the highest possible determinism, and the least possible
+     * overhead, for the vcpus in question.
+     *
+     * Try to identify the vast majority of these situations, and deal
+     * with them quickly.
+     */
+    if ( unlikely((new->flags & CSFLAG_pinned) &&
+                  cpumask_test_cpu(cpu, &rqd->idle) &&
+                  !cpumask_test_cpu(cpu, &rqd->tickled)) )
+    {
+        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        SCHED_STAT_CRANK(tickled_idle_cpu_excl);
+        ipid = cpu;
+        goto tickle;
+    }
+
     for_each_affinity_balance_step( bs )
     {
         /* Just skip first step, if we don't have a soft affinity */
@@ -2826,6 +2852,19 @@ csched2_dom_cntl(
     return rc;
 }
 
+static void
+csched2_aff_cntl(const struct scheduler *ops, struct vcpu *v,
+                 const cpumask_t *hard, const cpumask_t *soft)
+{
+    struct csched2_vcpu *svc = csched2_vcpu(v);
+
+    /* Are we becoming exclusively pinned? */
+    if ( cpumask_weight(hard) == 1 )
+        __set_bit(__CSFLAG_pinned, &svc->flags);
+    else
+        __clear_bit(__CSFLAG_pinned, &svc->flags);
+}
+
 static int csched2_sys_cntl(const struct scheduler *ops,
                             struct xen_sysctl_scheduler_op *sc)
 {
@@ -3887,6 +3926,7 @@ static const struct scheduler sched_credit2_def = {
     .yield          = csched2_vcpu_yield,
 
     .adjust         = csched2_dom_cntl,
+    .adjust_affinity= csched2_aff_cntl,
     .adjust_global  = csched2_sys_cntl,
 
     .pick_cpu       = csched2_cpu_pick,
diff --git a/xen/include/xen/perfc_defn.h b/xen/include/xen/perfc_defn.h
index e8ff565..ef6f86b 100644
--- a/xen/include/xen/perfc_defn.h
+++ b/xen/include/xen/perfc_defn.h
@@ -32,6 +32,7 @@ PERFCOUNTER(vcpu_wake_runnable,     "sched: vcpu_wake_runnable")
 PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
 PERFCOUNTER(tickled_no_cpu,         "sched: tickled_no_cpu")
 PERFCOUNTER(tickled_idle_cpu,       "sched: tickled_idle_cpu")
+PERFCOUNTER(tickled_idle_cpu_excl,  "sched: tickled_idle_cpu_exclusive")
 PERFCOUNTER(tickled_busy_cpu,       "sched: tickled_busy_cpu")
 PERFCOUNTER(vcpu_check,             "sched: vcpu_check")
 
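For reviewers who want the shape of the optimization at a glance,
here is a minimal, self-contained sketch of what both hunks do.
This is an illustration under assumptions, not Xen code: the
simplified cpumask type and the helper names (affinity_is_excl_pin,
tickle_pinned_fast_path) are made up for the example.

#include <stdbool.h>

/* Simplified stand-in for Xen's cpumask_t; up to 64 pcpus. */
struct cpumask { unsigned long bits; };

int cpumask_weight(const struct cpumask *m)
{
    return __builtin_popcountl(m->bits); /* nr. of cpus in the mask */
}

bool cpumask_test_cpu(int cpu, const struct cpumask *m)
{
    return (m->bits >> cpu) & 1;
}

/*
 * Run when a vcpu's hard-affinity changes (via the hook introduced
 * in the previous patch): cache "is this vcpu exclusively pinned?"
 * in a per-vcpu flag, so the hot tickling path only has to test a
 * bit, instead of recomputing the weight of the mask at every wakeup.
 */
bool affinity_is_excl_pin(const struct cpumask *hard)
{
    return cpumask_weight(hard) == 1;
}

/*
 * Fast path in the tickling code: the waking vcpu can only ever run
 * on 'cpu', so if that pcpu is idle, tickle it directly and skip the
 * more expensive search for a suitable idler. Returns true if the
 * shortcut was taken, false if the caller must fall back to the
 * general logic.
 */
bool tickle_pinned_fast_path(bool vcpu_pinned, int cpu,
                             const struct cpumask *idle_mask)
{
    if (vcpu_pinned && cpumask_test_cpu(cpu, idle_mask)) {
        /* the real code raises SCHEDULE_SOFTIRQ on 'cpu' here */
        return true;
    }
    return false;
}

Caching the weight-of-one test in a flag at affinity-change time is
the key design point: it keeps the common wakeup path down to a bit
test plus one test of the idle mask.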

