
[Minios-devel] [UNIKRAFT PATCH v2 4/5] lib/ukschedcoop: Fix yield behavior when current thread is first in thread list


  • To: minios-devel@xxxxxxxxxxxxx
  • From: Costin Lupu <costin.lupu@xxxxxxxxx>
  • Date: Wed, 29 May 2019 07:56:31 +0300
  • Cc: Florian.Schmidt@xxxxxxxxx
  • Delivery-date: Wed, 29 May 2019 04:56:51 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

When the current thread is the only one in the thread list and it
wants to yield after creating a new thread, schedcoop_schedule() will
choose the same current thread (instead of the new one) because it is
the first one in the running list. This violates the expected behavior
of a Round-Robin cooperative scheduler. The solution is to stop keeping
the current thread in the thread list while it is running. This way,
the first thread in the running list is always the thread that will be
scheduled next.
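
For illustration, this is the scenario that triggers the issue (a
minimal sketch; worker_fn and the surrounding calls are hypothetical):

  static void worker_fn(void *arg)
  {
          /* before this fix, a yield right after thread creation
           * did not switch here
           */
  }

  /* current thread is the only one in the thread list */
  uk_thread_create("worker", worker_fn, NULL);
  uk_sched_yield(); /* used to resume the current thread again, since
                     * it stayed first in the running list; now the
                     * newly created thread runs first
                     */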

However, this solution alone is not complete. If we have a single
thread (hence, the threads list is empty) and it wants to sleep for
some time, schedcoop_schedule() will enter an infinite loop trying to
find the next thread. Therefore, the solution is completed by keeping
the sleeping threads on a separate list: in scenarios like this, when
the "ready" threads list is empty, schedcoop_schedule() will iterate
over the sleeping threads until one of them wakes up.
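
The corner case can be reproduced with a single application thread
going to sleep (sketch):

  /* the only thread blocks with a timeout, leaving the "ready"
   * threads list empty
   */
  uk_sched_thread_sleep(ukarch_time_sec_to_nsec(1));
  /* without the separate sleeping threads list,
   * schedcoop_schedule() would spin forever looking for a runnable
   * thread; with it, the scheduler blocks until the earliest
   * wakeup_time expires
   */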

The described problem should also occur on Mini-OS, from which this
implementation was ported.

This patch also adds scheduler callbacks to be called when threads
block or wake up. On blocking, the cooperative scheduler adds a thread
to the sleeping threads list whenever it wants to sleep
(wakeup_time > 0). On waking up, a sleeping thread is removed from the
sleeping threads list and added back to the "ready" threads list.
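
The intended callback flow is the following (sketch of the calls made
by this patch):

  uk_thread_block_timeout(t, nsec);
          /* -> t->wakeup_time = now + nsec; clear_runnable(t);
           *    thread_blocked(): move t to the sleeping threads list
           */
  uk_thread_wake(t);
          /* -> thread_woken(): move t back to the "ready" list;
           *    t->wakeup_time = 0; set_runnable(t)
           */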

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
 lib/uksched/include/uk/sched.h | 23 ++++++++++++
 lib/uksched/include/uk/wait.h  |  1 +
 lib/uksched/thread.c           | 17 ++++++---
 lib/ukschedcoop/schedcoop.c    | 80 +++++++++++++++++++++++++++++-------------
 4 files changed, 92 insertions(+), 29 deletions(-)

diff --git a/lib/uksched/include/uk/sched.h b/lib/uksched/include/uk/sched.h
index f9dc16d0..f8a62dd6 100644
--- a/lib/uksched/include/uk/sched.h
+++ b/lib/uksched/include/uk/sched.h
@@ -64,6 +64,10 @@ typedef int   (*uk_sched_thread_add_func_t)
                        const uk_thread_attr_t *attr);
 typedef void  (*uk_sched_thread_remove_func_t)
                (struct uk_sched *s, struct uk_thread *t);
+typedef void  (*uk_sched_thread_blocked_func_t)
+               (struct uk_sched *s, struct uk_thread *t);
+typedef void  (*uk_sched_thread_woken_func_t)
+               (struct uk_sched *s, struct uk_thread *t);
 
 typedef int   (*uk_sched_thread_set_prio_func_t)
                (struct uk_sched *s, struct uk_thread *t, prio_t prio);
@@ -79,6 +83,8 @@ struct uk_sched {
 
        uk_sched_thread_add_func_t      thread_add;
        uk_sched_thread_remove_func_t   thread_remove;
+       uk_sched_thread_blocked_func_t  thread_blocked;
+       uk_sched_thread_woken_func_t    thread_woken;
 
        uk_sched_thread_set_prio_func_t   thread_set_prio;
        uk_sched_thread_get_prio_func_t   thread_get_prio;
@@ -133,6 +139,20 @@ static inline int uk_sched_thread_remove(struct uk_sched *s,
        return 0;
 }
 
+static inline void uk_sched_thread_blocked(struct uk_sched *s,
+               struct uk_thread *t)
+{
+       UK_ASSERT(s);
+       s->thread_blocked(s, t);
+}
+
+static inline void uk_sched_thread_woken(struct uk_sched *s,
+               struct uk_thread *t)
+{
+       UK_ASSERT(s);
+       s->thread_woken(s, t);
+}
+
 static inline int uk_sched_thread_set_prio(struct uk_sched *s,
                struct uk_thread *t, prio_t prio)
 {
@@ -194,12 +214,15 @@ static inline struct uk_thread *uk_sched_get_idle(struct uk_sched *s)
 
 #define uk_sched_init(s, yield_func, \
                thread_add_func, thread_remove_func, \
+               thread_blocked_func, thread_woken_func, \
                thread_set_prio_func, thread_get_prio_func, \
                thread_set_tslice_func, thread_get_tslice_func) \
        do { \
                (s)->yield           = yield_func; \
                (s)->thread_add      = thread_add_func; \
                (s)->thread_remove   = thread_remove_func; \
+               (s)->thread_blocked  = thread_blocked_func; \
+               (s)->thread_woken    = thread_woken_func; \
                (s)->thread_set_prio    = thread_set_prio_func; \
                (s)->thread_get_prio    = thread_get_prio_func; \
                (s)->thread_set_tslice  = thread_set_tslice_func; \
diff --git a/lib/uksched/include/uk/wait.h b/lib/uksched/include/uk/wait.h
index 905ba38b..bf0e0c5c 100644
--- a/lib/uksched/include/uk/wait.h
+++ b/lib/uksched/include/uk/wait.h
@@ -106,6 +106,7 @@ do { \
                uk_waitq_add(wq, &__wait); \
                __current->wakeup_time = deadline; \
                clear_runnable(__current); \
+               uk_sched_thread_blocked(__current->sched, __current); \
                ukplat_lcpu_restore_irqf(flags); \
                if ((condition) || (deadline_condition)) \
                        break; \
diff --git a/lib/uksched/thread.c b/lib/uksched/thread.c
index 7400baee..9625aea1 100644
--- a/lib/uksched/thread.c
+++ b/lib/uksched/thread.c
@@ -135,8 +135,13 @@ void uk_thread_fini(struct uk_thread *thread, struct uk_alloc *allocator)
 
 static void uk_thread_block_until(struct uk_thread *thread, __snsec until)
 {
+       unsigned long flags;
+
+       flags = ukplat_lcpu_save_irqf();
        thread->wakeup_time = until;
        clear_runnable(thread);
+       uk_sched_thread_blocked(thread->sched, thread);
+       ukplat_lcpu_restore_irqf(flags);
 }
 
 void uk_thread_block_timeout(struct uk_thread *thread, __nsec nsec)
@@ -153,11 +158,15 @@ void uk_thread_block(struct uk_thread *thread)
 
 void uk_thread_wake(struct uk_thread *thread)
 {
-       if (is_runnable(thread))
-               return;
+       unsigned long flags;
 
-       thread->wakeup_time = 0LL;
-       set_runnable(thread);
+       flags = ukplat_lcpu_save_irqf();
+       if (!is_runnable(thread)) {
+               uk_sched_thread_woken(thread->sched, thread);
+               thread->wakeup_time = 0LL;
+               set_runnable(thread);
+       }
+       ukplat_lcpu_restore_irqf(flags);
 }
 
 void uk_thread_exit(struct uk_thread *thread)
diff --git a/lib/ukschedcoop/schedcoop.c b/lib/ukschedcoop/schedcoop.c
index e0b116ee..28667bf1 100644
--- a/lib/ukschedcoop/schedcoop.c
+++ b/lib/ukschedcoop/schedcoop.c
@@ -37,6 +37,7 @@
 
 struct schedcoop_private {
        struct uk_thread_list thread_list;
+       struct uk_thread_list sleeping_threads;
 };
 
 #ifdef SCHED_DEBUG
@@ -78,33 +79,34 @@ static void schedcoop_schedule(struct uk_sched *s)
                __snsec now = ukplat_monotonic_clock();
                __snsec min_wakeup_time = now + ukarch_time_sec_to_nsec(10);
 
-               next = NULL;
-
-               UK_TAILQ_FOREACH_SAFE(thread, &prv->thread_list,
+               /* wake some sleeping threads */
+               UK_TAILQ_FOREACH_SAFE(thread, &prv->sleeping_threads,
                                      thread_list, tmp) {
 
-                       if (!is_runnable(thread)
-                           && thread->wakeup_time != 0LL) {
-                               if (thread->wakeup_time <= now)
-                                       uk_thread_wake(thread);
-                               else if (thread->wakeup_time < min_wakeup_time)
-                                       min_wakeup_time = thread->wakeup_time;
-                       }
-
-                       if (is_runnable(thread)) {
-                               next = thread;
-                               /* Put this thread on the end of the list */
-                               UK_TAILQ_REMOVE(&prv->thread_list, thread,
-                                               thread_list);
-                               UK_TAILQ_INSERT_TAIL(&prv->thread_list, thread,
-                                               thread_list);
-                               ukplat_stack_set_current_thread(next);
-                               break;
-                       }
+                       if (thread->wakeup_time && thread->wakeup_time <= now)
+                               uk_thread_wake(thread);
+
+                       else if (thread->wakeup_time < min_wakeup_time)
+                               min_wakeup_time = thread->wakeup_time;
                }
 
-               if (next)
+               next = UK_TAILQ_FIRST(&prv->thread_list);
+               if (next) {
+                       UK_ASSERT(next != prev);
+                       UK_ASSERT(is_runnable(next));
+                       UK_ASSERT(!is_exited(next));
+                       UK_TAILQ_REMOVE(&prv->thread_list, next,
+                                       thread_list);
+                       /* Put previous thread on the end of the list */
+                       if (is_runnable(prev))
+                               UK_TAILQ_INSERT_TAIL(&prv->thread_list, prev,
+                                               thread_list);
+                       ukplat_stack_set_current_thread(next);
                        break;
+               } else if (is_runnable(prev)) {
+                       next = prev;
+                       break;
+               }
 
                /* block until the next timeout expires, or for 10 secs,
                 * whichever comes first
@@ -156,7 +158,8 @@ static void schedcoop_thread_remove(struct uk_sched *s, struct uk_thread *t)
        flags = ukplat_lcpu_save_irqf();
 
        /* Remove from the thread list */
-       UK_TAILQ_REMOVE(&prv->thread_list, t, thread_list);
+       if (t != uk_thread_current())
+               UK_TAILQ_REMOVE(&prv->thread_list, t, thread_list);
        clear_runnable(t);
 
        uk_thread_exit(t);
@@ -166,13 +169,37 @@ static void schedcoop_thread_remove(struct uk_sched *s, struct uk_thread *t)
 
        ukplat_lcpu_restore_irqf(flags);
 
-       /* Schedule will free the resources */
-       while (1) {
+       /* Schedule only if current thread is exiting */
+       if (t == uk_thread_current()) {
                schedcoop_schedule(s);
                uk_pr_warn("schedule() returned! Trying again\n");
        }
 }
 
+static void schedcoop_thread_blocked(struct uk_sched *s, struct uk_thread *t)
+{
+       struct schedcoop_private *prv = s->prv;
+
+       UK_ASSERT(ukplat_lcpu_irqs_disabled());
+
+       if (t != uk_thread_current())
+               UK_TAILQ_REMOVE(&prv->thread_list, t, thread_list);
+       if (t->wakeup_time > 0)
+               UK_TAILQ_INSERT_TAIL(&prv->sleeping_threads, t, thread_list);
+}
+
+static void schedcoop_thread_woken(struct uk_sched *s, struct uk_thread *t)
+{
+       struct schedcoop_private *prv = s->prv;
+
+       UK_ASSERT(ukplat_lcpu_irqs_disabled());
+
+       if (t->wakeup_time > 0)
+               UK_TAILQ_REMOVE(&prv->sleeping_threads, t, thread_list);
+       if (t != uk_thread_current())
+               UK_TAILQ_INSERT_TAIL(&prv->thread_list, t, thread_list);
+}
+
 static void idle_thread_fn(void *unused __unused)
 {
        struct uk_thread *current = uk_thread_current();
@@ -207,6 +234,7 @@ struct uk_sched *uk_schedcoop_init(struct uk_alloc *a)
 
        prv = sched->prv;
        UK_TAILQ_INIT(&prv->thread_list);
+       UK_TAILQ_INIT(&prv->sleeping_threads);
 
        uk_sched_idle_init(sched, NULL, idle_thread_fn);
 
@@ -214,6 +242,8 @@ struct uk_sched *uk_schedcoop_init(struct uk_alloc *a)
                        schedcoop_yield,
                        schedcoop_thread_add,
                        schedcoop_thread_remove,
+                       schedcoop_thread_blocked,
+                       schedcoop_thread_woken,
                        NULL, NULL, NULL, NULL);
 
        return sched;
-- 
2.11.0

