
[Xen-changelog] [xen-3.4-testing] xenstore: Make sure that libxs reports an error if xenstored drops the connection



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1275994822 -3600
# Node ID c4b9a16c20a2f8795d3be0b03387c2d2f7c02e4b
# Parent  2bbaf4e06c1d406c7fc5cba3cb53cfdc355aef70
xenstore: Make sure that libxs reports an error if xenstored drops
the connection, rather than getting stuck forever.

Patch from: Steven Smith <steven.smith@xxxxxxxxxxxxx>
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
xen-unstable changeset:   21463:5be2d2a7f445
xen-unstable date:        Thu May 27 08:20:26 2010 +0100
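
[ Illustration, not part of the patch: a minimal, hypothetical libxs
  client showing how the new behaviour surfaces.  Before this change a
  caller blocked in xs_read_watch() could hang forever once xenstored
  dropped the connection; with the reader-thread changes below the call
  instead returns NULL with errno set (EINVAL). ]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <xs.h>

int main(void)
{
        struct xs_handle *h = xs_daemon_open();
        unsigned int num;
        char **vec;

        if (h == NULL)
                return 1;
        if (!xs_watch(h, "/local/domain", "token0"))
                goto fail;

        /* Blocks until a watch fires -- or, with this patch, returns
         * NULL (errno == EINVAL) if xenstored drops the connection. */
        vec = xs_read_watch(h, &num);
        if (vec == NULL) {
                fprintf(stderr, "xenstore connection lost: %s\n",
                        strerror(errno));
                goto fail;
        }

        free(vec);
        xs_daemon_close(h);
        return 0;

fail:
        xs_daemon_close(h);
        return 1;
}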

xenstore: Fix cleanup_pop() definition for some (buggy) pthread.h headers.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   21374:9d53864d7be6
xen-unstable date:        Thu May 13 12:21:16 2010 +0100
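
[ Illustration, not part of the patch, of the header problem the
  cleanup_pop() wrapper below works around: on the affected pthread.h
  headers pthread_cleanup_pop() expands to text beginning with a
  closing brace, so a label written directly in front of it becomes a
  label in front of '}', which is not valid C.  The dummy "((void)0);"
  gives the label a statement to attach to. ]

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

#define cleanup_push(f, a) \
        pthread_cleanup_push((void (*)(void *))(f), (void *)(a))
#define cleanup_pop(run)        ((void)0); pthread_cleanup_pop(run)

static void *worker(void *arg)
{
        int fd = *(int *)arg;
        char *buf = malloc(16);

        if (buf == NULL)
                return NULL;
        cleanup_push(free, buf);        /* free buf if cancelled below */

        if (read(fd, buf, 16) < 0)      /* read() is a cancellation point */
                goto out;
        /* ... use buf ... */

out:
        /* "out: pthread_cleanup_pop(1);" fails to compile with the
         * buggy headers; the wrapper's dummy statement keeps the label
         * legal. */
        cleanup_pop(1);                 /* pop and run the handler: frees buf */
        return NULL;
}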

xs: avoid pthread_join deadlock in xs_daemon_close

Doing a pthread_cancel and join on the reader thread while holding all
the request/reply/watch mutexes can deadlock if the thread needs to
take any of those mutexes to exit.  Kill off the reader thread before
taking any mutexes (taking them should then be redundant anyway, since
we are single-threaded at that point).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
xen-unstable changeset:   21354:9de69d816b11
xen-unstable date:        Wed May 12 08:49:13 2010 +0100
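
[ Compilable sketch, not from the patch, of the failure mode: if the
  closing thread already holds a mutex that the reader needs in order
  to exit, pthread_join() never returns.  Cancelling and joining before
  taking any mutexes, as the patched xs_daemon_close() does, avoids
  this. ]

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Cancellation cleanup: the reader needs state_mutex to exit cleanly. */
static void reader_cleanup(void *arg)
{
        pthread_mutex_lock(&state_mutex);
        /* ... record that the connection is gone ... */
        pthread_mutex_unlock(&state_mutex);
}

static void *reader(void *arg)
{
        pthread_cleanup_push(reader_cleanup, NULL);
        for (;;)
                sleep(1);       /* stands in for read(): a cancellation point */
        pthread_cleanup_pop(0);
        return NULL;
}

int main(void)
{
        pthread_t thr;

        pthread_create(&thr, NULL, reader, NULL);
        sleep(1);

        /* Correct order, as in the patched xs_daemon_close(): cancel
         * and join while holding no mutexes.  Locking state_mutex
         * first and then joining would deadlock, because
         * reader_cleanup() blocks on that same mutex. */
        pthread_cancel(thr);
        pthread_join(thr, NULL);

        pthread_mutex_lock(&state_mutex);
        /* ... tear down shared state; we are single-threaded now ... */
        pthread_mutex_unlock(&state_mutex);
        return 0;
}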

xs: make sure mutexes are cleaned up and memory freed if the read
thread is cancelled

If the read thread is terminated with pthread_cancel(), it must make
sure that all memory is freed and all mutexes are unlocked.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
xen-unstable changeset:   21353:2dd3141b3e3e
xen-unstable date:        Wed May 12 08:48:14 2010 +0100
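
[ Sketch, not from the patch, of the locking half of the fix: when a
  cancellation point such as write() is reached with a mutex held, a
  pushed cleanup handler guarantees the mutex is released if the thread
  is cancelled there.  read_message() below applies the same pattern to
  h->watch_mutex. ]

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t watch_mutex = PTHREAD_MUTEX_INITIALIZER;

static void queue_event(int pipe_fd, const char *body)
{
        pthread_mutex_lock(&watch_mutex);
        pthread_cleanup_push((void (*)(void *))pthread_mutex_unlock,
                             &watch_mutex);

        /* write() is a cancellation point: if the thread is cancelled
         * here, the handler unlocks watch_mutex rather than leaving it
         * held forever. */
        while (write(pipe_fd, body, 1) != 1)
                continue;

        /* ... add the event to the list, signal waiters ... */

        pthread_cleanup_pop(1);         /* pop and run the handler: unlock */
}
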
---
 tools/xenstore/xs.c |  104 ++++++++++++++++++++++++++++++++++++++--------------
 1 files changed, 77 insertions(+), 27 deletions(-)

diff -r 2bbaf4e06c1d -r c4b9a16c20a2 tools/xenstore/xs.c
--- a/tools/xenstore/xs.c       Fri Jun 04 10:52:01 2010 +0100
+++ b/tools/xenstore/xs.c       Tue Jun 08 12:00:22 2010 +0100
@@ -85,6 +85,16 @@ struct xs_handle {
 #define mutex_unlock(m)                pthread_mutex_unlock(m)
 #define condvar_signal(c)      pthread_cond_signal(c)
 #define condvar_wait(c,m,hnd)  pthread_cond_wait(c,m)
+#define cleanup_push(f, a)     \
+    pthread_cleanup_push((void (*)(void *))(f), (void *)(a))
+/*
+ * Some definitions of pthread_cleanup_pop() are a macro starting with an
+ * end-brace. GCC then complains if we immediately precede that with a label.
+ * Hence we insert a dummy statement to appease the compiler in this situation.
+ */
+#define cleanup_pop(run)        ((void)0); pthread_cleanup_pop(run)
+
+#define read_thread_exists(h)  (h->read_thr_exists)
 
 static void *read_thread(void *arg);
 
@@ -102,6 +112,9 @@ struct xs_handle {
 #define mutex_unlock(m)                ((void)0)
 #define condvar_signal(c)      ((void)0)
 #define condvar_wait(c,m,hnd)  read_message(hnd)
+#define cleanup_push(f, a)     ((void)0)
+#define cleanup_pop(run)       ((void)0)
+#define read_thread_exists(h)  (0)
 
 #endif
 
@@ -227,17 +240,16 @@ void xs_daemon_close(struct xs_handle *h
 {
        struct xs_stored_msg *msg, *tmsg;
 
+#ifdef USE_PTHREAD
+       if (h->read_thr_exists) {
+               pthread_cancel(h->read_thr);
+               pthread_join(h->read_thr, NULL);
+       }
+#endif
+
        mutex_lock(&h->request_mutex);
        mutex_lock(&h->reply_mutex);
        mutex_lock(&h->watch_mutex);
-
-#ifdef USE_PTHREAD
-       if (h->read_thr_exists) {
-               /* XXX FIXME: May leak an unpublished message buffer. */
-               pthread_cancel(h->read_thr);
-               pthread_join(h->read_thr, NULL);
-       }
-#endif
 
        list_for_each_entry_safe(msg, tmsg, &h->reply_list, list) {
                free(msg->body);
@@ -307,16 +319,25 @@ static void *read_reply(
 {
        struct xs_stored_msg *msg;
        char *body;
+       int read_from_thread;
+
+       read_from_thread = read_thread_exists(h);
 
 #ifdef USE_PTHREAD
        /* Read from comms channel ourselves if there is no reader thread. */
-       if (!h->read_thr_exists && (read_message(h) == -1))
+       if (!read_from_thread && (read_message(h) == -1))
                return NULL;
 #endif
 
        mutex_lock(&h->reply_mutex);
-       while (list_empty(&h->reply_list))
+       while (list_empty(&h->reply_list) && (!read_from_thread || read_thread_exists(h)))
                condvar_wait(&h->reply_condvar, &h->reply_mutex, h);
+       if (read_from_thread && !read_thread_exists(h)) {
+               mutex_unlock(&h->reply_mutex);
+               errno = EINVAL;
+               return NULL;
+       }
+       assert(!list_empty(&h->reply_list));
        msg = list_top(&h->reply_list, struct xs_stored_msg, list);
        list_del(&msg->list);
        assert(list_empty(&h->reply_list));
@@ -624,8 +645,13 @@ char **xs_read_watch(struct xs_handle *h
        mutex_lock(&h->watch_mutex);
 
        /* Wait on the condition variable for a watch to fire. */
-       while (list_empty(&h->watch_list))
+       while (list_empty(&h->watch_list) && read_thread_exists(h))
                condvar_wait(&h->watch_condvar, &h->watch_mutex, h);
+       if (!read_thread_exists(h)) {
+               mutex_unlock(&h->watch_mutex);
+               errno = EINVAL;
+               return NULL;
+       }
        msg = list_top(&h->watch_list, struct xs_stored_msg, list);
        list_del(&msg->list);
 
@@ -846,44 +872,53 @@ static int read_message(struct xs_handle
 {
        struct xs_stored_msg *msg = NULL;
        char *body = NULL;
-       int saved_errno;
+       int saved_errno = 0;
+       int ret = -1;
 
        /* Allocate message structure and read the message header. */
        msg = malloc(sizeof(*msg));
        if (msg == NULL)
                goto error;
-       if (!read_all(h->fd, &msg->hdr, sizeof(msg->hdr)))
-               goto error;
+       cleanup_push(free, msg);
+       if (!read_all(h->fd, &msg->hdr, sizeof(msg->hdr))) { /* Cancellation point */
+               saved_errno = errno;
+               goto error_freemsg;
+       }
 
        /* Allocate and read the message body. */
        body = msg->body = malloc(msg->hdr.len + 1);
        if (body == NULL)
-               goto error;
-       if (!read_all(h->fd, body, msg->hdr.len))
-               goto error;
+               goto error_freemsg;
+       cleanup_push(free, body);
+       if (!read_all(h->fd, body, msg->hdr.len)) { /* Cancellation point */
+               saved_errno = errno;
+               goto error_freebody;
+       }
+
        body[msg->hdr.len] = '\0';
 
        if (msg->hdr.type == XS_WATCH_EVENT) {
                mutex_lock(&h->watch_mutex);
+               cleanup_push(pthread_mutex_unlock, &h->watch_mutex);
 
                /* Kick users out of their select() loop. */
                if (list_empty(&h->watch_list) &&
                    (h->watch_pipe[1] != -1))
-                       while (write(h->watch_pipe[1], body, 1) != 1)
+                       while (write(h->watch_pipe[1], body, 1) != 1) /* Cancellation point */
                                continue;
 
                list_add_tail(&msg->list, &h->watch_list);
 
                condvar_signal(&h->watch_condvar);
 
-               mutex_unlock(&h->watch_mutex);
+               cleanup_pop(1);
        } else {
                mutex_lock(&h->reply_mutex);
 
                /* There should only ever be one response pending! */
                if (!list_empty(&h->reply_list)) {
                        mutex_unlock(&h->reply_mutex);
-                       goto error;
+                       goto error_freebody;
                }
 
                list_add_tail(&msg->list, &h->reply_list);
@@ -892,14 +927,16 @@ static int read_message(struct xs_handle
                mutex_unlock(&h->reply_mutex);
        }
 
-       return 0;
-
- error:
-       saved_errno = errno;
-       free(msg);
-       free(body);
+       ret = 0;
+
+error_freebody:
+       cleanup_pop(ret == -1);
+error_freemsg:
+       cleanup_pop(ret == -1);
+error:
        errno = saved_errno;
-       return -1;
+
+       return ret;
 }
 
 #ifdef USE_PTHREAD
@@ -909,6 +946,19 @@ static void *read_thread(void *arg)
 
        while (read_message(h) != -1)
                continue;
+
+       /* Kick anyone waiting for a reply */
+       pthread_mutex_lock(&h->request_mutex);
+       h->read_thr_exists = 0;
+       pthread_mutex_unlock(&h->request_mutex);
+
+       pthread_mutex_lock(&h->reply_mutex);
+       pthread_cond_signal(&h->reply_condvar);
+       pthread_mutex_unlock(&h->reply_mutex);
+
+       pthread_mutex_lock(&h->watch_mutex);
+       pthread_cond_signal(&h->watch_condvar);
+       pthread_mutex_unlock(&h->watch_mutex);
 
        return NULL;
 }
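
[ For reference, not from the patch: a simplified, single-mutex version
  of the wake-up handshake that the new read_thread() exit path above
  implements.  The reader clears its "alive" flag and signals the
  condition variable; waiters test the flag in their wait loop and fail
  with EINVAL instead of sleeping forever. ]

#include <pthread.h>
#include <errno.h>
#include <stddef.h>

static pthread_mutex_t reply_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  reply_cond  = PTHREAD_COND_INITIALIZER;
static int   reader_alive  = 1;
static void *pending_reply;             /* NULL == nothing queued */

/* Reader exit path: clear the flag, then wake any waiter so that it
 * observes the flag instead of blocking forever. */
static void reader_exited(void)
{
        pthread_mutex_lock(&reply_mutex);
        reader_alive = 0;
        pthread_cond_signal(&reply_cond);
        pthread_mutex_unlock(&reply_mutex);
}

/* Waiter (cf. read_reply()): block until a reply arrives or the reader
 * has gone away, and report the latter as an error. */
static void *wait_for_reply(void)
{
        void *reply;

        pthread_mutex_lock(&reply_mutex);
        while (pending_reply == NULL && reader_alive)
                pthread_cond_wait(&reply_cond, &reply_mutex);
        if (pending_reply == NULL) {    /* reader gone, nothing queued */
                pthread_mutex_unlock(&reply_mutex);
                errno = EINVAL;
                return NULL;
        }
        reply = pending_reply;
        pending_reply = NULL;
        pthread_mutex_unlock(&reply_mutex);
        return reply;
}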
