
[Xen-changelog] [xen-unstable] libxenstore: filter watch events in libxenstore when we unwatch


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Tue, 18 Dec 2012 13:33:08 +0000
  • Delivery-date: Tue, 18 Dec 2012 13:33:32 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Julien Grall <julien.grall@xxxxxxxxxx>
# Date 1355767494 0
# Node ID 3664b0420dfa656b73e389aff39c2af5eb9d2126
# Parent  f50aab21f9f2ca3411df7881b2b92293b3af3771
libxenstore: filter watch events in libxenstore when we unwatch

XenStore watch events are queued by libxenstore (via a thread) and the
user is notified. Sometimes xs_unwatch is called before all related
messages have been read. The use case is non-threaded libevent, with two
events A and B:
    - Event A destroys something and calls xs_unwatch;
    - Event B notifies that a node has changed in XenStore.
As the events are handled one by one, event A can be handled before
event B. So on the next xs_read_watch the user could retrieve a token for
a watch that was already removed, and a segfault occurs if the token
stores a pointer to the destroyed structure (e.g. "backend:0xcafe").

To avoid problems for existing applications using libxenstore, this
behaviour is only enabled when XS_UNWATCH_FILTER is passed to xs_open.
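
For illustration only (not part of this patch), a minimal C sketch of the
racy pattern and of how opening the handle with XS_UNWATCH_FILTER guards
against it; the struct name, node path and "backend:<pointer>" token
format below are made-up examples:

/* Illustrative only: names, paths and the token format are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <xenstore.h>

struct backend { int id; };          /* stands in for the destroyed object */

int main(void)
{
    struct xs_handle *xsh = xs_open(XS_UNWATCH_FILTER);
    struct backend *be = malloc(sizeof(*be));
    const char *path = "/local/domain/0/backend";    /* example node */
    char token[64];

    if (!xsh || !be)
        return 1;

    /* The token encodes the pointer, as in the "backend:0xcafe" example. */
    snprintf(token, sizeof(token), "backend:%p", (void *)be);
    xs_watch(xsh, path, token);

    /* "Event A": tear the object down and unregister the watch.  A watch
     * event ("event B") may already be queued at this point.  Because the
     * handle was opened with XS_UNWATCH_FILTER, xs_unwatch() drops any
     * queued event carrying this token and path, so a later
     * xs_read_watch() cannot hand back a token that decodes to freed
     * memory. */
    xs_unwatch(xsh, path, token);
    free(be);

    xs_close(xsh);
    return 0;
}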

Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Committed-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---


diff -r f50aab21f9f2 -r 3664b0420dfa tools/xenstore/xenstore.h
--- a/tools/xenstore/xenstore.h Thu Dec 13 14:39:31 2012 +0000
+++ b/tools/xenstore/xenstore.h Mon Dec 17 18:04:54 2012 +0000
@@ -27,6 +27,27 @@
 #define XS_OPEN_READONLY       1UL<<0
 #define XS_OPEN_SOCKETONLY      1UL<<1
 
+/*
+ * Setting XS_UNWATCH_FILTER arranges that after xs_unwatch, no
+ * related watch events will be delivered via xs_read_watch.  But
+ * this relies on the (token, subpath) pair being unique.
+ *
+ * XS_UNWATCH_FILTER clear          XS_UNWATCH_FILTER set
+ *
+ * Even after xs_unwatch, "stale"   After xs_unwatch returns, no
+ * instances of the watch event     watch events with the same
+ * may be delivered.                token and with the same subpath
+ *                                  will be delivered.
+ *
+ * A path and a subpath can be      The application must avoid
+ * registered with the same token.  registering a path (/foo/) and
+ *                                  a subpath (/foo/bar) with the
+ *                                  same token until a successful
+ *                                  xs_unwatch for the first watch
+ *                                  has returned.
+ */
+#define XS_UNWATCH_FILTER     1UL<<2
+
 struct xs_handle;
 typedef uint32_t xs_transaction_t;
 
diff -r f50aab21f9f2 -r 3664b0420dfa tools/xenstore/xs.c
--- a/tools/xenstore/xs.c       Thu Dec 13 14:39:31 2012 +0000
+++ b/tools/xenstore/xs.c       Mon Dec 17 18:04:54 2012 +0000
@@ -67,6 +67,8 @@ struct xs_handle {
 
        /* Clients can select() on this pipe to wait for a watch to fire. */
        int watch_pipe[2];
+       /* Filter watch events in the unwatch function? */
+       bool unwatch_filter;
 
        /*
          * A list of replies. Currently only one will ever be outstanding
@@ -125,6 +127,8 @@ struct xs_handle {
        struct list_head watch_list;
        /* Clients can select() on this pipe to wait for a watch to fire. */
        int watch_pipe[2];
+       /* Filter watch events in the unwatch function? */
+       bool unwatch_filter;
 };
 
 #define mutex_lock(m)          ((void)0)
@@ -247,6 +251,8 @@ static struct xs_handle *get_handle(cons
        /* Watch pipe is allocated on demand in xs_fileno(). */
        h->watch_pipe[0] = h->watch_pipe[1] = -1;
 
+       h->unwatch_filter = false;
+
 #ifdef USE_PTHREAD
        pthread_mutex_init(&h->watch_mutex, NULL);
        pthread_cond_init(&h->watch_condvar, NULL);
@@ -287,6 +293,9 @@ struct xs_handle *xs_open(unsigned long 
        if (!xsh && !(flags & XS_OPEN_SOCKETONLY))
                xsh = get_handle(xs_domain_dev());
 
+       if (xsh && (flags & XS_UNWATCH_FILTER))
+               xsh->unwatch_filter = true;
+
        return xsh;
 }
 
@@ -753,6 +762,19 @@ bool xs_watch(struct xs_handle *h, const
                                ARRAY_SIZE(iov), NULL));
 }
 
+
+/* Clear the pipe token if there are no more pending watches.
+ * The caller must already hold the watch_mutex.
+ */
+static void xs_maybe_clear_watch_pipe(struct xs_handle *h)
+{
+       char c;
+
+       if (list_empty(&h->watch_list) && (h->watch_pipe[0] != -1))
+               while (read(h->watch_pipe[0], &c, 1) != 1)
+                       continue;
+}
+
 /* Find out what node change was on (will block if nothing pending).
  * Returns array of two pointers: path and token, or NULL.
  * Call free() after use.
@@ -761,7 +783,7 @@ static char **read_watch_internal(struct
                                  int nonblocking)
 {
        struct xs_stored_msg *msg;
-       char **ret, *strings, c = 0;
+       char **ret, *strings;
        unsigned int num_strings, i;
 
        mutex_lock(&h->watch_mutex);
@@ -798,11 +820,7 @@ static char **read_watch_internal(struct
        msg = list_top(&h->watch_list, struct xs_stored_msg, list);
        list_del(&msg->list);
 
-       /* Clear the pipe token if there are no more pending watches. */
-       if (list_empty(&h->watch_list) && (h->watch_pipe[0] != -1))
-               while (read(h->watch_pipe[0], &c, 1) != 1)
-                       continue;
-
+       xs_maybe_clear_watch_pipe(h);
        mutex_unlock(&h->watch_mutex);
 
        assert(msg->hdr.type == XS_WATCH_EVENT);
@@ -855,14 +873,64 @@ char **xs_read_watch(struct xs_handle *h
 bool xs_unwatch(struct xs_handle *h, const char *path, const char *token)
 {
        struct iovec iov[2];
+       struct xs_stored_msg *msg, *tmsg;
+       bool res;
+       char *s, *p;
+       unsigned int i;
+       char *l_token, *l_path;
 
        iov[0].iov_base = (char *)path;
        iov[0].iov_len = strlen(path) + 1;
        iov[1].iov_base = (char *)token;
        iov[1].iov_len = strlen(token) + 1;
 
-       return xs_bool(xs_talkv(h, XBT_NULL, XS_UNWATCH, iov,
-                               ARRAY_SIZE(iov), NULL));
+       res = xs_bool(xs_talkv(h, XBT_NULL, XS_UNWATCH, iov,
+                              ARRAY_SIZE(iov), NULL));
+
+       if (!h->unwatch_filter) /* Don't filter the watch list */
+               return res;
+
+
+       /* Filter the watch list to remove potentially stale messages */
+       mutex_lock(&h->watch_mutex);
+
+       if (list_empty(&h->watch_list)) {
+               mutex_unlock(&h->watch_mutex);
+               return res;
+       }
+
+       list_for_each_entry_safe(msg, tmsg, &h->watch_list, list) {
+               assert(msg->hdr.type == XS_WATCH_EVENT);
+
+               s = msg->body;
+
+               l_token = NULL;
+               l_path = NULL;
+
+               for (p = s, i = 0; p < msg->body + msg->hdr.len; p++) {
+                       if (*p == '\0')
+                       {
+                               if (i == XS_WATCH_TOKEN)
+                                       l_token = s;
+                               else if (i == XS_WATCH_PATH)
+                                       l_path = s;
+                               i++;
+                               s = p + 1;
+                       }
+               }
+
+               if (l_token && !strcmp(token, l_token) &&
+                   l_path && xs_path_is_subpath(path, l_path)) {
+                       list_del(&msg->list);
+                       free(msg);
+               }
+       }
+
+       xs_maybe_clear_watch_pipe(h);
+
+       mutex_unlock(&h->watch_mutex);
+
+       return res;
 }
 
 /* Start a transaction: changes by others will not be seen during this
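
For reference, a hedged sketch (not from this changeset) of the
single-threaded consumer pattern the filtering is designed around:
select() on xs_fileno() and drain events with xs_read_watch(), indexing
the result with XS_WATCH_PATH/XS_WATCH_TOKEN just as the new filtering
loop in xs_unwatch() does.  The watched path and token are made-up
examples.

/* Illustrative single-threaded consumer; path and token are hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/select.h>
#include <xenstore.h>

int main(void)
{
    struct xs_handle *xsh = xs_open(XS_UNWATCH_FILTER);
    fd_set rfds;
    int fd;

    if (!xsh)
        return 1;

    xs_watch(xsh, "/local/domain/0/example", "example-token");
    fd = xs_fileno(xsh);        /* readable whenever a watch event is queued */

    for (;;) {
        unsigned int num;
        char **ev;

        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);
        if (select(fd + 1, &rfds, NULL, NULL, NULL) <= 0)
            break;

        /* At least one event is queued, so this will not block.  With
         * XS_UNWATCH_FILTER, events for watches already removed with
         * xs_unwatch() are never returned here. */
        ev = xs_read_watch(xsh, &num);
        if (!ev)
            break;
        printf("fired: path=%s token=%s\n",
               ev[XS_WATCH_PATH], ev[XS_WATCH_TOKEN]);
        free(ev);               /* the array is a single allocation */
    }

    xs_close(xsh);
    return 0;
}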

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
