[Xen-changelog] Fix cancellation of pending watch events on watch unregistration.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 5134f3c512c8e140ca7454e27f1931870ca8b4d7
# Parent  03d69dbea1527720f11a358bf525efbb8c40aec7
Fix cancellation of pending watch events on watch unregistration.
Use wait_event_interruptible() so that our kernel threads spend
their time in the more acceptable 'S' state rather than the more
worrying 'D' state.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
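
As background for the wait_event_interruptible() change, a minimal sketch of
the behavioural difference follows (illustrative only; 'my_waitq', 'my_cond'
and 'my_thread' are made-up names, not identifiers from this patch):

#include <linux/wait.h>
#include <linux/kthread.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int my_cond;

/* Started elsewhere with kthread_run(my_thread, NULL, "my_thread"). */
static int my_thread(void *unused)
{
        for (;;) {
                /*
                 * wait_event() sleeps in TASK_UNINTERRUPTIBLE, so an idle
                 * thread shows up as 'D' in ps/top:
                 *
                 *     wait_event(my_waitq, my_cond);
                 *
                 * wait_event_interruptible() sleeps in TASK_INTERRUPTIBLE
                 * ('S' in ps) and may return before the condition is true,
                 * so the condition must be re-checked before acting on it --
                 * hence the new "if (avail == 0) continue;" checks in the
                 * xenbus_comms.c hunks below.
                 */
                wait_event_interruptible(my_waitq, my_cond);

                if (!my_cond)
                        continue;

                /* ... consume whatever my_cond guards, then clear it ... */
        }
        return 0;
}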

diff -r 03d69dbea152 -r 5134f3c512c8 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c    Mon Oct 10 15:57:41 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c    Mon Oct 10 17:16:03 2005
@@ -128,7 +128,7 @@
                void *dst;
                unsigned int avail;
 
-               wait_event(xb_waitq, output_avail(out));
+               wait_event_interruptible(xb_waitq, output_avail(out));
 
                mb();
                h = *out;
@@ -136,6 +136,8 @@
                        return -EIO;
 
                dst = get_output_chunk(&h, out->buf, &avail);
+               if (avail == 0)
+                       continue;
                if (avail > len)
                        avail = len;
                memcpy(dst, data, avail);
@@ -167,7 +169,7 @@
                unsigned int avail;
                const char *src;
 
-               wait_event(xb_waitq, xs_input_avail());
+               wait_event_interruptible(xb_waitq, xs_input_avail());
 
                mb();
                h = *in;
@@ -175,6 +177,8 @@
                        return -EIO;
 
                src = get_input_chunk(&h, in->buf, &avail);
+               if (avail == 0)
+                       continue;
                if (avail > len)
                        avail = len;
                was_full = !output_avail(&h);
diff -r 03d69dbea152 -r 5134f3c512c8 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c    Mon Oct 10 15:57:41 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c    Mon Oct 10 17:16:03 2005
@@ -43,9 +43,6 @@
 
 static struct notifier_block *xenstore_chain;
 
-/* Now used to protect xenbus probes against save/restore. */
-static DECLARE_MUTEX(xenbus_lock);
-
 /* If something in array of ids matches this device, return it. */
 static const struct xenbus_device_id *
 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -232,18 +229,13 @@
 static int xenbus_register_driver_common(struct xenbus_driver *drv,
                                         struct xen_bus_type *bus)
 {
-       int err;
-
        drv->driver.name = drv->name;
        drv->driver.bus = &bus->bus;
        drv->driver.owner = drv->owner;
        drv->driver.probe = xenbus_dev_probe;
        drv->driver.remove = xenbus_dev_remove;
 
-       down(&xenbus_lock);
-       err = driver_register(&drv->driver);
-       up(&xenbus_lock);
-       return err;
+       return driver_register(&drv->driver);
 }
 
 int xenbus_register_driver(struct xenbus_driver *drv)
@@ -259,9 +251,7 @@
 
 void xenbus_unregister_driver(struct xenbus_driver *drv)
 {
-       down(&xenbus_lock);
        driver_unregister(&drv->driver);
-       up(&xenbus_lock);
 }
 EXPORT_SYMBOL(xenbus_unregister_driver);
 
@@ -624,8 +614,6 @@
 
 void xenbus_suspend(void)
 {
-       /* We keep lock, so no comms can happen as page moves. */
-       down(&xenbus_lock);
        bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
        bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, suspend_dev);
        xs_suspend();
@@ -637,14 +625,11 @@
        xs_resume();
        bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
        bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, resume_dev);
-       up(&xenbus_lock);
 }
 
 int register_xenstore_notifier(struct notifier_block *nb)
 {
        int ret = 0;
-
-       down(&xenbus_lock);
 
        if (xen_start_info->store_evtchn) {
                ret = nb->notifier_call(nb, 0, NULL);
@@ -652,17 +637,13 @@
                notifier_chain_register(&xenstore_chain, nb);
        }
 
-       up(&xenbus_lock);
-
        return ret;
 }
 EXPORT_SYMBOL(register_xenstore_notifier);
 
 void unregister_xenstore_notifier(struct notifier_block *nb)
 {
-       down(&xenbus_lock);
        notifier_chain_unregister(&xenstore_chain, nb);
-       up(&xenbus_lock);
 }
 EXPORT_SYMBOL(unregister_xenstore_notifier);
 
@@ -683,16 +664,16 @@
                return err;
        }
 
-       down(&xenbus_lock);
        /* Enumerate devices in xenstore. */
        xenbus_probe_devices(&xenbus_frontend);
        xenbus_probe_devices(&xenbus_backend);
+
        /* Watch for changes. */
        register_xenbus_watch(&fe_watch);
        register_xenbus_watch(&be_watch);
+
        /* Notify others that xenstore is up */
        notifier_call_chain(&xenstore_chain, 0, 0);
-       up(&xenbus_lock);
 
        return 0;
 }
diff -r 03d69dbea152 -r 5134f3c512c8 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Mon Oct 10 15:57:41 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Mon Oct 10 17:16:03 2005
@@ -43,18 +43,18 @@
 #define streq(a, b) (strcmp((a), (b)) == 0)
 
 struct xs_stored_msg {
+       struct list_head list;
+
        struct xsd_sockmsg hdr;
 
        union {
-               /* Stored replies. */
+               /* Queued replies. */
                struct {
-                       struct list_head list;
                        char *body;
                } reply;
 
-               /* Queued watch callbacks. */
+               /* Queued watch events. */
                struct {
-                       struct work_struct work;
                        struct xenbus_watch *handle;
                        char **vec;
                        unsigned int vec_size;
@@ -77,9 +77,23 @@
 
 static struct xs_handle xs_state;
 
+/* List of registered watches, and a lock to protect it. */
 static LIST_HEAD(watches);
 static DEFINE_SPINLOCK(watches_lock);
-static struct workqueue_struct *watches_workq;
+
+/* List of pending watch callback events, and a lock to protect it. */
+static LIST_HEAD(watch_events);
+static DEFINE_SPINLOCK(watch_events_lock);
+
+/*
+ * Details of the xenwatch callback kernel thread. The thread waits on the
+ * watch_events_waitq for work to do (queued on watch_events list). When it
+ * wakes up it acquires the xenwatch_mutex before reading the list and
+ * carrying out work.
+ */
+static pid_t xenwatch_pid;
+static DECLARE_MUTEX(xenwatch_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
 
 static int get_error(const char *errorstring)
 {
@@ -105,14 +119,14 @@
 
        while (list_empty(&xs_state.reply_list)) {
                spin_unlock(&xs_state.reply_lock);
-               wait_event(xs_state.reply_waitq,
-                          !list_empty(&xs_state.reply_list));
+               wait_event_interruptible(xs_state.reply_waitq,
+                                        !list_empty(&xs_state.reply_list));
                spin_lock(&xs_state.reply_lock);
        }
 
        msg = list_entry(xs_state.reply_list.next,
-                        struct xs_stored_msg, u.reply.list);
-       list_del(&msg->u.reply.list);
+                        struct xs_stored_msg, list);
+       list_del(&msg->list);
 
        spin_unlock(&xs_state.reply_lock);
 
@@ -606,6 +620,7 @@
 
 void unregister_xenbus_watch(struct xenbus_watch *watch)
 {
+       struct xs_stored_msg *msg, *tmp;
        char token[sizeof(watch) * 2 + 1];
        int err;
 
@@ -626,8 +641,22 @@
 
        up_read(&xs_state.suspend_mutex);
 
-       /* Make sure watch is not in use. */
-       flush_workqueue(watches_workq);
+       /* Cancel pending watch events. */
+       spin_lock(&watch_events_lock);
+       list_for_each_entry_safe(msg, tmp, &watch_events, list) {
+               if (msg->u.watch.handle != watch)
+                       continue;
+               list_del(&msg->list);
+               kfree(msg->u.watch.vec);
+               kfree(msg);
+       }
+       spin_unlock(&watch_events_lock);
+
+       /* Flush any currently-executing callback, unless we are it. :-) */
+       if (current->pid != xenwatch_pid) {
+               down(&xenwatch_mutex);
+               up(&xenwatch_mutex);
+       }
 }
 EXPORT_SYMBOL(unregister_xenbus_watch);
 
@@ -653,16 +682,35 @@
        up_write(&xs_state.suspend_mutex);
 }
 
-static void xenbus_fire_watch(void *arg)
-{
-       struct xs_stored_msg *msg = arg;
-
-       msg->u.watch.handle->callback(msg->u.watch.handle,
-                                     (const char **)msg->u.watch.vec,
-                                     msg->u.watch.vec_size);
-
-       kfree(msg->u.watch.vec);
-       kfree(msg);
+static int xenwatch_thread(void *unused)
+{
+       struct list_head *ent;
+       struct xs_stored_msg *msg;
+
+       for (;;) {
+               wait_event_interruptible(watch_events_waitq,
+                                        !list_empty(&watch_events));
+
+               down(&xenwatch_mutex);
+
+               spin_lock(&watch_events_lock);
+               ent = watch_events.next;
+               if (ent != &watch_events)
+                       list_del(ent);
+               spin_unlock(&watch_events_lock);
+
+               if (ent != &watch_events) {
+                       msg = list_entry(ent, struct xs_stored_msg, list);
+                       msg->u.watch.handle->callback(
+                               msg->u.watch.handle,
+                               (const char **)msg->u.watch.vec,
+                               msg->u.watch.vec_size);
+                       kfree(msg->u.watch.vec);
+                       kfree(msg);
+               }
+
+               up(&xenwatch_mutex);
+       }
 }
 
 static int process_msg(void)
@@ -696,8 +744,6 @@
        body[msg->hdr.len] = '\0';
 
        if (msg->hdr.type == XS_WATCH_EVENT) {
-               INIT_WORK(&msg->u.watch.work, xenbus_fire_watch, msg);
-
                msg->u.watch.vec = split(body, msg->hdr.len,
                                         &msg->u.watch.vec_size);
                if (IS_ERR(msg->u.watch.vec)) {
@@ -709,7 +755,10 @@
                msg->u.watch.handle = find_watch(
                        msg->u.watch.vec[XS_WATCH_TOKEN]);
                if (msg->u.watch.handle != NULL) {
-                       queue_work(watches_workq, &msg->u.watch.work);
+                       spin_lock(&watch_events_lock);
+                       list_add_tail(&msg->list, &watch_events);
+                       wake_up(&watch_events_waitq);
+                       spin_unlock(&watch_events_lock);
                } else {
                        kfree(msg->u.watch.vec);
                        kfree(msg);
@@ -718,7 +767,7 @@
        } else {
                msg->u.reply.body = body;
                spin_lock(&xs_state.reply_lock);
-               list_add_tail(&msg->u.reply.list, &xs_state.reply_list);
+               list_add_tail(&msg->list, &xs_state.reply_list);
                spin_unlock(&xs_state.reply_lock);
                wake_up(&xs_state.reply_waitq);
        }
@@ -726,7 +775,7 @@
        return 0;
 }
 
-static int read_thread(void *unused)
+static int xenbus_thread(void *unused)
 {
        int err;
 
@@ -741,7 +790,7 @@
 int xs_init(void)
 {
        int err;
-       struct task_struct *reader;
+       struct task_struct *task;
 
        INIT_LIST_HEAD(&xs_state.reply_list);
        spin_lock_init(&xs_state.reply_lock);
@@ -755,13 +804,14 @@
        if (err)
                return err;
 
-       /* Create our own workqueue for executing watch callbacks. */
-       watches_workq = create_singlethread_workqueue("xenwatch");
-       BUG_ON(watches_workq == NULL);
-
-       reader = kthread_run(read_thread, NULL, "xenbus");
-       if (IS_ERR(reader))
-               return PTR_ERR(reader);
+       task = kthread_run(xenwatch_thread, NULL, "xenwatch");
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       xenwatch_pid = task->pid;
+
+       task = kthread_run(xenbus_thread, NULL, "xenbus");
+       if (IS_ERR(task))
+               return PTR_ERR(task);
 
        return 0;
 }
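
The unregistration fix above boils down to a reusable pattern: drop
queued-but-undelivered events under the list lock, then take and release the
callback mutex so that a callback already in flight has completed before
unregistration returns. A condensed, illustrative sketch with hypothetical
names (pending_events, callback_mutex, ...); the real identifiers in the
patch are watch_events, watch_events_lock and xenwatch_mutex:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>        /* current */
#include <linux/slab.h>
#include <asm/semaphore.h>      /* DECLARE_MUTEX() on 2.6 kernels of this era */

struct pending_event {
        struct list_head list;
        void *handle;           /* the watch this event was queued for */
        char **vec;             /* payload; freed together with the event */
};

static LIST_HEAD(pending_events);
static DEFINE_SPINLOCK(pending_events_lock);
static DECLARE_MUTEX(callback_mutex);
static pid_t callback_thread_pid;

static void cancel_pending_events(void *watch)
{
        struct pending_event *ev, *tmp;

        /* Drop events queued for this watch but not yet delivered. */
        spin_lock(&pending_events_lock);
        list_for_each_entry_safe(ev, tmp, &pending_events, list) {
                if (ev->handle != watch)
                        continue;
                list_del(&ev->list);
                kfree(ev->vec);
                kfree(ev);
        }
        spin_unlock(&pending_events_lock);

        /* Wait out a callback that is already running, unless we *are*
         * the callback thread (taking the mutex then would deadlock). */
        if (current->pid != callback_thread_pid) {
                down(&callback_mutex);
                up(&callback_mutex);
        }
}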

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog