
[Xen-devel] [PATCH] [Mini-OS] add xenbus token support and separate watch event queues



add xenbus token support and separate watch event queues

Signed-off-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx>
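
For reviewers, a rough usage sketch (not part of the patch) of the per-token
queues added below: a driver passes its own token and event-list pointer to
xenbus_watch_path_token(), and the xenbus thread then queues matching
XS_WATCH_EVENT messages on that list instead of on the global xenbus_events
one.  The "device/vif/0/backend-state" path, the "vif-0" token, the function
names and the include list are invented/assumed for illustration only, and
XBT_NIL is taken to be the usual Mini-OS nil transaction:

    #include <lib.h>
    #include <sched.h>
    #include <wait.h>
    #include <xenbus.h>

    /* Private queue filled by the xenbus thread for the "vif-0" token. */
    static struct xenbus_event *volatile vif_events;

    static char *setup_backend_watch(void)
    {
        /* Events carrying the "vif-0" token are queued on vif_events
           rather than on the global xenbus_events list. */
        return xenbus_watch_path_token(XBT_NIL, "device/vif/0/backend-state",
                                       "vif-0", &vif_events);
    }

    static void wait_for_backend_event(void)
    {
        struct xenbus_event *event;
        DEFINE_WAIT(w);

        /* Sleep on the shared xenbus_watch_queue until an event lands
           on our private list, then pop it (LIFO, as it was queued). */
        while (!(event = vif_events)) {
            add_waiter(w, xenbus_watch_queue);
            schedule();
        }
        remove_waiter(w);
        vif_events = event->next;

        /* ... inspect event->path / event->token here ... */

        /* path and token live in the same allocation as the event
           itself, so a single free releases everything. */
        free(event);
    }

Existing callers of xenbus_watch_path()/xenbus_wait_for_watch() keep the old
behaviour: those now wrap the token interface with the global
XENBUS_WATCH_PATH_TOKEN and the global event list.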

# HG changeset patch
# User Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx>
# Date 1200582503 0
# Node ID 112c1180720c87a0bfa99142399e3ef3f79f1711
# Parent  33d1c7f9217a1c915afe01fe822811ce69b3a90f
add xenbus token support and separate watch event queues

diff -r 33d1c7f9217a -r 112c1180720c extras/mini-os/include/xenbus.h
--- a/extras/mini-os/include/xenbus.h   Thu Jan 17 14:58:01 2008 +0000
+++ b/extras/mini-os/include/xenbus.h   Thu Jan 17 15:08:23 2008 +0000
@@ -12,13 +12,45 @@ void init_xenbus(void);
    set to a malloc'd copy of the value. */
 char *xenbus_read(xenbus_transaction_t xbt, const char *path, char **value);
 
-char *xenbus_watch_path(xenbus_transaction_t xbt, const char *path);
-void wait_for_watch(void);
-char* xenbus_wait_for_value(const char*,const char*);
+/* Watch event queue */
+struct xenbus_event {
+    /* Keep these two as this for xs.c */
+    char *path;
+    char *token;
+    struct xenbus_event *next;
+};
+
+char *xenbus_watch_path_token(xenbus_transaction_t xbt, const char *path, const char *token, struct xenbus_event *volatile *events);
+char *xenbus_unwatch_path_token(xenbus_transaction_t xbt, const char *path, const char *token);
+extern struct wait_queue_head xenbus_watch_queue;
+void xenbus_wait_for_watch(void);
+char **xenbus_wait_for_watch_return(void);
+char* xenbus_wait_for_value(const char *path, const char *value);
+
+/* When no token is provided, use a global queue. */
+#define XENBUS_WATCH_PATH_TOKEN "xenbus_watch_path"
+extern struct xenbus_event * volatile xenbus_events;
+#define xenbus_watch_path(xbt, path) xenbus_watch_path_token(xbt, path, XENBUS_WATCH_PATH_TOKEN, NULL)
+#define xenbus_unwatch_path(xbt, path) xenbus_unwatch_path_token(xbt, path, XENBUS_WATCH_PATH_TOKEN)
+
 
 /* Associates a value with a path.  Returns a malloc'd error string on
    failure. */
 char *xenbus_write(xenbus_transaction_t xbt, const char *path, const char *value);
+
+struct write_req {
+    const void *data;
+    unsigned len;
+};
+
+/* Send a message to xenbus, in the same fashion as xb_write, and
+   block waiting for a reply.  The reply is malloced and should be
+   freed by the caller. */
+struct xsd_sockmsg *
+xenbus_msg_reply(int type,
+                 xenbus_transaction_t trans,
+                 struct write_req *io,
+                 int nr_reqs);
 
 /* Removes the value associated with a path.  Returns a malloc'd error
    string on failure. */
@@ -52,4 +84,9 @@ char *xenbus_transaction_end(xenbus_tran
 /* Read path and parse it as an integer.  Returns -1 on error. */
 int xenbus_read_integer(char *path);
 
+/* Contraction of snprintf and xenbus_write(path/node). */
+char* xenbus_printf(xenbus_transaction_t xbt,
+                                  char* node, char* path,
+                                  char* fmt, ...);
+
 #endif /* XENBUS_H__ */
diff -r 33d1c7f9217a -r 112c1180720c extras/mini-os/netfront.c
--- a/extras/mini-os/netfront.c Thu Jan 17 14:58:01 2008 +0000
+++ b/extras/mini-os/netfront.c Thu Jan 17 15:08:23 2008 +0000
@@ -26,20 +26,6 @@ struct net_info {
 
 } net_info;
 
-
-char* xenbus_printf(xenbus_transaction_t xbt,
-        char* node,char* path,
-        char* fmt,unsigned int arg)
-{
-    char fullpath[256];
-    char val[256];
-
-    sprintf(fullpath,"%s/%s",node,path);
-    sprintf(val,fmt,arg);
-    xenbus_write(xbt,fullpath,val);
-
-    return NULL;
-}
 
 
 #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
diff -r 33d1c7f9217a -r 112c1180720c extras/mini-os/xenbus/xenbus.c
--- a/extras/mini-os/xenbus/xenbus.c    Thu Jan 17 14:58:01 2008 +0000
+++ b/extras/mini-os/xenbus/xenbus.c    Thu Jan 17 15:08:23 2008 +0000
@@ -43,7 +43,14 @@
 
 static struct xenstore_domain_interface *xenstore_buf;
 static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
-static DECLARE_WAIT_QUEUE_HEAD(watch_queue);
+DECLARE_WAIT_QUEUE_HEAD(xenbus_watch_queue);
+
+struct xenbus_event *volatile xenbus_events;
+static struct watch {
+    char *token;
+    struct xenbus_event *volatile *events;
+    struct watch *next;
+} *watches;
 struct xenbus_req_info 
 {
     int in_use:1;
@@ -68,16 +75,27 @@ static void memcpy_from_ring(const void 
     memcpy(dest + c1, ring, c2);
 }
 
-void wait_for_watch(void)
+char **xenbus_wait_for_watch_return()
 {
+    struct xenbus_event *event;
     DEFINE_WAIT(w);
-    add_waiter(w,watch_queue);
-    schedule();
+    while (!(event = xenbus_events)) {
+        add_waiter(w, xenbus_watch_queue);
+        schedule();
+    }
     remove_waiter(w);
-    wake(current);
+    xenbus_events = event->next;
+    return &event->path;
 }
 
-char* xenbus_wait_for_value(const char* path,const char* value)
+void xenbus_wait_for_watch(void)
+{
+    char **ret;
+    ret = xenbus_wait_for_watch_return();
+    free(ret);
+}
+
+char* xenbus_wait_for_value(const char* path, const char* value)
 {
     for(;;)
     {
@@ -91,7 +109,7 @@ char* xenbus_wait_for_value(const char* 
         free(res);
 
         if(r==0) break;
-        else wait_for_watch();
+        else xenbus_wait_for_watch();
     }
     return NULL;
 }
@@ -129,20 +147,32 @@ static void xenbus_thread_func(void *ign
 
             if(msg.type == XS_WATCH_EVENT)
             {
-                char* payload = (char*)malloc(sizeof(msg) + msg.len);
-                char *path,*token;
+               struct xenbus_event *event = malloc(sizeof(*event) + msg.len),
+                                    *volatile *events = NULL;
+               char *data = (char*)event + sizeof(*event);
+                struct watch *watch;
 
                 memcpy_from_ring(xenstore_buf->rsp,
-                    payload,
-                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons),
-                    msg.len + sizeof(msg));
+                   data,
+                    MASK_XENSTORE_IDX(xenstore_buf->rsp_cons + sizeof(msg)),
+                    msg.len);
 
-                path = payload + sizeof(msg);
-                token = path + strlen(path) + 1;
+               event->path = data;
+               event->token = event->path + strlen(event->path) + 1;
 
                 xenstore_buf->rsp_cons += msg.len + sizeof(msg);
-                free(payload);
-                wake_up(&watch_queue);
+
+                for (watch = watches; watch; watch = watch->next)
+                    if (!strcmp(watch->token, event->token)) {
+                        events = watch->events;
+                        break;
+                    }
+                if (!events)
+                    events = &xenbus_events;
+
+               event->next = *events;
+               *events = event;
+                wake_up(&xenbus_watch_queue);
             }
 
             else
@@ -230,11 +260,6 @@ void init_xenbus(void)
     DEBUG("xenbus on irq %d\n", err);
 }
 
-struct write_req {
-    const void *data;
-    unsigned len;
-};
-
 /* Send data to xenbus.  This can block.  All of the requests are seen
    by xenbus as if sent atomically.  The header is added
    automatically, using type %type, req_id %req_id, and trans_id
@@ -316,7 +341,7 @@ static void xb_write(int type, int req_i
 /* Send a mesasge to xenbus, in the same fashion as xb_write, and
    block waiting for a reply.  The reply is malloced and should be
    freed by the caller. */
-static struct xsd_sockmsg *
+struct xsd_sockmsg *
 xenbus_msg_reply(int type,
                 xenbus_transaction_t trans,
                 struct write_req *io,
@@ -437,23 +462,55 @@ char *xenbus_write(xenbus_transaction_t 
     return NULL;
 }
 
-char* xenbus_watch_path( xenbus_transaction_t xbt, const char *path)
+char* xenbus_watch_path_token( xenbus_transaction_t xbt, const char *path, const char *token, struct xenbus_event *volatile *events)
 {
-       /* in the future one could have multiple watch queues, and use
-        * the token for demuxing. For now the token is 0. */
-
     struct xsd_sockmsg *rep;
 
     struct write_req req[] = { 
         {path, strlen(path) + 1},
-        {"0",2 },
+       {token, strlen(token) + 1},
     };
+
+    struct watch *watch = malloc(sizeof(*watch));
+
+    watch->token = strdup(token);
+    watch->events = events;
+    watch->next = watches;
+    watches = watch;
 
     rep = xenbus_msg_reply(XS_WATCH, xbt, req, ARRAY_SIZE(req));
 
     char *msg = errmsg(rep);
     if (msg) return msg;
     free(rep);
+
+    return NULL;
+}
+
+char* xenbus_unwatch_path_token( xenbus_transaction_t xbt, const char *path, const char *token)
+{
+    struct xsd_sockmsg *rep;
+
+    struct write_req req[] = { 
+        {path, strlen(path) + 1},
+       {token, strlen(token) + 1},
+    };
+
+    struct watch *watch, **prev;
+
+    rep = xenbus_msg_reply(XS_UNWATCH, xbt, req, ARRAY_SIZE(req));
+
+    char *msg = errmsg(rep);
+    if (msg) return msg;
+    free(rep);
+
+    for (prev = &watches, watch = *prev; watch; prev = &watch->next, watch = *prev)
+        if (!strcmp(watch->token, token)) {
+            free(watch->token);
+            *prev = watch->next;
+            free(watch);
+            break;
+        }
 
     return NULL;
 }
@@ -566,6 +623,25 @@ int xenbus_read_integer(char *path)
     return t;
 }
 
+char* xenbus_printf(xenbus_transaction_t xbt,
+                                  char* node, char* path,
+                                  char* fmt, ...)
+{
+#define BUFFER_SIZE 256
+    char fullpath[BUFFER_SIZE];
+    char val[BUFFER_SIZE];
+    va_list args;
+
+    BUG_ON(strlen(node) + strlen(path) + 1 >= BUFFER_SIZE);
+    sprintf(fullpath,"%s/%s", node, path);
+    va_start(args, fmt);
+    vsprintf(val, fmt, args);
+    va_end(args);
+    xenbus_write(xbt,fullpath,val);
+
+    return NULL;
+}
+
 static void do_ls_test(const char *pre)
 {
     char **dirs;

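As a side note, exporting xenbus_msg_reply() also lets other files issue raw
xenstore requests.  A sketch, not part of the patch, with the "name" path
chosen arbitrarily and the include names assumed to be the usual Mini-OS ones:

    #include <os.h>
    #include <console.h>
    #include <lib.h>
    #include <xenbus.h>
    #include <xen/io/xs_wire.h>

    /* Read the local domain's "name" node with a raw XS_READ request.
       The reply payload follows the xsd_sockmsg header in the same
       malloc'd buffer, which the caller must free. */
    static void raw_read_example(void)
    {
        static const char path[] = "name";
        struct write_req req[] = { { path, sizeof(path) } };
        struct xsd_sockmsg *rep;

        rep = xenbus_msg_reply(XS_READ, XBT_NIL, req, ARRAY_SIZE(req));
        if (rep->type == XS_ERROR)
            printk("xenstore error: %s\n", (char *)rep + sizeof(*rep));
        else
            printk("read %d bytes\n", rep->len);
        free(rep);
    }
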
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

