[Xen-changelog] [xen-unstable] xenstore: support building the xenstore clients statically.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207744053 -3600
# Node ID 32e3c81ada56100a3d08df8c2044c4103fbd6e25
# Parent  9b635405ef901c9939a62d83d53ae681333954ea
xenstore: support building the xenstore clients statically.

This removes threading from libxenstore.a (but not libxenstore.so),
since pthreads does not mix well with static linking and none of the
command-line clients require threads anyway.
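
In short, the pthread calls in xs.c are hidden behind a small set of
macros: with USE_PTHREAD they expand to the real pthread primitives,
and without it they become no-ops, with condvar_wait() falling back to
a direct, synchronous read_message() call. Condensed from the xs.c
hunks below:

  #ifdef USE_PTHREAD
  #include <pthread.h>
  #define mutex_lock(m)          pthread_mutex_lock(m)
  #define mutex_unlock(m)        pthread_mutex_unlock(m)
  #define condvar_signal(c)      pthread_cond_signal(c)
  #define condvar_wait(c,m,hnd)  pthread_cond_wait(c,m)
  #else
  /* Single-threaded build: locking is a no-op and "waiting" for a
   * reply or watch event just reads the next message from the daemon
   * socket. */
  #define mutex_lock(m)          ((void)0)
  #define mutex_unlock(m)        ((void)0)
  #define condvar_signal(c)      ((void)0)
  #define condvar_wait(c,m,hnd)  read_message(hnd)
  #endif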

It is now possible to build these utilities statically with a uclibc
toolchain, which is useful for small userspace utility domains.
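
As a rough illustration (not part of this patch), the kind of client
this enables is a small single-threaded program that uses only the
synchronous libxenstore calls. The sketch below assumes the public
xs.h interface (xs_daemon_open(), xs_read(), xs_daemon_close() and
XBT_NULL); only xs_daemon_close() actually appears in the diff, the
others are taken on trust from the installed header:

  /* Hypothetical example: read one xenstore key using the synchronous
   * API only, so it can link against the thread-free libxenstore.a. */
  #include <stdio.h>
  #include <stdlib.h>
  #include <xs.h>

  int main(int argc, char **argv)
  {
          struct xs_handle *h;
          unsigned int len;
          char *val;
          int rc = 1;

          if (argc != 2) {
                  fprintf(stderr, "usage: %s <xenstore-path>\n", argv[0]);
                  return 1;
          }

          h = xs_daemon_open();           /* connect to xenstored */
          if (!h)
                  return 1;

          /* Blocking read: in the static build this pulls the reply off
           * the socket directly rather than waiting on a reader thread. */
          val = xs_read(h, XBT_NULL, argv[1], &len);
          if (val) {
                  printf("%.*s\n", (int)len, val);
                  free(val);
                  rc = 0;
          }

          xs_daemon_close(h);
          return rc;
  }

With the patch applied, building tools/xenstore with
XENSTORE_STATIC_CLIENTS=y selects libxenstore.a and adds -static when
linking the in-tree clients; a standalone utility like the above would
be linked against the static library in the same way (exact flags
depend on the toolchain).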

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 tools/xenstore/Makefile |   20 +++++++-
 tools/xenstore/xs.c     |  108 ++++++++++++++++++++++++++++++++----------------
 2 files changed, 89 insertions(+), 39 deletions(-)

diff -r 9b635405ef90 -r 32e3c81ada56 tools/xenstore/Makefile
--- a/tools/xenstore/Makefile   Wed Apr 09 11:30:32 2008 +0100
+++ b/tools/xenstore/Makefile   Wed Apr 09 13:27:33 2008 +0100
@@ -24,8 +24,18 @@ XENSTORED_OBJS_$(CONFIG_NetBSD) = xensto
 
 XENSTORED_OBJS += $(XENSTORED_OBJS_y)
 
+ifneq ($(XENSTORE_STATIC_CLIENTS),y)
+LIBXENSTORE := libxenstore.so
+else
+LIBXENSTORE := libxenstore.a
+$(CLIENTS) xenstore-control xenstore-ls: CFLAGS += -static
+endif
+
 .PHONY: all
-all: libxenstore.so libxenstore.a xenstored $(CLIENTS) xs_tdb_dump xenstore-control xenstore-ls
+all: libxenstore.so libxenstore.a xenstored clients xs_tdb_dump 
+
+.PHONY: clients
+clients: $(CLIENTS) xenstore-control xenstore-ls
 
 ifeq ($(CONFIG_SunOS),y)
 xenstored_probes.h: xenstored_probes.d
@@ -42,16 +52,16 @@ xenstored: $(XENSTORED_OBJS)
 xenstored: $(XENSTORED_OBJS)
        $(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDFLAGS_libxenctrl) $(SOCKET_LIBS) -o $@
 
-$(CLIENTS): xenstore-%: xenstore_%.o libxenstore.so
+$(CLIENTS): xenstore-%: xenstore_%.o $(LIBXENSTORE)
        $(CC) $(CFLAGS) $(LDFLAGS) $< -L. -lxenstore $(SOCKET_LIBS) -o $@
 
 $(CLIENTS_OBJS): xenstore_%.o: xenstore_client.c
        $(COMPILE.c) -DCLIENT_$(*F) -o $@ $<
 
-xenstore-control: xenstore_control.o libxenstore.so
+xenstore-control: xenstore_control.o $(LIBXENSTORE)
        $(CC) $(CFLAGS) $(LDFLAGS) $< -L. -lxenstore $(SOCKET_LIBS) -o $@
 
-xenstore-ls: xsls.o libxenstore.so
+xenstore-ls: xsls.o $(LIBXENSTORE)
        $(CC) $(CFLAGS) $(LDFLAGS) $< -L. -lxenstore $(SOCKET_LIBS) -o $@
 
 xs_tdb_dump: xs_tdb_dump.o utils.o tdb.o talloc.o
@@ -61,6 +71,8 @@ libxenstore.so: libxenstore.so.$(MAJOR)
        ln -sf $< $@
 libxenstore.so.$(MAJOR): libxenstore.so.$(MAJOR).$(MINOR)
        ln -sf $< $@
+
+xs.opic: CFLAGS += -DUSE_PTHREAD
 
 libxenstore.so.$(MAJOR).$(MINOR): xs.opic xs_lib.opic
        $(CC) $(CFLAGS) $(LDFLAGS) -Wl,$(SONAME_LDFLAG) -Wl,libxenstore.so.$(MAJOR) $(SHLIB_CFLAGS) -o $@ $^ $(SOCKET_LIBS) -lpthread
diff -r 9b635405ef90 -r 32e3c81ada56 tools/xenstore/xs.c
--- a/tools/xenstore/xs.c       Wed Apr 09 11:30:32 2008 +0100
+++ b/tools/xenstore/xs.c       Wed Apr 09 13:27:33 2008 +0100
@@ -32,7 +32,6 @@
 #include <signal.h>
 #include <stdint.h>
 #include <errno.h>
-#include <pthread.h>
 #include "xs.h"
 #include "list.h"
 #include "utils.h"
@@ -42,6 +41,10 @@ struct xs_stored_msg {
        struct xsd_sockmsg hdr;
        char *body;
 };
+
+#ifdef USE_PTHREAD
+
+#include <pthread.h>
 
 struct xs_handle {
        /* Communications channel to xenstore daemon. */
@@ -78,14 +81,37 @@ struct xs_handle {
        pthread_mutex_t request_mutex;
 };
 
+#define mutex_lock(m)          pthread_mutex_lock(m)
+#define mutex_unlock(m)                pthread_mutex_unlock(m)
+#define condvar_signal(c)      pthread_cond_signal(c)
+#define condvar_wait(c,m,hnd)  pthread_cond_wait(c,m)
+
+static void *read_thread(void *arg);
+
+#else /* !defined(USE_PTHREAD) */
+
+struct xs_handle {
+       int fd;
+       struct list_head reply_list;
+       struct list_head watch_list;
+       /* Clients can select() on this pipe to wait for a watch to fire. */
+       int watch_pipe[2];
+};
+
+#define mutex_lock(m)          ((void)0)
+#define mutex_unlock(m)                ((void)0)
+#define condvar_signal(c)      ((void)0)
+#define condvar_wait(c,m,hnd)  read_message(hnd)
+
+#endif
+
 static int read_message(struct xs_handle *h);
-static void *read_thread(void *arg);
 
 int xs_fileno(struct xs_handle *h)
 {
        char c = 0;
 
-       pthread_mutex_lock(&h->watch_mutex);
+       mutex_lock(&h->watch_mutex);
 
        if ((h->watch_pipe[0] == -1) && (pipe(h->watch_pipe) != -1)) {
                /* Kick things off if the watch list is already non-empty. */
@@ -94,7 +120,7 @@ int xs_fileno(struct xs_handle *h)
                                continue;
        }
 
-       pthread_mutex_unlock(&h->watch_mutex);
+       mutex_unlock(&h->watch_mutex);
 
        return h->watch_pipe[0];
 }
@@ -163,18 +189,21 @@ static struct xs_handle *get_handle(cons
 
        h->fd = fd;
 
+       INIT_LIST_HEAD(&h->reply_list);
+       INIT_LIST_HEAD(&h->watch_list);
+
        /* Watch pipe is allocated on demand in xs_fileno(). */
        h->watch_pipe[0] = h->watch_pipe[1] = -1;
 
-       INIT_LIST_HEAD(&h->watch_list);
+#ifdef USE_PTHREAD
        pthread_mutex_init(&h->watch_mutex, NULL);
        pthread_cond_init(&h->watch_condvar, NULL);
 
-       INIT_LIST_HEAD(&h->reply_list);
        pthread_mutex_init(&h->reply_mutex, NULL);
        pthread_cond_init(&h->reply_condvar, NULL);
 
        pthread_mutex_init(&h->request_mutex, NULL);
+#endif
 
        return h;
 }
@@ -198,15 +227,17 @@ void xs_daemon_close(struct xs_handle *h
 {
        struct xs_stored_msg *msg, *tmsg;
 
-       pthread_mutex_lock(&h->request_mutex);
-       pthread_mutex_lock(&h->reply_mutex);
-       pthread_mutex_lock(&h->watch_mutex);
-
+       mutex_lock(&h->request_mutex);
+       mutex_lock(&h->reply_mutex);
+       mutex_lock(&h->watch_mutex);
+
+#ifdef USE_PTHREAD
        if (h->read_thr_exists) {
                /* XXX FIXME: May leak an unpublished message buffer. */
                pthread_cancel(h->read_thr);
                pthread_join(h->read_thr, NULL);
        }
+#endif
 
        list_for_each_entry_safe(msg, tmsg, &h->reply_list, list) {
                free(msg->body);
@@ -218,9 +249,9 @@ void xs_daemon_close(struct xs_handle *h
                free(msg);
        }
 
-       pthread_mutex_unlock(&h->request_mutex);
-       pthread_mutex_unlock(&h->reply_mutex);
-       pthread_mutex_unlock(&h->watch_mutex);
+       mutex_unlock(&h->request_mutex);
+       mutex_unlock(&h->reply_mutex);
+       mutex_unlock(&h->watch_mutex);
 
        if (h->watch_pipe[0] != -1) {
                close(h->watch_pipe[0]);
@@ -277,17 +308,19 @@ static void *read_reply(
        struct xs_stored_msg *msg;
        char *body;
 
+#ifdef USE_PTHREAD
        /* Read from comms channel ourselves if there is no reader thread. */
        if (!h->read_thr_exists && (read_message(h) == -1))
                return NULL;
-
-       pthread_mutex_lock(&h->reply_mutex);
+#endif
+
+       mutex_lock(&h->reply_mutex);
        while (list_empty(&h->reply_list))
-               pthread_cond_wait(&h->reply_condvar, &h->reply_mutex);
+               condvar_wait(&h->reply_condvar, &h->reply_mutex, h);
        msg = list_top(&h->reply_list, struct xs_stored_msg, list);
        list_del(&msg->list);
        assert(list_empty(&h->reply_list));
-       pthread_mutex_unlock(&h->reply_mutex);
+       mutex_unlock(&h->reply_mutex);
 
        *type = msg->hdr.type;
        if (len)
@@ -329,7 +362,7 @@ static void *xs_talkv(struct xs_handle *
        ignorepipe.sa_flags = 0;
        sigaction(SIGPIPE, &ignorepipe, &oldact);
 
-       pthread_mutex_lock(&h->request_mutex);
+       mutex_lock(&h->request_mutex);
 
        if (!xs_write_all(h->fd, &msg, sizeof(msg)))
                goto fail;
@@ -342,7 +375,7 @@ static void *xs_talkv(struct xs_handle *
        if (!ret)
                goto fail;
 
-       pthread_mutex_unlock(&h->request_mutex);
+       mutex_unlock(&h->request_mutex);
 
        sigaction(SIGPIPE, &oldact, NULL);
        if (msg.type == XS_ERROR) {
@@ -362,7 +395,7 @@ fail:
 fail:
        /* We're in a bad state, so close fd. */
        saved_errno = errno;
-       pthread_mutex_unlock(&h->request_mutex);
+       mutex_unlock(&h->request_mutex);
        sigaction(SIGPIPE, &oldact, NULL);
 close_fd:
        close(h->fd);
@@ -556,16 +589,18 @@ bool xs_watch(struct xs_handle *h, const
 {
        struct iovec iov[2];
 
+#ifdef USE_PTHREAD
        /* We dynamically create a reader thread on demand. */
-       pthread_mutex_lock(&h->request_mutex);
+       mutex_lock(&h->request_mutex);
        if (!h->read_thr_exists) {
                if (pthread_create(&h->read_thr, NULL, read_thread, h) != 0) {
-                       pthread_mutex_unlock(&h->request_mutex);
+                       mutex_unlock(&h->request_mutex);
                        return false;
                }
                h->read_thr_exists = 1;
        }
-       pthread_mutex_unlock(&h->request_mutex);
+       mutex_unlock(&h->request_mutex);
+#endif
 
        iov[0].iov_base = (void *)path;
        iov[0].iov_len = strlen(path) + 1;
@@ -586,11 +621,11 @@ char **xs_read_watch(struct xs_handle *h
        char **ret, *strings, c = 0;
        unsigned int num_strings, i;
 
-       pthread_mutex_lock(&h->watch_mutex);
+       mutex_lock(&h->watch_mutex);
 
        /* Wait on the condition variable for a watch to fire. */
        while (list_empty(&h->watch_list))
-               pthread_cond_wait(&h->watch_condvar, &h->watch_mutex);
+               condvar_wait(&h->watch_condvar, &h->watch_mutex, h);
        msg = list_top(&h->watch_list, struct xs_stored_msg, list);
        list_del(&msg->list);
 
@@ -599,7 +634,7 @@ char **xs_read_watch(struct xs_handle *h
                while (read(h->watch_pipe[0], &c, 1) != 1)
                        continue;
 
-       pthread_mutex_unlock(&h->watch_mutex);
+       mutex_unlock(&h->watch_mutex);
 
        assert(msg->hdr.type == XS_WATCH_EVENT);
 
@@ -801,7 +836,7 @@ static int read_message(struct xs_handle
        body[msg->hdr.len] = '\0';
 
        if (msg->hdr.type == XS_WATCH_EVENT) {
-               pthread_mutex_lock(&h->watch_mutex);
+               mutex_lock(&h->watch_mutex);
 
                /* Kick users out of their select() loop. */
                if (list_empty(&h->watch_list) &&
@@ -810,22 +845,23 @@ static int read_message(struct xs_handle
                                continue;
 
                list_add_tail(&msg->list, &h->watch_list);
-               pthread_cond_signal(&h->watch_condvar);
-
-               pthread_mutex_unlock(&h->watch_mutex);
+
+               condvar_signal(&h->watch_condvar);
+
+               mutex_unlock(&h->watch_mutex);
        } else {
-               pthread_mutex_lock(&h->reply_mutex);
+               mutex_lock(&h->reply_mutex);
 
                /* There should only ever be one response pending! */
                if (!list_empty(&h->reply_list)) {
-                       pthread_mutex_unlock(&h->reply_mutex);
+                       mutex_unlock(&h->reply_mutex);
                        goto error;
                }
 
                list_add_tail(&msg->list, &h->reply_list);
-               pthread_cond_signal(&h->reply_condvar);
-
-               pthread_mutex_unlock(&h->reply_mutex);
+               condvar_signal(&h->reply_condvar);
+
+               mutex_unlock(&h->reply_mutex);
        }
 
        return 0;
@@ -838,6 +874,7 @@ static int read_message(struct xs_handle
        return -1;
 }
 
+#ifdef USE_PTHREAD
 static void *read_thread(void *arg)
 {
        struct xs_handle *h = arg;
@@ -847,6 +884,7 @@ static void *read_thread(void *arg)
 
        return NULL;
 }
+#endif
 
 /*
  * Local variables:
