[Xen-changelog] Change xenstore-domain messaging protocol to match what we use
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 2796f432858e37ae901cebd750790ccfd9a83133
# Parent  f8c725f1fce800d776a4abf84e081f6662823c74
Change xenstore-domain messaging protocol to match what we use for
other inter-domain comms (power-of-two-sized rings, and free-running
indexes). The interface is defined in the spirit of the console
protocol, so maybe some chance of merging them together later?

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r f8c725f1fce8 -r 2796f432858e linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c   Wed Oct 12 17:18:43 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c   Wed Oct 12 17:25:40 2005
@@ -33,29 +33,16 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <asm-xen/xenbus.h>
 #include "xenbus_comms.h"
 
-#define RINGBUF_DATASIZE ((PAGE_SIZE / 2) - sizeof(struct ringbuf_head))
-struct ringbuf_head
-{
-        u32 write; /* Next place to write to */
-        u32 read; /* Next place to read from */
-        u8 flags;
-        char buf[0];
-} __attribute__((packed));
-
 static int xenbus_irq;
 
 DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
 
-static inline struct ringbuf_head *outbuf(void)
+static inline struct xenstore_domain_interface *xenstore_domain_interface(void)
 {
         return mfn_to_virt(xen_start_info->store_mfn);
-}
-
-static inline struct ringbuf_head *inbuf(void)
-{
-        return mfn_to_virt(xen_start_info->store_mfn) + PAGE_SIZE/2;
 }
 
 static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
@@ -64,133 +51,96 @@
         return IRQ_HANDLED;
 }
 
-static int check_buffer(const struct ringbuf_head *h)
-{
-        return (h->write < RINGBUF_DATASIZE && h->read < RINGBUF_DATASIZE);
-}
-
-/* We can't fill last byte: would look like empty buffer. */
-static void *get_output_chunk(const struct ringbuf_head *h,
-                              void *buf, u32 *len)
-{
-        u32 read_mark;
-
-        if (h->read == 0)
-                read_mark = RINGBUF_DATASIZE - 1;
-        else
-                read_mark = h->read - 1;
-
-        /* Here to the end of buffer, unless they haven't read some out. */
-        *len = RINGBUF_DATASIZE - h->write;
-        if (read_mark >= h->write)
-                *len = read_mark - h->write;
-        return buf + h->write;
-}
-
-static const void *get_input_chunk(const struct ringbuf_head *h,
-                                   const void *buf, u32 *len)
-{
-        /* Here to the end of buffer, unless they haven't written some. */
-        *len = RINGBUF_DATASIZE - h->read;
-        if (h->write >= h->read)
-                *len = h->write - h->read;
-        return buf + h->read;
-}
-
-static void update_output_chunk(struct ringbuf_head *h, u32 len)
-{
-        h->write += len;
-        if (h->write == RINGBUF_DATASIZE)
-                h->write = 0;
-}
-
-static void update_input_chunk(struct ringbuf_head *h, u32 len)
-{
-        h->read += len;
-        if (h->read == RINGBUF_DATASIZE)
-                h->read = 0;
-}
-
-static int output_avail(struct ringbuf_head *out)
-{
-        unsigned int avail;
-
-        get_output_chunk(out, out->buf, &avail);
-        return avail != 0;
+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+        return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+                              XENSTORE_RING_IDX prod,
+                              char *buf, uint32_t *len)
+{
+        *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+        if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+                *len = XENSTORE_RING_SIZE - (prod - cons);
+        return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+                                   XENSTORE_RING_IDX prod,
+                                   const char *buf, uint32_t *len)
+{
+        *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+        if ((prod - cons) < *len)
+                *len = prod - cons;
+        return buf + MASK_XENSTORE_IDX(cons);
 }
 
 int xb_write(const void *data, unsigned len)
 {
-        struct ringbuf_head h;
-        struct ringbuf_head *out = outbuf();
-
-        do {
+        struct xenstore_domain_interface *intf = xenstore_domain_interface();
+        XENSTORE_RING_IDX cons, prod;
+
+        while (len != 0) {
                 void *dst;
                 unsigned int avail;
 
-                wait_event_interruptible(xb_waitq, output_avail(out));
-
-                /* Make local copy of header to check for sanity. */
-                h = *out;
-                if (!check_buffer(&h))
+                wait_event_interruptible(xb_waitq,
+                                         (intf->req_prod - intf->req_cons) !=
+                                         XENSTORE_RING_SIZE);
+
+                /* Read indexes, then verify. */
+                cons = intf->req_cons;
+                prod = intf->req_prod;
+                mb();
+                if (!check_indexes(cons, prod))
                         return -EIO;
 
-                dst = get_output_chunk(&h, out->buf, &avail);
+                dst = get_output_chunk(cons, prod, intf->req, &avail);
                 if (avail == 0)
                         continue;
                 if (avail > len)
                         avail = len;
 
-                /* Make sure we read header before we write data
-                 * (implied by data-dependency, but let's play safe). */
-                mb();
-
                 memcpy(dst, data, avail);
                 data += avail;
                 len -= avail;
 
                 /* Other side must not see new header until data is there. */
                 wmb();
-                update_output_chunk(out, avail);
+                intf->req_prod += avail;
 
                 /* This implies mb() before other side sees interrupt. */
                 notify_remote_via_evtchn(xen_start_info->store_evtchn);
-        } while (len != 0);
+        }
 
         return 0;
 }
 
-int xs_input_avail(void)
-{
-        unsigned int avail;
-        struct ringbuf_head *in = inbuf();
-
-        get_input_chunk(in, in->buf, &avail);
-        return avail != 0;
-}
-
 int xb_read(void *data, unsigned len)
 {
-        struct ringbuf_head h;
-        struct ringbuf_head *in = inbuf();
-        int was_full;
+        struct xenstore_domain_interface *intf = xenstore_domain_interface();
+        XENSTORE_RING_IDX cons, prod;
 
         while (len != 0) {
                 unsigned int avail;
                 const char *src;
 
-                wait_event_interruptible(xb_waitq, xs_input_avail());
-
-                h = *in;
-                if (!check_buffer(&h))
+                wait_event_interruptible(xb_waitq,
+                                         intf->rsp_cons != intf->rsp_prod);
+
+                /* Read indexes, then verify. */
+                cons = intf->rsp_cons;
+                prod = intf->rsp_prod;
+                mb();
+                if (!check_indexes(cons, prod))
                         return -EIO;
 
-                src = get_input_chunk(&h, in->buf, &avail);
+                src = get_input_chunk(cons, prod, intf->rsp, &avail);
                 if (avail == 0)
                         continue;
                 if (avail > len)
                         avail = len;
 
-                was_full = !output_avail(&h);
 
                 /* We must read header before we read data. */
                 rmb();
@@ -201,13 +151,12 @@
                 /* Other side must not see free space until we've copied out */
                 mb();
-
-                update_input_chunk(in, avail);
+                intf->rsp_cons += avail;
+
                 pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
 
-                /* If it was full, tell them we've taken some. */
-                if (was_full)
-                        /* Implies mb(): they will see new header. */
-                        notify_remote_via_evtchn(xen_start_info->store_evtchn);
+
+                /* Implies mb(): they will see new header. */
+                notify_remote_via_evtchn(xen_start_info->store_evtchn);
         }
 
         return 0;
diff -r f8c725f1fce8 -r 2796f432858e tools/xenstore/xenstored_core.c
--- a/tools/xenstore/xenstored_core.c   Wed Oct 12 17:18:43 2005
+++ b/tools/xenstore/xenstored_core.c   Wed Oct 12 17:25:40 2005
@@ -1586,7 +1586,7 @@
                         goto more;
                 }
 
-                if (domain_can_write(i)) {
+                if (domain_can_write(i) && !list_empty(&i->out_list)) {
                         handle_output(i);
                         goto more;
                 }
diff -r f8c725f1fce8 -r 2796f432858e tools/xenstore/xenstored_domain.c
--- a/tools/xenstore/xenstored_domain.c Wed Oct 12 17:18:43 2005
+++ b/tools/xenstore/xenstored_domain.c Wed Oct 12 17:25:40 2005
@@ -42,7 +42,6 @@
 static int *xc_handle;
 static int eventchn_fd;
 static int virq_port;
-static unsigned int ringbuf_datasize;
 
 struct domain
 {
@@ -66,10 +65,7 @@
         char *path;
 
         /* Shared page. */
-        void *page;
-
-        /* Input and output ringbuffer heads. */
-        struct ringbuf_head *input, *output;
+        struct xenstore_domain_interface *interface;
 
         /* The connection associated with this. */
         struct connection *conn;
@@ -80,14 +76,6 @@
 
 static LIST_HEAD(domains);
 
-struct ringbuf_head
-{
-        uint32_t write; /* Next place to write to */
-        uint32_t read; /* Next place to read from */
-        uint8_t flags;
-        char buf[0];
-} __attribute__((packed));
-
 #ifndef TESTING
 static void evtchn_notify(int port)
 {
@@ -100,91 +88,57 @@
 #endif
 
 /* FIXME: Mark connection as broken (close it?) when this happens. */
-static bool check_buffer(const struct ringbuf_head *h)
-{
-        return (h->write < ringbuf_datasize && h->read < ringbuf_datasize);
-}
-
-/* We can't fill last byte: would look like empty buffer. */
-static void *get_output_chunk(const struct ringbuf_head *h,
-                              void *buf, uint32_t *len)
-{
-        uint32_t read_mark;
-
-        if (h->read == 0)
-                read_mark = ringbuf_datasize - 1;
-        else
-                read_mark = h->read - 1;
-
-        /* Here to the end of buffer, unless they haven't read some out. */
-        *len = ringbuf_datasize - h->write;
-        if (read_mark >= h->write)
-                *len = read_mark - h->write;
-        return buf + h->write;
-}
-
-static const void *get_input_chunk(const struct ringbuf_head *h,
-                                   const void *buf, uint32_t *len)
-{
-        /* Here to the end of buffer, unless they haven't written some. */
-        *len = ringbuf_datasize - h->read;
-        if (h->write >= h->read)
-                *len = h->write - h->read;
-        return buf + h->read;
-}
-
-static void update_output_chunk(struct ringbuf_head *h, uint32_t len)
-{
-        h->write += len;
-        if (h->write == ringbuf_datasize)
-                h->write = 0;
-}
-
-static void update_input_chunk(struct ringbuf_head *h, uint32_t len)
-{
-        h->read += len;
-        if (h->read == ringbuf_datasize)
-                h->read = 0;
-}
-
-static bool buffer_has_input(const struct ringbuf_head *h)
-{
-        uint32_t len;
-
-        get_input_chunk(h, NULL, &len);
-        return (len != 0);
-}
-
-static bool buffer_has_output_room(const struct ringbuf_head *h)
-{
-        uint32_t len;
-
-        get_output_chunk(h, NULL, &len);
-        return (len != 0);
+static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+        return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+                              XENSTORE_RING_IDX prod,
+                              char *buf, uint32_t *len)
+{
+        *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+        if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+                *len = XENSTORE_RING_SIZE - (prod - cons);
+        return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+                                   XENSTORE_RING_IDX prod,
+                                   const char *buf, uint32_t *len)
+{
+        *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+        if ((prod - cons) < *len)
+                *len = prod - cons;
+        return buf + MASK_XENSTORE_IDX(cons);
 }
 
 static int writechn(struct connection *conn, const void *data, unsigned int len)
 {
         uint32_t avail;
         void *dest;
-        struct ringbuf_head h;
-
-        /* Must read head once, and before anything else, and verified. */
-        h = *conn->domain->output;
+        struct xenstore_domain_interface *intf = conn->domain->interface;
+        XENSTORE_RING_IDX cons, prod;
+
+        /* Must read indexes once, and before anything else, and verified. */
+        cons = intf->rsp_cons;
+        prod = intf->rsp_prod;
         mb();
-        if (!check_buffer(&h)) {
+        if (!check_indexes(cons, prod)) {
                 errno = EIO;
                 return -1;
         }
 
-        dest = get_output_chunk(&h, conn->domain->output->buf, &avail);
+        dest = get_output_chunk(cons, prod, intf->rsp, &avail);
         if (avail < len)
                 len = avail;
 
         memcpy(dest, data, len);
         mb();
-        update_output_chunk(conn->domain->output, len);
+        intf->rsp_prod += len;
+
         evtchn_notify(conn->domain->port);
+
         return len;
 }
 
@@ -192,32 +146,29 @@
 {
         uint32_t avail;
         const void *src;
-        struct ringbuf_head h;
-        bool was_full;
-
-        /* Must read head once, and before anything else, and verified. */
-        h = *conn->domain->input;
+        struct xenstore_domain_interface *intf = conn->domain->interface;
+        XENSTORE_RING_IDX cons, prod;
+
+        /* Must read indexes once, and before anything else, and verified. */
+        cons = intf->req_cons;
+        prod = intf->req_prod;
         mb();
-        if (!check_buffer(&h)) {
+        if (!check_indexes(cons, prod)) {
                 errno = EIO;
                 return -1;
         }
 
-        src = get_input_chunk(&h, conn->domain->input->buf, &avail);
+        src = get_input_chunk(cons, prod, intf->req, &avail);
         if (avail < len)
                 len = avail;
 
-        was_full = !buffer_has_output_room(&h);
         memcpy(data, src, len);
         mb();
-        update_input_chunk(conn->domain->input, len);
-        /* FIXME: Probably not neccessary. */
-        mb();
-
-        /* If it was full, tell them we've taken some. */
-        if (was_full)
-                evtchn_notify(conn->domain->port);
+        intf->req_cons += len;
+
+        evtchn_notify(conn->domain->port);
+
         return len;
 }
 
@@ -234,8 +185,8 @@
                         eprintf("> Unbinding port %i failed!\n", domain->port);
         }
 
-        if (domain->page)
-                munmap(domain->page, getpagesize());
+        if (domain->interface)
+                munmap(domain->interface, getpagesize());
 
         return 0;
 }
@@ -285,13 +236,14 @@
 
 bool domain_can_read(struct connection *conn)
 {
-        return buffer_has_input(conn->domain->input);
+        struct xenstore_domain_interface *intf = conn->domain->interface;
+        return (intf->req_cons != intf->req_prod);
 }
 
 bool domain_can_write(struct connection *conn)
 {
-        return (!list_empty(&conn->out_list) &&
-                buffer_has_output_room(conn->domain->output));
+        struct xenstore_domain_interface *intf = conn->domain->interface;
+        return ((intf->rsp_prod - intf->rsp_cons) != XENSTORE_RING_SIZE);
 }
 
 static struct domain *new_domain(void *context, unsigned int domid,
@@ -307,19 +259,14 @@
         domain->shutdown = 0;
         domain->domid = domid;
         domain->path = talloc_strdup(domain, path);
-        domain->page = xc_map_foreign_range(*xc_handle, domain->domid,
-                                            getpagesize(),
-                                            PROT_READ|PROT_WRITE,
-                                            mfn);
-        if (!domain->page)
+        domain->interface = xc_map_foreign_range(
+                *xc_handle, domain->domid,
+                getpagesize(), PROT_READ|PROT_WRITE, mfn);
+        if (!domain->interface)
                 return NULL;
 
         list_add(&domain->list, &domains);
         talloc_set_destructor(domain, destroy_domain);
-
-        /* One in each half of page. */
-        domain->input = domain->page;
-        domain->output = domain->page + getpagesize()/2;
 
         /* Tell kernel we're interested in this event. */
         bind.remote_domain = domid;
@@ -504,9 +451,6 @@
         struct ioctl_evtchn_bind_virq bind;
         int rc;
 
-        /* The size of the ringbuffer: half a page minus head structure. */
-        ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head);
-
         xc_handle = talloc(talloc_autofree_context(), int);
         if (!xc_handle)
                 barf_perror("Failed to allocate domain handle");
@@ -548,3 +492,13 @@
 
         return eventchn_fd;
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r f8c725f1fce8 -r 2796f432858e tools/xenstore/xs_test.c
--- a/tools/xenstore/xs_test.c  Wed Oct 12 17:18:43 2005
+++ b/tools/xenstore/xs_test.c  Wed Oct 12 17:25:40 2005
@@ -50,72 +50,33 @@
 static bool print_input = false;
 static unsigned int linenum = 0;
 
-struct ringbuf_head
-{
-        uint32_t write; /* Next place to write to */
-        uint32_t read; /* Next place to read from */
-        uint8_t flags;
-        char buf[0];
-} __attribute__((packed));
-
-static struct ringbuf_head *out, *in;
-static unsigned int ringbuf_datasize;
 static int daemon_pid;
+static struct xenstore_domain_interface *interface;
 
 /* FIXME: Mark connection as broken (close it?) when this happens. */
-static bool check_buffer(const struct ringbuf_head *h)
-{
-        return (h->write < ringbuf_datasize && h->read < ringbuf_datasize);
-}
-
-/* We can't fill last byte: would look like empty buffer. */
-static void *get_output_chunk(const struct ringbuf_head *h,
-                              void *buf, uint32_t *len)
-{
-        uint32_t read_mark;
-
-        if (h->read == 0)
-                read_mark = ringbuf_datasize - 1;
-        else
-                read_mark = h->read - 1;
-
-        /* Here to the end of buffer, unless they haven't read some out. */
-        *len = ringbuf_datasize - h->write;
-        if (read_mark >= h->write)
-                *len = read_mark - h->write;
-        return buf + h->write;
-}
-
-static const void *get_input_chunk(const struct ringbuf_head *h,
-                                   const void *buf, uint32_t *len)
-{
-        /* Here to the end of buffer, unless they haven't written some. */
-        *len = ringbuf_datasize - h->read;
-        if (h->write >= h->read)
-                *len = h->write - h->read;
-        return buf + h->read;
-}
-
-static int output_avail(struct ringbuf_head *out)
-{
-        unsigned int avail;
-
-        get_output_chunk(out, out->buf, &avail);
-        return avail != 0;
-}
-
-static void update_output_chunk(struct ringbuf_head *h, uint32_t len)
-{
-        h->write += len;
-        if (h->write == ringbuf_datasize)
-                h->write = 0;
-}
-
-static void update_input_chunk(struct ringbuf_head *h, uint32_t len)
-{
-        h->read += len;
-        if (h->read == ringbuf_datasize)
-                h->read = 0;
+static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+        return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+                              XENSTORE_RING_IDX prod,
+                              char *buf, uint32_t *len)
+{
+        *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+        if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+                *len = XENSTORE_RING_SIZE - (prod - cons);
+        return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+                                   XENSTORE_RING_IDX prod,
+                                   const char *buf, uint32_t *len)
+{
+        *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+        if ((prod - cons) < *len)
+                *len = prod - cons;
+        return buf + MASK_XENSTORE_IDX(cons);
 }
 
 /* FIXME: We spin, and we're sloppy. */
@@ -123,25 +84,28 @@
                           void *data, unsigned int len)
 {
         unsigned int avail;
-        int was_full;
-
-        if (!check_buffer(in))
-                barf("Corrupt buffer");
-
-        was_full = !output_avail(in);
+        struct xenstore_domain_interface *intf = interface;
+        XENSTORE_RING_IDX cons, prod;
+        const void *src;
+
         while (len) {
-                const void *src = get_input_chunk(in, in->buf, &avail);
+                cons = intf->rsp_cons;
+                prod = intf->rsp_prod;
+                if (!check_indexes(cons, prod))
+                        barf("Corrupt buffer");
+
+                src = get_input_chunk(cons, prod, intf->rsp, &avail);
                 if (avail > len)
                         avail = len;
                 memcpy(data, src, avail);
                 data += avail;
                 len -= avail;
-                update_input_chunk(in, avail);
+                intf->rsp_cons += avail;
         }
 
         /* Tell other end we read something. */
-        if (was_full)
-                kill(daemon_pid, SIGUSR2);
+        kill(daemon_pid, SIGUSR2);
+
         return true;
 }
 
@@ -149,22 +113,28 @@
                            const void *data, unsigned int len)
 {
         uint32_t avail;
-
-        if (!check_buffer(out))
-                barf("Corrupt buffer");
+        struct xenstore_domain_interface *intf = interface;
+        XENSTORE_RING_IDX cons, prod;
+        void *dst;
 
         while (len) {
-                void *dst = get_output_chunk(out, out->buf, &avail);
+                cons = intf->req_cons;
+                prod = intf->req_prod;
+                if (!check_indexes(cons, prod))
+                        barf("Corrupt buffer");
+
+                dst = get_output_chunk(cons, prod, intf->req, &avail);
                 if (avail > len)
                         avail = len;
                 memcpy(dst, data, avail);
                 data += avail;
                 len -= avail;
-                update_output_chunk(out, avail);
+                intf->req_prod += avail;
         }
 
         /* Tell other end we wrote something. */
         kill(daemon_pid, SIGUSR2);
+
         return true;
 }
 
@@ -552,21 +522,21 @@
                 break;
 
         fd = open("/tmp/xcmap", O_RDWR);
-        /* Set in and out pointers. */
-        out = mmap(NULL, getpagesize(), PROT_WRITE|PROT_READ, MAP_SHARED,fd,0);
-        if (out == MAP_FAILED)
+        /* Set shared comms page. */
+        interface = mmap(NULL, getpagesize(), PROT_WRITE|PROT_READ,
+                         MAP_SHARED,fd,0);
+        if (interface == MAP_FAILED)
                 barf_perror("Failed to map /tmp/xcmap page");
-        in = (void *)out + getpagesize() / 2;
         close(fd);
 
         /* Tell them the event channel and our PID. */
-        *(int *)((void *)out + 32) = getpid();
-        *(uint16_t *)((void *)out + 36) = atoi(eventchn);
+        *(int *)((void *)interface + 32) = getpid();
+        *(uint16_t *)((void *)interface + 36) = atoi(eventchn);
 
         if (!xs_introduce_domain(handles[handle], atoi(domid), atol(mfn),
                                  atoi(eventchn), path)) {
                 failed(handle);
-                munmap(out, getpagesize());
+                munmap(interface, getpagesize());
                 return;
         }
         output("handle is %i\n", i);
@@ -576,7 +546,7 @@
         handles[i]->fd = -2;
 
         /* Read in daemon pid. */
-        daemon_pid = *(int *)((void *)out + 32);
+        daemon_pid = *(int *)((void *)interface + 32);
 }
 
 static void do_release(unsigned int handle, const char *domid)
@@ -823,9 +793,6 @@
                 usage();
 
-        /* The size of the ringbuffer: half a page minus head structure. */
-        ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head);
-
         signal(SIGALRM, alarmed);
         while (fgets(line, sizeof(line), stdin))
                 do_command(0, line);
diff -r f8c725f1fce8 -r 2796f432858e xen/include/public/io/xs_wire.h
--- a/xen/include/public/io/xs_wire.h   Wed Oct 12 17:18:43 2005
+++ b/xen/include/public/io/xs_wire.h   Wed Oct 12 17:25:40 2005
@@ -93,6 +93,17 @@
     XS_WATCH_TOKEN,
 };
 
+/* Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+    XENSTORE_RING_IDX req_cons, req_prod;
+    XENSTORE_RING_IDX rsp_cons, rsp_prod;
+};
+
 #endif /* _XS_WIRE_H */
 
 /*
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
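The index arithmetic the patch introduces can be exercised on its own. Below is a minimal single-process sketch (not part of the changeset): RING_SIZE, MASK(), ring_write() and ring_read() are illustrative names, and the shared-page mapping, memory barriers (wmb()/mb()) and event-channel notification that the real producer and consumer need are deliberately left out.

/* ring_sketch.c -- illustrative only; not part of the Xen tree. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 1024                     /* must be a power of two */
#define MASK(idx) ((idx) & (RING_SIZE - 1))
typedef uint32_t RING_IDX;

struct ring {
        char buf[RING_SIZE];
        RING_IDX cons, prod;               /* free-running; never reset to 0 by hand */
};

/* Accept up to len bytes; returns how many fitted. */
static unsigned int ring_write(struct ring *r, const char *data, unsigned int len)
{
        unsigned int done = 0;

        while (done < len && (r->prod - r->cons) != RING_SIZE) {
                /* Contiguous room up to the end of buf, capped by free space. */
                unsigned int avail = RING_SIZE - MASK(r->prod);
                unsigned int space = RING_SIZE - (r->prod - r->cons);
                if (space < avail)
                        avail = space;
                if (avail > len - done)
                        avail = len - done;
                memcpy(r->buf + MASK(r->prod), data + done, avail);
                /* The real producer issues wmb() here, then notifies. */
                r->prod += avail;
                done += avail;
        }
        return done;
}

/* Drain up to len bytes; returns how many were available. */
static unsigned int ring_read(struct ring *r, char *data, unsigned int len)
{
        unsigned int done = 0;

        while (done < len && r->cons != r->prod) {
                /* Contiguous data up to the end of buf, capped by what's there. */
                unsigned int avail = RING_SIZE - MASK(r->cons);
                unsigned int used = r->prod - r->cons;
                if (used < avail)
                        avail = used;
                if (avail > len - done)
                        avail = len - done;
                memcpy(data + done, r->buf + MASK(r->cons), avail);
                /* The real consumer orders this with mb(), then notifies. */
                r->cons += avail;
                done += avail;
        }
        return done;
}

int main(void)
{
        static struct ring r;
        const char *msg = "free-running indexes";
        char out[64];
        unsigned int n;

        ring_write(&r, msg, strlen(msg));
        n = ring_read(&r, out, sizeof(out) - 1);
        out[n] = '\0';
        printf("read %u bytes: \"%s\"\n", n, out);
        return 0;
}

The same arithmetic is what check_indexes() in the patch guards: because the indexes run freely and wrap modulo 2^32, the unsigned difference (prod - cons) is the number of unconsumed bytes, and any value larger than XENSTORE_RING_SIZE means the other side has corrupted its half of the shared page.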