
[Minios-devel] [UNIKRAFT PATCH v2 04/10] plat/xen: Add support for communication with Xenstore daemon


  • To: minios-devel@xxxxxxxxxxxxx
  • From: Costin Lupu <costin.lupu@xxxxxxxxx>
  • Date: Thu, 23 Aug 2018 13:59:32 +0300
  • Cc: simon.kuenzer@xxxxxxxxx, yuri.volchkov@xxxxxxxxx, sharan.santhanam@xxxxxxxxx
  • Delivery-date: Thu, 23 Aug 2018 10:59:54 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

Add support for communication with the Xenstore daemon via the shared
Xenstore page. In Unikraft, incoming messages are processed by a
dedicated Xenstore thread.
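
As a usage sketch (not part of this patch), a consumer could issue an
XS_READ request through xs_msg_reply() roughly as follows. The helper
name is hypothetical, and the sketch assumes XBT_NIL ("no transaction")
is provided by xenbus/xs.h; the reply buffer is malloc'ed by xs_comms
and owned by the caller, as documented in xs_comms.h.

  /* Hypothetical helper, for illustration only; assumes <string.h>,
   * "xs_comms.h" and XBT_NIL from xenbus/xs.h.
   */
  static int xs_read_example(const char *path, char **value_out)
  {
          struct xs_iovec req, rep;
          int err;

          /* The request payload is the NUL-terminated node path */
          req.data = (void *) path;
          req.len  = strlen(path) + 1;

          /* Blocks until the Xenstore thread matches the reply */
          err = xs_msg_reply(XS_READ, XBT_NIL, &req, 1, &rep);
          if (err)
                  return err;     /* negative errno */

          /* Reply data is malloc'ed by xs_comms; caller frees it */
          *value_out = rep.data;
          return 0;
  }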

Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
---
 plat/xen/Makefile.uk       |   1 +
 plat/xen/xenbus/xenbus.c   |   8 +
 plat/xen/xenbus/xs_comms.c | 535 +++++++++++++++++++++++++++++++++++++++++++++
 plat/xen/xenbus/xs_comms.h |  64 ++++++
 4 files changed, 608 insertions(+)
 create mode 100644 plat/xen/xenbus/xs_comms.c
 create mode 100644 plat/xen/xenbus/xs_comms.h

diff --git a/plat/xen/Makefile.uk b/plat/xen/Makefile.uk
index b8c70e1..2703a54 100644
--- a/plat/xen/Makefile.uk
+++ b/plat/xen/Makefile.uk
@@ -79,4 +79,5 @@ LIBXENBUS_ASINCLUDES-y         += $(LIBXENPLAT_ASINCLUDES-y)
 LIBXENBUS_CFLAGS-y             += $(LIBXENPLAT_CFLAGS-y)
 LIBXENBUS_CINCLUDES-y          += $(LIBXENPLAT_CINCLUDES-y)
 LIBXENBUS_SRCS-y               += $(LIBXENPLAT_BASE)/xenbus/xenbus.c
+LIBXENBUS_SRCS-y               += $(LIBXENPLAT_BASE)/xenbus/xs_comms.c
 endif
diff --git a/plat/xen/xenbus/xenbus.c b/plat/xen/xenbus/xenbus.c
index 1bc57c3..a20546b 100644
--- a/plat/xen/xenbus/xenbus.c
+++ b/plat/xen/xenbus/xenbus.c
@@ -43,6 +43,7 @@
 #include <uk/errptr.h>
 #include <uk/assert.h>
 #include <xenbus/xenbus.h>
+#include "xs_comms.h"
 
 static struct xenbus_handler xbh;
 
@@ -86,6 +87,13 @@ static int xenbus_init(struct uk_alloc *a)
 
        xbh.a = a;
 
+       ret = xs_comms_init();
+       if (ret) {
+               uk_printd(DLVL_ERR,
+                       "Error initializing Xenstore communication.\n");
+               return ret;
+       }
+
        UK_TAILQ_FOREACH_SAFE(drv, &xbh.drv_list, next, drv_next) {
                if (drv->init) {
                        ret = drv->init(a);
diff --git a/plat/xen/xenbus/xs_comms.c b/plat/xen/xenbus/xs_comms.c
new file mode 100644
index 0000000..df3739d
--- /dev/null
+++ b/plat/xen/xenbus/xs_comms.c
@@ -0,0 +1,535 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Authors: Steven Smith (sos22@xxxxxxxxx)
+ *          Grzegorz Milos (gm281@xxxxxxxxx)
+ *          John D. Ramsdell
+ *          Costin Lupu <costin.lupu@xxxxxxxxx>
+ *
+ * Copyright (c) 2006, Cambridge University
+ *               2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+ */
+/*
+ * Communication with Xenstore
+ * Ported from Mini-OS xenbus.c
+ */
+
+#include <string.h>
+#include <uk/errptr.h>
+#include <uk/bitmap.h>
+#include <uk/wait.h>
+#include <uk/arch/spinlock.h>
+#include <common/events.h>
+#include <xen-x86/mm.h>
+#include <xen-x86/setup.h>
+#include "xs_comms.h"
+
+
+/*
+ * Xenstore handler structure
+ */
+struct xs_handler {
+       /**< Communication: event channel */
+       evtchn_port_t evtchn;
+       /**< Communication: shared memory */
+       struct xenstore_domain_interface *buf;
+       /**< Thread processing incoming xs replies */
+       struct uk_thread *thread;
+       /**< Waiting queue for notifying incoming xs replies */
+       struct uk_waitq waitq;
+};
+
+static struct xs_handler xsh = {
+       .waitq = __WAIT_QUEUE_INITIALIZER(xsh.waitq),
+};
+
+static int xs_avail_space_for_read(unsigned int req_size)
+{
+       return (xsh.buf->rsp_prod - xsh.buf->rsp_cons >= req_size);
+}
+
+static int xs_avail_space_for_write(unsigned int req_size)
+{
+       return (xsh.buf->req_prod - xsh.buf->req_cons +
+               req_size <= XENSTORE_RING_SIZE);
+}
+
+/*
+ * In-flight request structure.
+ */
+struct xs_request {
+       /**< Waiting queue for incoming reply notification */
+       struct uk_waitq waitq;
+       /**< Request header */
+       struct xsd_sockmsg hdr;
+       /**< Received reply */
+       struct {
+               /**< Reply string + size */
+               struct xs_iovec iovec;
+               /**< Error number */
+               int errornum;
+               /**< Non-zero for incoming replies */
+               int recvd;
+       } reply;
+};
+
+/*
+ * Pool of in-flight requests.
+ * Request IDs are reused, hence the limited set of entries.
+ *
+ * TODO sequential IDs
+ */
+struct xs_request_pool {
+       /**< Number of live requests */
+       __u32 num_live;
+       /**< Lock */
+       spinlock_t lock;
+       /**< Waiting queue for 'not-full' notifications */
+       struct uk_waitq waitq;
+
+       /* Map size is power of 2 */
+#define REQID_MAP_SHIFT  5
+#define REQID_MAP_SIZE   (1 << REQID_MAP_SHIFT)
+       unsigned long entries_bm[BITS_TO_LONGS(REQID_MAP_SIZE)];
+       /**< Entries */
+       struct xs_request entries[REQID_MAP_SIZE];
+};
+
+static struct xs_request_pool xs_req_pool;
+
+static void xs_request_pool_init(struct xs_request_pool *pool)
+{
+       struct xs_request *xs_req;
+
+       pool->num_live = 0;
+       ukarch_spin_lock_init(&pool->lock);
+       uk_waitq_init(&pool->waitq);
+       bitmap_zero(pool->entries_bm, REQID_MAP_SIZE);
+       for (int i = 0; i < REQID_MAP_SIZE; i++) {
+               xs_req = &pool->entries[i];
+               xs_req->hdr.req_id = i;
+               uk_waitq_init(&xs_req->waitq);
+       }
+}
+
+/*
+ * Allocate an identifier for a Xenstore request.
+ * Blocks if none are available.
+ */
+static struct xs_request *xs_request_get(void)
+{
+       unsigned long entry_idx;
+
+       /* wait for an available entry */
+       while (1) {
+               ukarch_spin_lock(&xs_req_pool.lock);
+
+               if (xs_req_pool.num_live < REQID_MAP_SIZE)
+                       break;
+
+               ukarch_spin_unlock(&xs_req_pool.lock);
+
+               uk_waitq_wait_event(&xs_req_pool.waitq,
+                       (xs_req_pool.num_live < REQID_MAP_SIZE));
+       }
+
+       /* find an available entry */
+       entry_idx =
+               find_first_zero_bit(xs_req_pool.entries_bm, REQID_MAP_SIZE);
+
+       set_bit(entry_idx, xs_req_pool.entries_bm);
+       xs_req_pool.num_live++;
+
+       ukarch_spin_unlock(&xs_req_pool.lock);
+
+       return &xs_req_pool.entries[entry_idx];
+}
+
+/* Release a request identifier */
+static void xs_request_put(struct xs_request *xs_req)
+{
+       __u32 reqid = xs_req->hdr.req_id;
+
+       ukarch_spin_lock(&xs_req_pool.lock);
+
+       UK_ASSERT(test_bit(reqid, xs_req_pool.entries_bm) == 1);
+
+       clear_bit(reqid, xs_req_pool.entries_bm);
+       xs_req_pool.num_live--;
+
+       if (xs_req_pool.num_live == 0 ||
+               xs_req_pool.num_live == REQID_MAP_SIZE - 1)
+               uk_waitq_wake_up(&xs_req_pool.waitq);
+
+       ukarch_spin_unlock(&xs_req_pool.lock);
+}
+
+/*
+ * Send request to Xenstore. A request is made of multiple iovecs which are
+ * preceded by a single iovec referencing the request header. The iovecs are
+ * seen by Xenstore as if sent atomically. This can block.
+ *
+ * TODO A different thread (e.g. Xenbus thread) should write the messages if
+ * we expect high loads of requests.
+ */
+static int xs_msg_write(struct xsd_sockmsg *xsd_req,
+       const struct xs_iovec *iovec)
+{
+       XENSTORE_RING_IDX prod;
+       const struct xs_iovec *crnt_iovec;
+       struct xs_iovec hdr_iovec;
+       unsigned int req_size, req_off;
+       unsigned int buf_off;
+       unsigned int this_chunk_len;
+       int rc;
+
+       req_size = sizeof(*xsd_req) + xsd_req->len;
+       if (req_size > XENSTORE_RING_SIZE)
+               return -ENOSPC;
+
+       hdr_iovec.data = xsd_req;
+       hdr_iovec.len  = sizeof(*xsd_req);
+
+       /* The batched iovecs are preceded by a single header. */
+       crnt_iovec = &hdr_iovec;
+
+       /*
+        * Wait for the ring to drain to the point where
+        * we can send the message.
+        */
+       while (!xs_avail_space_for_write(req_size)) {
+               /* Wait for there to be space on the ring */
+               uk_printd(DLVL_EXTRA,
+                       "prod %d, len %d, cons %d, size %d; waiting.\n",
+                       xsh.buf->req_prod, req_size,
+                       xsh.buf->req_cons, XENSTORE_RING_SIZE);
+
+               uk_waitq_wait_event(&xsh.waitq,
+                       xs_avail_space_for_write(req_size));
+               uk_printd(DLVL_EXTRA, "Back from wait.\n");
+       }
+
+       /* We must write requests after reading the consumer index. */
+       mb();
+
+       /*
+        * We're now guaranteed to be able to send the message
+        * without overflowing the ring. Do so.
+        */
+
+       prod = xsh.buf->req_prod;
+       req_off = 0;
+       buf_off = 0;
+       while (req_off < req_size) {
+               this_chunk_len = MIN(crnt_iovec->len - buf_off,
+                       XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
+
+               memcpy(
+                       (char *) xsh.buf->req + MASK_XENSTORE_IDX(prod),
+                       (char *) crnt_iovec->data + buf_off,
+                       this_chunk_len
+               );
+
+               prod += this_chunk_len;
+               req_off += this_chunk_len;
+               buf_off += this_chunk_len;
+
+               if (buf_off == crnt_iovec->len) {
+                       buf_off = 0;
+                       if (crnt_iovec == &hdr_iovec)
+                               crnt_iovec = iovec;
+                       else
+                               crnt_iovec++;
+               }
+       }
+
+       uk_printd(DLVL_EXTRA, "Complete main loop of %s.\n", __func__);
+       UK_ASSERT(buf_off == 0);
+       UK_ASSERT(req_off == req_size);
+       UK_ASSERT(prod <= xsh.buf->req_cons + XENSTORE_RING_SIZE);
+
+       /* Remote must see entire message before updating indexes */
+       wmb();
+
+       xsh.buf->req_prod += req_size;
+
+       /* Send evtchn to notify remote */
+       rc = notify_remote_via_evtchn(xsh.evtchn);
+       UK_ASSERT(rc == 0);
+
+       return 0;
+}
+
+int xs_msg_reply(enum xsd_sockmsg_type msg_type, xenbus_transaction_t xbt,
+       const struct xs_iovec *req_iovecs, int req_iovecs_num,
+       struct xs_iovec *rep_iovec)
+{
+       struct xs_request *xs_req;
+       int err;
+
+       if (req_iovecs == NULL)
+               return -EINVAL;
+
+       xs_req = xs_request_get();
+       xs_req->hdr.type = msg_type;
+       /* req_id was set at pool init */
+       xs_req->hdr.tx_id = xbt;
+       xs_req->hdr.len = 0;
+       for (int i = 0; i < req_iovecs_num; i++)
+               xs_req->hdr.len += req_iovecs[i].len;
+
+       xs_req->reply.recvd = 0;
+
+       /* send the request; negate to match the positive errornum convention */
+       err = -xs_msg_write(&xs_req->hdr, req_iovecs);
+       if (err)
+               goto out;
+
+       /* wait for the reply */
+       uk_waitq_wait_event(&xs_req->waitq,
+               xs_req->reply.recvd != 0);
+
+       err = xs_req->reply.errornum;
+       if (err == 0) {
+               if (rep_iovec)
+                       *rep_iovec = xs_req->reply.iovec;
+               else
+                       free(xs_req->reply.iovec.data);
+       }
+
+out:
+       xs_request_put(xs_req);
+
+       return (-err);
+}
+
+/*
+ * Converts a Xenstore reply error to a positive error number.
+ * Returns 0 if the reply is successful.
+ */
+static int reply_to_errno(const char *reply)
+{
+       int err = 0;
+
+       for (int i = 0; i < (int) ARRAY_SIZE(xsd_errors); i++) {
+               if (!strcmp(reply, xsd_errors[i].errstring)) {
+                       err = xsd_errors[i].errnum;
+                       goto out;
+               }
+       }
+
+       uk_printd(DLVL_WARN, "Unknown Xenstore error: %s\n", reply);
+       err = EINVAL;
+
+out:
+       return err;
+}
+
+/* Process an incoming xs reply */
+static void process_reply(struct xsd_sockmsg *hdr, char *payload)
+{
+       struct xs_request *xs_req;
+
+       if (!test_bit(hdr->req_id, xs_req_pool.entries_bm)) {
+               uk_printd(DLVL_WARN, "Invalid reply id=%d\n", hdr->req_id);
+               free(payload);
+               return;
+       }
+
+       xs_req = &xs_req_pool.entries[hdr->req_id];
+
+       if (hdr->type == XS_ERROR) {
+               xs_req->reply.errornum = reply_to_errno(payload);
+               free(payload);
+
+       } else if (hdr->type != xs_req->hdr.type) {
+               uk_printd(DLVL_WARN, "Mismatching message type: %d\n", hdr->type);
+               free(payload);
+               return;
+
+       } else {
+               /* set reply */
+               xs_req->reply.iovec.data = payload;
+               xs_req->reply.iovec.len = hdr->len;
+               xs_req->reply.errornum = 0;
+       }
+
+       xs_req->reply.recvd = 1;
+
+       /* notify waiting requester */
+       uk_waitq_wake_up(&xs_req->waitq);
+}
+
+/* Process an incoming xs watch event */
+static void process_watch_event(char *watch_msg)
+{
+       /* TODO */
+}
+
+static void memcpy_from_ring(const char *ring, char *dest, int off, int len)
+{
+       int c1, c2;
+
+       c1 = MIN(len, XENSTORE_RING_SIZE - off);
+       c2 = len - c1;
+
+       memcpy(dest, ring + off, c1);
+       if (c2)
+               memcpy(dest + c1, ring, c2);
+}
+
+static void xs_msg_read(struct xsd_sockmsg *hdr)
+{
+       XENSTORE_RING_IDX cons;
+       char *payload;
+
+       payload = malloc(hdr->len + 1);
+       if (payload == NULL) {
+               uk_printd(DLVL_WARN,
+                       "No memory available for saving Xenstore message!\n");
+               return;
+       }
+
+       cons = xsh.buf->rsp_cons;
+
+       /* copy payload */
+       memcpy_from_ring(
+               xsh.buf->rsp,
+               payload,
+               MASK_XENSTORE_IDX(cons + sizeof(*hdr)),
+               hdr->len
+       );
+       payload[hdr->len] = '\0';
+
+       /* Remote must not see available space until we've copied the reply */
+       mb();
+       xsh.buf->rsp_cons += sizeof(*hdr) + hdr->len;
+
+       if (xsh.buf->rsp_prod - cons >= XENSTORE_RING_SIZE)
+               notify_remote_via_evtchn(xsh.evtchn);
+
+       if (hdr->type == XS_WATCH_EVENT)
+               process_watch_event(payload);
+       else
+               process_reply(hdr, payload);
+}
+
+static void xs_thread_func(void *ign __unused)
+{
+       struct xsd_sockmsg msg;
+       XENSTORE_RING_IDX prod = xsh.buf->rsp_prod;
+
+       for (;;) {
+               /* wait for incoming xs response */
+               uk_waitq_wait_event(&xsh.waitq, prod != xsh.buf->rsp_prod);
+
+               while (1) {
+                       prod = xsh.buf->rsp_prod;
+
+                       uk_printd(DLVL_EXTRA, "Rsp_cons %d, rsp_prod %d.\n",
+                               xsh.buf->rsp_cons, xsh.buf->rsp_prod);
+
+                       if (!xs_avail_space_for_read(sizeof(msg)))
+                               break;
+
+                       /* Make sure data is read after reading the indexes */
+                       rmb();
+
+                       /* copy the message header */
+                       memcpy_from_ring(
+                               xsh.buf->rsp,
+                               (char *) &msg,
+                               MASK_XENSTORE_IDX(xsh.buf->rsp_cons),
+                               sizeof(msg)
+                       );
+
+                       uk_printd(DLVL_EXTRA, "Msg len %lu, %u avail, id %u.\n",
+                               msg.len + sizeof(msg),
+                               xsh.buf->rsp_prod - xsh.buf->rsp_cons,
+                               msg.req_id);
+
+                       if (!xs_avail_space_for_read(sizeof(msg) + msg.len))
+                               break;
+
+                       /* Make sure data is read after reading the indexes */
+                       rmb();
+
+                       uk_printd(DLVL_EXTRA, "Message is good.\n");
+                       xs_msg_read(&msg);
+               }
+       }
+}
+
+static void xs_evtchn_handler(evtchn_port_t port,
+               struct __regs *regs __unused, void *ign __unused)
+{
+       UK_ASSERT(xsh.evtchn == port);
+       uk_waitq_wake_up(&xsh.waitq);
+}
+
+int xs_comms_init(void)
+{
+       struct uk_thread *thread;
+       evtchn_port_t port;
+
+       xs_request_pool_init(&xs_req_pool);
+
+       uk_waitq_init(&xsh.waitq);
+
+       thread = uk_thread_create("xenstore", xs_thread_func, NULL);
+       if (PTRISERR(thread))
+               return PTR2ERR(thread);
+
+       xsh.thread = thread;
+
+       xsh.evtchn = HYPERVISOR_start_info->store_evtchn;
+       xsh.buf = mfn_to_virt(HYPERVISOR_start_info->store_mfn);
+
+       port = bind_evtchn(xsh.evtchn, xs_evtchn_handler, NULL);
+       UK_ASSERT(port == xsh.evtchn);
+       unmask_evtchn(xsh.evtchn);
+
+       uk_printd(DLVL_INFO,
+               "Xenstore connection initialised on port %d, buf %p (mfn %#lx)\n",
+               port, xsh.buf, HYPERVISOR_start_info->store_mfn);
+
+       return 0;
+}
+
+void xs_comms_fini(void)
+{
+       mask_evtchn(xsh.evtchn);
+       unbind_evtchn(xsh.evtchn);
+
+       xsh.buf = NULL;
+
+       /* TODO stop thread, instead of killing it */
+       uk_thread_destroy(xsh.thread);
+       xsh.thread = NULL;
+}
diff --git a/plat/xen/xenbus/xs_comms.h b/plat/xen/xenbus/xs_comms.h
new file mode 100644
index 0000000..9b558d0
--- /dev/null
+++ b/plat/xen/xenbus/xs_comms.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Authors: Costin Lupu <costin.lupu@xxxxxxxxx>
+ *
+ * Copyright (c) 2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
+ */
+
+#ifndef __XS_COMMS_H__
+#define __XS_COMMS_H__
+
+#include <xen/io/xs_wire.h>
+#include <xenbus/xs.h>
+
+int  xs_comms_init(void);
+void xs_comms_fini(void);
+
+struct xs_iovec {
+       void *data;
+       unsigned int len;
+};
+
+/*
+ * Sends a message to Xenstore and blocks waiting for a reply.
+ * The reply is malloc'ed and should be freed by the caller.
+ *
+ * @param msg_type Xenstore message type
+ * @param xbt Xenbus transaction id
+ * @param req_iovecs Array of request string buffers
+ * @param req_iovecs_num Number of request string buffers
+ * @param rep_iovec Incoming reply string buffer (optional)
+ * @return 0 on success, a negative errno value on error.
+ */
+int xs_msg_reply(enum xsd_sockmsg_type msg_type, xenbus_transaction_t xbt,
+       const struct xs_iovec *req_iovecs, int req_iovecs_num,
+       struct xs_iovec *rep_iovec);
+
+#endif /* __XS_COMMS_H__ */
-- 
2.11.0


_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 

