Re: [Minios-devel] [UNIKRAFT PATCH v2 04/10] plat/xen: Add support for communication with Xenstore daemon



Costin Lupu <costin.lupu@xxxxxxxxx> writes:

> Hi Yuri,
>
> Please see my comments inline.
>
> On 08/29/2018 03:38 PM, Yuri Volchkov wrote:
>> Hey Costin.
>> 
>> I found just a couple of important things, but both of them are really
>> tiny.
>> 
>> The comments are inline
>> 
>> -Yuri.
>> 
>> Costin Lupu <costin.lupu@xxxxxxxxx> writes:
>> 
>>> Add support for communication with Xenstore daemon via the shared
>>> page. In Unikraft, the incoming messages are processed by the
>>> Xenstore thread.
>>>
>>> Signed-off-by: Costin Lupu <costin.lupu@xxxxxxxxx>
>>> ---
>>>  plat/xen/Makefile.uk       |   1 +
>>>  plat/xen/xenbus/xenbus.c   |   8 +
>>>  plat/xen/xenbus/xs_comms.c | 535 +++++++++++++++++++++++++++++++++++++++++++++
>>>  plat/xen/xenbus/xs_comms.h |  64 ++++++
>>>  4 files changed, 608 insertions(+)
>>>  create mode 100644 plat/xen/xenbus/xs_comms.c
>>>  create mode 100644 plat/xen/xenbus/xs_comms.h
>>>
>>> diff --git a/plat/xen/Makefile.uk b/plat/xen/Makefile.uk
>>> index b8c70e1..2703a54 100644
>>> --- a/plat/xen/Makefile.uk
>>> +++ b/plat/xen/Makefile.uk
>>> @@ -79,4 +79,5 @@ LIBXENBUS_ASINCLUDES-y         += $(LIBXENPLAT_ASINCLUDES-y)
>>>  LIBXENBUS_CFLAGS-y             += $(LIBXENPLAT_CFLAGS-y)
>>>  LIBXENBUS_CINCLUDES-y          += $(LIBXENPLAT_CINCLUDES-y)
>>>  LIBXENBUS_SRCS-y               += $(LIBXENPLAT_BASE)/xenbus/xenbus.c
>>> +LIBXENBUS_SRCS-y               += $(LIBXENPLAT_BASE)/xenbus/xs_comms.c
>>>  endif
>>> diff --git a/plat/xen/xenbus/xenbus.c b/plat/xen/xenbus/xenbus.c
>>> index 1bc57c3..a20546b 100644
>>> --- a/plat/xen/xenbus/xenbus.c
>>> +++ b/plat/xen/xenbus/xenbus.c
>>> @@ -43,6 +43,7 @@
>>>  #include <uk/errptr.h>
>>>  #include <uk/assert.h>
>>>  #include <xenbus/xenbus.h>
>>> +#include "xs_comms.h"
>>>  
>>>  static struct xenbus_handler xbh;
>>>  
>>> @@ -86,6 +87,13 @@ static int xenbus_init(struct uk_alloc *a)
>>>  
>>>     xbh.a = a;
>>>  
>>> +   ret = xs_comms_init();
>>> +   if (ret) {
>>> +           uk_printd(DLVL_ERR,
>>> +                   "Error initializing Xenstore communication.");
>>> +           return ret;
>>> +   }
>>> +
>>>     UK_TAILQ_FOREACH_SAFE(drv, &xbh.drv_list, next, drv_next) {
>>>             if (drv->init) {
>>>                     ret = drv->init(a);
>>> diff --git a/plat/xen/xenbus/xs_comms.c b/plat/xen/xenbus/xs_comms.c
>>> new file mode 100644
>>> index 0000000..df3739d
>>> --- /dev/null
>>> +++ b/plat/xen/xenbus/xs_comms.c
>>> @@ -0,0 +1,535 @@
>>> +/* SPDX-License-Identifier: BSD-3-Clause */
>>> +/*
>>> + * Authors: Steven Smith (sos22@xxxxxxxxx)
>>> + *          Grzegorz Milos (gm281@xxxxxxxxx)
>>> + *          John D. Ramsdell
>>> + *          Costin Lupu <costin.lupu@xxxxxxxxx>
>>> + *
>>> + * Copyright (c) 2006, Cambridge University
>>> + *               2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
>>> + *
>>> + * Redistribution and use in source and binary forms, with or without
>>> + * modification, are permitted provided that the following conditions
>>> + * are met:
>>> + *
>>> + * 1. Redistributions of source code must retain the above copyright
>>> + *    notice, this list of conditions and the following disclaimer.
>>> + * 2. Redistributions in binary form must reproduce the above copyright
>>> + *    notice, this list of conditions and the following disclaimer in the
>>> + *    documentation and/or other materials provided with the distribution.
>>> + * 3. Neither the name of the copyright holder nor the names of its
>>> + *    contributors may be used to endorse or promote products derived from
>>> + *    this software without specific prior written permission.
>>> + *
>>> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
>>> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
>>> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
>>> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
>>> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
>>> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
>>> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
>>> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
>>> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
>>> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
>>> + * POSSIBILITY OF SUCH DAMAGE.
>>> + *
>>> + * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
>>> + */
>>> +/*
>>> + * Communication with Xenstore
>>> + * Ported from Mini-OS xenbus.c
>>> + */
>>> +
>>> +#include <string.h>
>>> +#include <uk/errptr.h>
>>> +#include <uk/bitmap.h>
>>> +#include <uk/wait.h>
>>> +#include <uk/arch/spinlock.h>
>>> +#include <common/events.h>
>>> +#include <xen-x86/mm.h>
>>> +#include <xen-x86/setup.h>
>>> +#include "xs_comms.h"
>>> +
>>> +
>>> +/*
>>> + * Xenstore handler structure
>>> + */
>>> +struct xs_handler {
>>> +   /**< Communication: event channel */
>>> +   evtchn_port_t evtchn;
>>> +   /**< Communication: shared memory */
>>> +   struct xenstore_domain_interface *buf;
>>> +   /**< Thread processing incoming xs replies */
>>> +   struct uk_thread *thread;
>>> +   /**< Waiting queue for notifying incoming xs replies */
>>> +   struct uk_waitq waitq;
>>> +};
>>> +
>>> +static struct xs_handler xsh = {
>>> +   .waitq = __WAIT_QUEUE_INITIALIZER(xsh.waitq),
>>> +};
>>> +
>>> +static int xs_avail_space_for_read(unsigned int req_size)
>>> +{
>>> +   return (xsh.buf->rsp_prod - xsh.buf->rsp_cons >= req_size);
>>> +}
>>> +
>>> +static int xs_avail_space_for_write(unsigned int req_size)
>>> +{
>>> +   return (xsh.buf->req_prod - xsh.buf->req_cons +
>>> +           req_size <= XENSTORE_RING_SIZE);
>>> +}
>>> +
>>> +/*
>>> + * In-flight request structure.
>>> + */
>>> +struct xs_request {
>>> +   /**< Waiting queue for incoming reply notification */
>>> +   struct uk_waitq waitq;
>>> +   /**< Request header */
>>> +   struct xsd_sockmsg hdr;
>>> +   /**< Received reply */
>>> +   struct {
>>> +           /**< Reply string + size */
>>> +           struct xs_iovec iovec;
>>> +           /**< Error number */
>>> +           int errornum;
>>> +           /**< Non-zero for incoming replies */
>>> +           int recvd;
>>> +   } reply;
>>> +};
>>> +
>>> +/*
>>> + * Pool of in-flight requests.
>>> + * Request IDs are reused, hence the limited set of entries.
>>> + *
>>> + * TODO sequential IDs
>>> + */
>>> +struct xs_request_pool {
>>> +   /**< Number of live requests */
>>> +   __u32 num_live;
>>> +   /**< Lock */
>>> +   spinlock_t lock;
>>> +   /**< Waiting queue for 'not-full' notifications */
>>> +   struct uk_waitq waitq;
>>> +
>>> +   /* Map size is power of 2 */
>>> +#define REQID_MAP_SHIFT  5
>>> +#define REQID_MAP_SIZE   (1 << REQID_MAP_SHIFT)
>>> +   unsigned long entries_bm[BITS_TO_LONGS(REQID_MAP_SIZE) * sizeof(long)];
>>> +   /**< Entries */
>>> +   struct xs_request entries[REQID_MAP_SIZE];
>>> +};
>>> +
>>> +static struct xs_request_pool xs_req_pool;
>>> +
>>> +static void xs_request_pool_init(struct xs_request_pool *pool)
>>> +{
>>> +   struct xs_request *xs_req;
>>> +
>>> +   pool->num_live = 0;
>>> +   ukarch_spin_lock_init(&pool->lock);
>>> +   uk_waitq_init(&pool->waitq);
>>> +   bitmap_zero(pool->entries_bm, REQID_MAP_SIZE);
>>> +   for (int i = 0; i < REQID_MAP_SIZE; i++) {
>>> +           xs_req = &pool->entries[i];
>>> +           xs_req->hdr.req_id = i;
>>> +           uk_waitq_init(&xs_req->waitq);
>>> +   }
>>> +}
>>> +
>>> +/*
>>> + * Allocate an identifier for a Xenstore request.
>>> + * Blocks if none are available.
>>> + */
>>> +static struct xs_request *xs_request_get(void)
>>> +{
>>> +   unsigned long entry_idx;
>>> +
>>> +   /* wait for an available entry */
>>> +   while (1) {
>>> +           ukarch_spin_lock(&xs_req_pool.lock);
>>> +
>>> +           if (xs_req_pool.num_live < REQID_MAP_SIZE)
>>> +                   break;
>>> +
>>> +           ukarch_spin_unlock(&xs_req_pool.lock);
>>> +
>>> +           uk_waitq_wait_event(&xs_req_pool.waitq,
>>> +                   (xs_req_pool.num_live < REQID_MAP_SIZE));
>>> +   }
>>> +
>>> +   /* find an available entry */
>>> +   entry_idx =
>>> +           find_first_zero_bit(xs_req_pool.entries_bm, REQID_MAP_SIZE);
>>> +
>>> +   set_bit(entry_idx, xs_req_pool.entries_bm);
>>> +   xs_req_pool.num_live++;
>>> +
>>> +   ukarch_spin_unlock(&xs_req_pool.lock);
>>> +
>>> +   return &xs_req_pool.entries[entry_idx];
>>> +}
>>> +
>>> +/* Release a request identifier */
>>> +static void xs_request_put(struct xs_request *xs_req)
>>> +{
>>> +   __u32 reqid = xs_req->hdr.req_id;
>>> +
>>> +   ukarch_spin_lock(&xs_req_pool.lock);
>>> +
>>> +   UK_ASSERT(test_bit(reqid, xs_req_pool.entries_bm) == 1);
>>> +
>>> +   clear_bit(reqid, xs_req_pool.entries_bm);
>>> +   xs_req_pool.num_live--;
>>> +
>>> +   if (xs_req_pool.num_live == 0 ||
>>> +           xs_req_pool.num_live == REQID_MAP_SIZE - 1)
>> I understand the second condition, but why do we need to wake up waiters
>> if num_live == 0?
>
> The first condition is used for suspending. We should wait until all
> requests have been answered, which is why we're waiting for num_live to
> get to zero. Maybe it would be better to remove the first condition and
> add it back when we have the suspend/resume functionality ready.
Probably removing it now is a good decision.

However, how are waiters even possible if num_live == 0? Every time there
is at least one free slot in the pool, all waiters wake up. If there is a
slot at the time xs_request_get is called, it will grab one and will not
wait.

>
>> And, in the second condition, you probably meant "REQID_MAP_SIZE - 2",
>> not "- 1". We want to wake up waiters if all requests from the pool _WERE_
>> in-flight.
>
> All requests are in-flight iff num_live == REQID_MAP_SIZE (which btw I
> have to rename).
>
>> So, if we entered xs_request_put at the point in time when there were no
>> free requests in the pool, num_live will be "REQID_MAP_SIZE - 1". After
>> decrementing, it becomes "REQID_MAP_SIZE - 2".
>
> I'm afraid here it should be REQID_MAP_SIZE - 1; in other words, if we
> have at least one slot free then we can signal that it's "non-full".
Ok, stupid mistake :). This makes it only one important thing found.
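Just so we are on the same page, here is a minimal sketch of the put path
as I understand the outcome of this discussion (first condition dropped
for now, to be re-added together with suspend/resume):

static void xs_request_put(struct xs_request *xs_req)
{
	__u32 reqid = xs_req->hdr.req_id;

	ukarch_spin_lock(&xs_req_pool.lock);

	UK_ASSERT(test_bit(reqid, xs_req_pool.entries_bm) == 1);

	clear_bit(reqid, xs_req_pool.entries_bm);
	xs_req_pool.num_live--;

	/* We freed exactly one slot, so the pool was full iff num_live
	 * is now REQID_MAP_SIZE - 1; only then can anybody be blocked
	 * in xs_request_get(). */
	if (xs_req_pool.num_live == REQID_MAP_SIZE - 1)
		uk_waitq_wake_up(&xs_req_pool.waitq);

	ukarch_spin_unlock(&xs_req_pool.lock);
}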

>> 
>> 
>>> +           uk_waitq_wake_up(&xs_req_pool.waitq);
>>> +
>>> +   ukarch_spin_unlock(&xs_req_pool.lock);
>>> +}
>>> +
>>> +/*
>>> + * Send request to Xenstore. A request is made of multiple iovecs which are
>>> + * preceded by a single iovec referencing the request header. The iovecs are
>>> + * seen by Xenstore as if sent atomically. This can block.
>>> + *
>>> + * TODO A different thread (e.g. Xenbus thread) should write the messages if
>>> + * we expect high loads of requests.
>>> + */
>>> +static int xs_msg_write(struct xsd_sockmsg *xsd_req,
>>> +   const struct xs_iovec *iovec)
>>> +{
>>> +   XENSTORE_RING_IDX prod;
>>> +   const struct xs_iovec *crnt_iovec;
>>> +   struct xs_iovec hdr_iovec;
>>> +   unsigned int req_size, req_off;
>>> +   unsigned int buf_off;
>>> +   unsigned int this_chunk_len;
>>> +   int rc;
>>> +
>>> +   req_size = sizeof(*xsd_req) + xsd_req->len;
>>> +   if (req_size > XENSTORE_RING_SIZE)
>>> +           return -ENOSPC;
>>> +
>>> +   hdr_iovec.data = xsd_req;
>>> +   hdr_iovec.len  = sizeof(*xsd_req);
>>> +
>>> +   /* The batched iovecs are preceded by a single header. */
>>> +   crnt_iovec = &hdr_iovec;
>>> +
>>> +   /*
>>> +    * Wait for the ring to drain to the point where
>>> +    * we can send the message.
>>> +    */
>>> +   while (!xs_avail_space_for_write(req_size)) {
>>> +           /* Wait for there to be space on the ring */
>>> +           uk_printd(DLVL_EXTRA,
>>> +                   "prod %d, len %d, cons %d, size %d; waiting.\n",
>>> +                   xsh.buf->req_prod, req_size,
>>> +                   xsh.buf->req_cons, XENSTORE_RING_SIZE);
>>> +
>>> +           uk_waitq_wait_event(&xsh.waitq,
>>> +                   xs_avail_space_for_write(req_size));
>>> +           uk_printd(DLVL_EXTRA, "Back from wait.\n");
>>> +   }
>> This will work with a cooperative scheduler for sure. But I guess there
>> will be a problem in case of a preemptive one. If we were preempted at
>> this point, and another thread wanted to write to the ring buffer too, we
>> would be screwed. For example, the other thread could use the free space
>> in the ring buffer, and when we get CPU time back, we would continue with
>> confidence that there is enough space for us.
>> 
>> So far we have been ignoring places like this. How about we start leaving
>> at least notes, for example /* TODO: disable preemption here */?
>> 
>> Or, a dedicated thread would solve this problem, as discussed in v1 and
>> as you mentioned in the comment above this function.
>
> Indeed, with preemptive scheduling we would have a problem here. I guess
> the best solution would be to delegate the writes to the Xenstore thread.
>
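Until the writes are delegated, even a coarse lock would at least document
the invariant. A rough sketch of what I have in mind, assuming a mutex
primitive along the lines of uklock's <uk/mutex.h> (uk_mutex_init,
uk_mutex_lock/uk_mutex_unlock); this is not part of this patch:

#include <uk/mutex.h>

static struct uk_mutex xs_write_lock;
/* initialized with uk_mutex_init(&xs_write_lock) in xs_comms_init() */

/* Serialize writers so that the space check and the copy into the ring
 * happen atomically with respect to other threads, even under a
 * preemptive scheduler. */
static int xs_msg_write_serialized(struct xsd_sockmsg *xsd_req,
	const struct xs_iovec *iovec)
{
	int rc;

	uk_mutex_lock(&xs_write_lock);
	rc = xs_msg_write(xsd_req, iovec);
	uk_mutex_unlock(&xs_write_lock);

	return rc;
}

Holding the lock while xs_msg_write() sleeps on the waitq should be fine
here, since the space it waits for is freed by the daemon consuming
requests, not by another writer.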
>>> +
>>> +   /* We must write requests after reading the consumer index. */
>>> +   mb();
>> Maybe rmb() here is enough? Or do we actually need a full barrier?
>
> Before the barrier we have a read operation (reading the consumer
> index), while after it we have write operations (writing the requests).
> So we need to order the writes after the reads with a full barrier.
Oh, right. Thanks for clarifying.
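To summarize the pattern for the archive, a condensed sketch of mine (not
part of the patch, wrap-around handling omitted): rmb() only orders loads
against loads and wmb() only stores against stores, so neither keeps the
load of the consumer index ahead of the stores into the ring; only the
full mb() does.

static void ring_put_sketch(struct xenstore_domain_interface *buf,
	const char *data, unsigned int len)
{
	XENSTORE_RING_IDX cons = buf->req_cons;		/* load */

	UK_ASSERT(buf->req_prod - cons + len <= XENSTORE_RING_SIZE);

	mb();	/* order the stores below after the load above */

	memcpy((char *) buf->req + MASK_XENSTORE_IDX(buf->req_prod),
		data, len);				/* stores */

	wmb();	/* publish the payload before moving the producer index */
	buf->req_prod += len;
}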

>
>>> +
>>> +   /*
>>> +    * We're now guaranteed to be able to send the message
>>> +    * without overflowing the ring. Do so.
>>> +    */
>>> +
>>> +   prod = xsh.buf->req_prod;
>>> +   req_off = 0;
>>> +   buf_off = 0;
>>> +   while (req_off < req_size) {
>>> +           this_chunk_len = MIN(crnt_iovec->len - buf_off,
>>> +                   XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod));
>>> +
>>> +           memcpy(
>>> +                   (char *) xsh.buf->req + MASK_XENSTORE_IDX(prod),
>>> +                   (char *) crnt_iovec->data + buf_off,
>>> +                   this_chunk_len
>>> +           );
>>> +
>>> +           prod += this_chunk_len;
>>> +           req_off += this_chunk_len;
>>> +           buf_off += this_chunk_len;
>>> +
>>> +           if (buf_off == crnt_iovec->len) {
>>> +                   buf_off = 0;
>>> +                   if (crnt_iovec == &hdr_iovec)
>>> +                           crnt_iovec = iovec;
>>> +                   else
>>> +                           crnt_iovec++;
>>> +           }
>>> +   }
>>> +
>>> +   uk_printd(DLVL_EXTRA, "Complete main loop of %s.\n", __func__);
>>> +   UK_ASSERT(buf_off == 0);
>>> +   UK_ASSERT(req_off == req_size);
>>> +   UK_ASSERT(prod <= xsh.buf->req_cons + XENSTORE_RING_SIZE);
>>> +
>>> +   /* Remote must see entire message before updating indexes */
>>> +   wmb();
>>> +
>>> +   xsh.buf->req_prod += req_size;
>>> +
>>> +   /* Send evtchn to notify remote */
>>> +   rc = notify_remote_via_evtchn(xsh.evtchn);
>>> +   UK_ASSERT(rc == 0);
>>> +
>>> +   return 0;
>>> +}
>>> +
>>> +int xs_msg_reply(enum xsd_sockmsg_type msg_type, xenbus_transaction_t xbt,
>>> +   const struct xs_iovec *req_iovecs, int req_iovecs_num,
>>> +   struct xs_iovec *rep_iovec)
>>> +{
>>> +   struct xs_request *xs_req;
>>> +   int err;
>>> +
>>> +   if (req_iovecs == NULL)
>>> +           return -EINVAL;
>>> +
>>> +   xs_req = xs_request_get();
>>> +   xs_req->hdr.type = msg_type;
>>> +   /* req_id was set at pool init */
>>> +   xs_req->hdr.tx_id = xbt;
>>> +   xs_req->hdr.len = 0;
>>> +   for (int i = 0; i < req_iovecs_num; i++)
>>> +           xs_req->hdr.len += req_iovecs[i].len;
>>> +
>>> +   xs_req->reply.recvd = 0;
>>> +
>>> +   /* send the request */
>>> +   err = xs_msg_write(&xs_req->hdr, req_iovecs);
>>> +   if (err)
>>> +           goto out;
>>> +
>>> +   /* wait reply */
>>> +   uk_waitq_wait_event(&xs_req->waitq,
>>> +           xs_req->reply.recvd != 0);
>>> +
>>> +   err = xs_req->reply.errornum;
>>> +   if (err == 0) {
>>> +           if (rep_iovec)
>>> +                   *rep_iovec = xs_req->reply.iovec;
>>> +           else
>>> +                   free(xs_req->reply.iovec.data);
>>> +   }
>>> +
>>> +out:
>>> +   xs_request_put(xs_req);
>>> +
>>> +   return (-err);
>> xs_msg_write already returns a negative error code. This line will make
>> it positive. I guess you want to invert it when reading from
>> xs_req->reply.errornum, a few lines above, instead of doing it here.
>
> Yes, good catch!
>
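For reference, a sketch of the tail of xs_msg_reply() with the sign fixed
as I understand the agreement (negate errornum where it is read, return
err unchanged so xs_msg_write()'s negative codes pass through):

	/* wait for the reply */
	uk_waitq_wait_event(&xs_req->waitq, xs_req->reply.recvd != 0);

	/* reply_to_errno() produces positive error numbers, so negate here;
	 * err from xs_msg_write() is already negative on failure */
	err = -xs_req->reply.errornum;
	if (err == 0) {
		if (rep_iovec)
			*rep_iovec = xs_req->reply.iovec;
		else
			free(xs_req->reply.iovec.data);
	}

out:
	xs_request_put(xs_req);

	return err;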
>>> +}
>>> +
>>> +/*
>>> + * Converts a Xenstore reply error to a positive error number.
>>> + * Returns 0 if the reply is successful.
>>> + */
>>> +static int reply_to_errno(const char *reply)
>>> +{
>>> +   int err = 0;
>>> +
>>> +   for (int i = 0; i < (int) ARRAY_SIZE(xsd_errors); i++) {
>>> +           if (!strcmp(reply, xsd_errors[i].errstring)) {
>>> +                   err = xsd_errors[i].errnum;
>>> +                   goto out;
>>> +           }
>>> +   }
>>> +
>>> +   uk_printd(DLVL_WARN, "Unknown Xenstore error: %s\n", reply);
>>> +   err = EINVAL;
>>> +
>>> +out:
>>> +   return err;
>>> +}
>>> +
>>> +/* Process an incoming xs reply */
>>> +static void process_reply(struct xsd_sockmsg *hdr, char *payload)
>>> +{
>>> +   struct xs_request *xs_req;
>>> +
>>> +   if (!test_bit(hdr->req_id, xs_req_pool.entries_bm)) {
>>> +           uk_printd(DLVL_WARN, "Invalid reply id=%d", hdr->req_id);
>>> +           free(payload);
>>> +           return;
>>> +   }
>>> +
>>> +   xs_req = &xs_req_pool.entries[hdr->req_id];
>>> +
>>> +   if (hdr->type == XS_ERROR) {
>>> +           xs_req->reply.errornum = reply_to_errno(payload);
>>> +           free(payload);
>>> +
>>> +   } else if (hdr->type != xs_req->hdr.type) {
>>> +           uk_printd(DLVL_WARN, "Mismatching message type: %d", hdr->type);
>>> +           free(payload);
>>> +           return;
>>> +
>>> +   } else {
>>> +           /* set reply */
>>> +           xs_req->reply.iovec.data = payload;
>>> +           xs_req->reply.iovec.len = hdr->len;
>>> +           xs_req->reply.errornum = 0;
>>> +   }
>>> +
>>> +   xs_req->reply.recvd = 1;
>>> +
>>> +   /* notify waiting requester */
>>> +   uk_waitq_wake_up(&xs_req->waitq);
>>> +}
>>> +
>>> +/* Process an incoming xs watch event */
>>> +static void process_watch_event(char *watch_msg)
>>> +{
>>> +   /* TODO */
>>> +}
>>> +
>>> +static void memcpy_from_ring(const char *ring, char *dest, int off, int len)
>>> +{
>>> +   int c1, c2;
>>> +
>>> +   c1 = MIN(len, XENSTORE_RING_SIZE - off);
>>> +   c2 = len - c1;
>>> +
>>> +   memcpy(dest, ring + off, c1);
>>> +   if (c2)
>>> +           memcpy(dest + c1, ring, c2);
>>> +}
>>> +
>>> +static void xs_msg_read(struct xsd_sockmsg *hdr)
>>> +{
>>> +   XENSTORE_RING_IDX cons;
>>> +   char *payload;
>>> +
>>> +   payload = malloc(hdr->len + 1);
>>> +   if (payload == NULL) {
>>> +           uk_printd(DLVL_WARN,
>>> +                   "No memory available for saving Xenstore message!");
>>> +           return;
>>> +   }
>>> +
>>> +   cons = xsh.buf->rsp_cons;
>>> +
>>> +   /* copy payload */
>>> +   memcpy_from_ring(
>>> +           xsh.buf->rsp,
>>> +           payload,
>>> +           MASK_XENSTORE_IDX(cons + sizeof(*hdr)),
>>> +           hdr->len
>>> +   );
>>> +   payload[hdr->len] = '\0';
>>> +
>>> +   /* Remote must not see available space until we've copied the reply */
>>> +   mb();
>> Maybe wmb() here is enough? Or do we actually need a full barrier?
>
> Similarly to the previous situation, before the barrier we have a read
> operation (reading the reply data), while after the barrier we have a
> write operation (writing the consumer index). So we need to order the
> write after the read with a full barrier.
>
>>> +   xsh.buf->rsp_cons += sizeof(*hdr) + hdr->len;
>>> +
>>> +   if (xsh.buf->rsp_prod - cons >= XENSTORE_RING_SIZE)
>>> +           notify_remote_via_evtchn(xsh.evtchn);
>>> +
>>> +   if (hdr->type == XS_WATCH_EVENT)
>>> +           process_watch_event(payload);
>>> +   else
>>> +           process_reply(hdr, payload);
>>> +}
>>> +
>>> +static void xs_thread_func(void *ign __unused)
>>> +{
>>> +   struct xsd_sockmsg msg;
>>> +   XENSTORE_RING_IDX prod = xsh.buf->rsp_prod;
>>> +
>>> +   for (;;) {
>>> +           /* wait for incoming xs response */
>>> +           uk_waitq_wait_event(&xsh.waitq, prod != xsh.buf->rsp_prod);
>>> +
>>> +           while (1) {
>>> +                   prod = xsh.buf->rsp_prod;
>>> +
>>> +                   uk_printd(DLVL_EXTRA, "Rsp_cons %d, rsp_prod %d.\n",
>>> +                           xsh.buf->rsp_cons, xsh.buf->rsp_prod);
>>> +
>>> +                   if (!xs_avail_space_for_read(sizeof(msg)))
>>> +                           break;
>>> +
>>> +                   /* Make sure data is read after reading the indexes */
>>> +                   rmb();
>>> +
>>> +                   /* copy the message header */
>>> +                   memcpy_from_ring(
>>> +                           xsh.buf->rsp,
>>> +                           (char *) &msg,
>>> +                           MASK_XENSTORE_IDX(xsh.buf->rsp_cons),
>>> +                           sizeof(msg)
>>> +                   );
>>> +
>>> +                   uk_printd(DLVL_EXTRA, "Msg len %lu, %u avail, id %u.\n",
>>> +                           msg.len + sizeof(msg),
>>> +                           xsh.buf->rsp_prod - xsh.buf->rsp_cons,
>>> +                           msg.req_id);
>>> +
>>> +                   if (!xs_avail_space_for_read(sizeof(msg) + msg.len))
>>> +                           break;
>>> +
>>> +                   /* Make sure data is read after reading the indexes */
>>> +                   rmb();
>>> +
>>> +                   uk_printd(DLVL_EXTRA, "Message is good.\n");
>>> +                   xs_msg_read(&msg);
>>> +           }
>>> +   }
>>> +}
>>> +
>>> +static void xs_evtchn_handler(evtchn_port_t port,
>>> +           struct __regs *regs __unused, void *ign __unused)
>>> +{
>>> +   UK_ASSERT(xsh.evtchn == port);
>>> +   uk_waitq_wake_up(&xsh.waitq);
>>> +}
>>> +
>>> +int xs_comms_init(void)
>>> +{
>>> +   struct uk_thread *thread;
>>> +   evtchn_port_t port;
>>> +
>>> +   xs_request_pool_init(&xs_req_pool);
>>> +
>>> +   uk_waitq_init(&xsh.waitq);
>>> +
>>> +   thread = uk_thread_create("xenstore", xs_thread_func, NULL);
>>> +   if (PTRISERR(thread))
>>> +           return PTR2ERR(thread);
>>> +
>>> +   xsh.thread = thread;
>>> +
>>> +   xsh.evtchn = HYPERVISOR_start_info->store_evtchn;
>>> +   xsh.buf = mfn_to_virt(HYPERVISOR_start_info->store_mfn);
>>> +
>>> +   port = bind_evtchn(xsh.evtchn, xs_evtchn_handler, NULL);
>>> +   UK_ASSERT(port == xsh.evtchn);
>>> +   unmask_evtchn(xsh.evtchn);
>>> +
>>> +   uk_printd(DLVL_INFO,
>>> +           "Xenstore connection initialised on port %d, buf %p (mfn 
>>> %#lx)\n",
>>> +           port, xsh.buf, HYPERVISOR_start_info->store_mfn);
>>> +
>>> +   return 0;
>>> +}
>>> +
>>> +void xs_comms_fini(void)
>>> +{
>>> +   mask_evtchn(xsh.evtchn);
>>> +   unbind_evtchn(xsh.evtchn);
>>> +
>>> +   xsh.buf = NULL;
>>> +
>>> +   /* TODO stop thread, instead of killing it */
>>> +   uk_thread_destroy(xsh.thread);
>>> +   xsh.thread = NULL;
>>> +}
>>> diff --git a/plat/xen/xenbus/xs_comms.h b/plat/xen/xenbus/xs_comms.h
>>> new file mode 100644
>>> index 0000000..9b558d0
>>> --- /dev/null
>>> +++ b/plat/xen/xenbus/xs_comms.h
>>> @@ -0,0 +1,64 @@
>>> +/* SPDX-License-Identifier: BSD-3-Clause */
>>> +/*
>>> + * Authors: Costin Lupu <costin.lupu@xxxxxxxxx>
>>> + *
>>> + * Copyright (c) 2018, NEC Europe Ltd., NEC Corporation. All rights reserved.
>>> + *
>>> + * Redistribution and use in source and binary forms, with or without
>>> + * modification, are permitted provided that the following conditions
>>> + * are met:
>>> + *
>>> + * 1. Redistributions of source code must retain the above copyright
>>> + *    notice, this list of conditions and the following disclaimer.
>>> + * 2. Redistributions in binary form must reproduce the above copyright
>>> + *    notice, this list of conditions and the following disclaimer in the
>>> + *    documentation and/or other materials provided with the distribution.
>>> + * 3. Neither the name of the copyright holder nor the names of its
>>> + *    contributors may be used to endorse or promote products derived from
>>> + *    this software without specific prior written permission.
>>> + *
>>> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
>>> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
>>> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
>>> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
>>> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
>>> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
>>> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
>>> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
>>> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
>>> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
>>> + * POSSIBILITY OF SUCH DAMAGE.
>>> + *
>>> + * THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.
>>> + */
>>> +
>>> +#ifndef __XS_COMMS_H__
>>> +#define __XS_COMMS_H__
>>> +
>>> +#include <xen/io/xs_wire.h>
>>> +#include <xenbus/xs.h>
>>> +
>>> +int  xs_comms_init(void);
>>> +void xs_comms_fini(void);
>>> +
>>> +struct xs_iovec {
>>> +   void *data;
>>> +   unsigned int len;
>>> +};
>>> +
>>> +/*
>>> + * Sends a message to Xenstore and blocks waiting for a reply.
>>> + * The reply is malloc'ed and should be freed by the caller.
>>> + *
>>> + * @param msg_type Xenstore message type
>>> + * @param xbt Xenbus transaction id
>>> + * @param req_iovecs Array of request strings buffers
>>> + * @param req_iovecs_num Request strings buffers number
>>> + * @param rep_iovec Incoming reply string buffer (optional)
>>> + * @return 0 on success, a negative errno value on error.
>>> + */
>>> +int xs_msg_reply(enum xsd_sockmsg_type msg_type, xenbus_transaction_t xbt,
>>> +   const struct xs_iovec *req_iovecs, int req_iovecs_num,
>>> +   struct xs_iovec *rep_iovec);
>>> +
>>> +#endif /* __XS_COMMS_H__ */
>>> -- 
>>> 2.11.0
>>>
>> 

-- 
Yuri Volchkov
Software Specialist

NEC Europe Ltd
Kurfürsten-Anlage 36
D-69115 Heidelberg

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/minios-devel

 

