[xen stable-4.16] tools/xenstore: split up send_reply()



commit ce6aea73f6c4c90fab2500933b3a488e2f30334b
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Tue Sep 13 07:35:07 2022 +0200
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Nov 1 14:07:24 2022 +0000

    tools/xenstore: split up send_reply()
    
    Today send_reply() is used for both normal request replies and watch
    events.
    
    Split it up into send_reply() and send_event(). This will be used to
    add some event-specific handling.
    
    add_event() can be merged into send_event(), removing the need for an
    intermediate memory allocation.
    
    This is part of XSA-326.
    
    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
    (cherry picked from commit 9bfde319dbac2a1321898d2f75a3f075c3eb7b32)
---
 tools/xenstore/xenstored_core.c  | 74 ++++++++++++++++++++++++----------------
 tools/xenstore/xenstored_core.h  |  1 +
 tools/xenstore/xenstored_watch.c | 39 ++++-----------------
 3 files changed, 52 insertions(+), 62 deletions(-)
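
A condensed sketch of the interface after the split (not part of the patch; the
two prototypes match the diff below, while fire_one_watch() is a hypothetical
helper that only illustrates the new call pattern inside xenstored_watch.c,
where struct watch and get_watch_path() live):

    /* Replies keep reusing the request buffer conn->in; sending an
     * XS_WATCH_EVENT through this path is now an assertion failure. */
    void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
                    const void *data, unsigned int len);

    /* Watch events allocate a fresh buffered_data as a child of conn,
     * build "path\0token\0" directly in it and queue it on conn->out_list,
     * so the old add_event() helper and its temporary allocation go away.
     * Errors are dropped silently, as no request is being answered. */
    void send_event(struct connection *conn, const char *path, const char *token);

    /* Hypothetical helper showing how fire_watches()/do_watch() now call
     * send_event() directly instead of going through add_event(). */
    static void fire_one_watch(struct connection *conn,
                               const struct watch *watch, const char *name)
    {
            send_event(conn, get_watch_path(watch, name), watch->token);
    }

The hunks below add send_event() to xenstored_core.c, declare it in
xenstored_core.h, and switch xenstored_watch.c over to calling it directly.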

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index e9c9695fd1..249ad5ec6f 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -767,49 +767,32 @@ static void send_error(struct connection *conn, int error)
 void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
                const void *data, unsigned int len)
 {
-       struct buffered_data *bdata;
+       struct buffered_data *bdata = conn->in;
+
+       assert(type != XS_WATCH_EVENT);
 
        if ( len > XENSTORE_PAYLOAD_MAX ) {
                send_error(conn, E2BIG);
                return;
        }
 
-       /* Replies reuse the request buffer, events need a new one. */
-       if (type != XS_WATCH_EVENT) {
-               bdata = conn->in;
-               /* Drop asynchronous responses, e.g. errors for watch events. */
-               if (!bdata)
-                       return;
-               bdata->inhdr = true;
-               bdata->used = 0;
-               conn->in = NULL;
-       } else {
-               /* Message is a child of the connection for auto-cleanup. */
-               bdata = new_buffer(conn);
+       if (!bdata)
+               return;
+       bdata->inhdr = true;
+       bdata->used = 0;
 
-               /*
-                * Allocation failure here is unfortunate: we have no way to
-                * tell anybody about it.
-                */
-               if (!bdata)
-                       return;
-       }
        if (len <= DEFAULT_BUFFER_SIZE)
                bdata->buffer = bdata->default_buffer;
-       else
+       else {
                bdata->buffer = talloc_array(bdata, char, len);
-       if (!bdata->buffer) {
-               if (type == XS_WATCH_EVENT) {
-                       /* Same as above: no way to tell someone. */
-                       talloc_free(bdata);
+               if (!bdata->buffer) {
+                       send_error(conn, ENOMEM);
                        return;
                }
-               /* re-establish request buffer for sending ENOMEM. */
-               conn->in = bdata;
-               send_error(conn, ENOMEM);
-               return;
        }
 
+       conn->in = NULL;
+
        /* Update relevant header fields and fill in the message body. */
        bdata->hdr.msg.type = type;
        bdata->hdr.msg.len = len;
@@ -817,8 +800,39 @@ void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
 
        /* Queue for later transmission. */
        list_add_tail(&bdata->list, &conn->out_list);
+}
 
-       return;
+/*
+ * Send a watch event.
+ * As this is not directly related to the current command, errors can't be
+ * reported.
+ */
+void send_event(struct connection *conn, const char *path, const char *token)
+{
+       struct buffered_data *bdata;
+       unsigned int len;
+
+       len = strlen(path) + 1 + strlen(token) + 1;
+       /* Don't try to send over-long events. */
+       if (len > XENSTORE_PAYLOAD_MAX)
+               return;
+
+       bdata = new_buffer(conn);
+       if (!bdata)
+               return;
+
+       bdata->buffer = talloc_array(bdata, char, len);
+       if (!bdata->buffer) {
+               talloc_free(bdata);
+               return;
+       }
+       strcpy(bdata->buffer, path);
+       strcpy(bdata->buffer + strlen(path) + 1, token);
+       bdata->hdr.msg.type = XS_WATCH_EVENT;
+       bdata->hdr.msg.len = len;
+
+       /* Queue for later transmission. */
+       list_add_tail(&bdata->list, &conn->out_list);
 }
 
 /* Some routines (write, mkdir, etc) just need a non-error return */
diff --git a/tools/xenstore/xenstored_core.h b/tools/xenstore/xenstored_core.h
index 0004fa848c..9af9af4390 100644
--- a/tools/xenstore/xenstored_core.h
+++ b/tools/xenstore/xenstored_core.h
@@ -187,6 +187,7 @@ unsigned int get_string(const struct buffered_data *data, unsigned int offset);
 
 void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
                const void *data, unsigned int len);
+void send_event(struct connection *conn, const char *path, const char *token);
 
 /* Some routines (write, mkdir, etc) just need a non-error return */
 void send_ack(struct connection *conn, enum xsd_sockmsg_type type);
diff --git a/tools/xenstore/xenstored_watch.c b/tools/xenstore/xenstored_watch.c
index aca0a71bad..99a2c266b2 100644
--- a/tools/xenstore/xenstored_watch.c
+++ b/tools/xenstore/xenstored_watch.c
@@ -85,35 +85,6 @@ static const char *get_watch_path(const struct watch *watch, const char *name)
        return path;
 }
 
-/*
- * Send a watch event.
- * Temporary memory allocations are done with ctx.
- */
-static void add_event(struct connection *conn,
-                     const void *ctx,
-                     struct watch *watch,
-                     const char *name)
-{
-       /* Data to send (node\0token\0). */
-       unsigned int len;
-       char *data;
-
-       name = get_watch_path(watch, name);
-
-       len = strlen(name) + 1 + strlen(watch->token) + 1;
-       /* Don't try to send over-long events. */
-       if (len > XENSTORE_PAYLOAD_MAX)
-               return;
-
-       data = talloc_array(ctx, char, len);
-       if (!data)
-               return;
-       strcpy(data, name);
-       strcpy(data + strlen(name) + 1, watch->token);
-       send_reply(conn, XS_WATCH_EVENT, data, len);
-       talloc_free(data);
-}
-
 /*
  * Check permissions of a specific watch to fire:
  * Either the node itself or its parent have to be readable by the connection
@@ -190,10 +161,14 @@ void fire_watches(struct connection *conn, const void *ctx, const char *name,
                list_for_each_entry(watch, &i->watches, list) {
                        if (exact) {
                                if (streq(name, watch->node))
-                                       add_event(i, ctx, watch, name);
+                                       send_event(i,
+                                                  get_watch_path(watch, name),
+                                                  watch->token);
                        } else {
                                if (is_child(name, watch->node))
-                                       add_event(i, ctx, watch, name);
+                                       send_event(i,
+                                                  get_watch_path(watch, name),
+                                                  watch->token);
                        }
                }
        }
@@ -292,7 +267,7 @@ int do_watch(struct connection *conn, struct buffered_data *in)
        send_ack(conn, XS_WATCH);
 
        /* We fire once up front: simplifies clients and restart. */
-       add_event(conn, in, watch, watch->node);
+       send_event(conn, get_watch_path(watch, watch->node), watch->token);
 
        return 0;
 }
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.16