[Xen-devel] [PATCH v2 01/10] libxl: Introduce nested async operations (nested ao)
This allows a long-running ao to avoid accumulating memory: instead of
allocating from the parent ao's gc, which lives until the whole operation
completes, callers can allocate from a short-lived nested ao. Each nested
ao has its own gc and is freed independently of its parent.
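
For illustration only (not part of the patch): assuming the usual
libxl_internal.h environment, the intended usage pattern is roughly the
sketch below, where process_one_item(), nitems and domid are hypothetical
names:

    /* Hypothetical sketch: a long-running operation that bounds its
     * scratch memory by taking a nested ao per iteration. */
    static void long_running_op(libxl__egc *egc, libxl__ao *parent_ao,
                                uint32_t domid, int nitems)
    {
        int i;

        for (i = 0; i < nitems; i++) {
            libxl__ao *child = libxl__nested_ao_create(parent_ao);
            libxl__gc *gc = libxl__ao_inprogress_gc(child);

            /* scratch allocations land on the child's gc ... */
            char *path = GCSPRINTF("/local/domain/%"PRIu32"/item/%d",
                                   domid, i);
            process_one_item(egc, gc, path);    /* hypothetical helper */

            /* ... and are released here, not when parent_ao completes */
            libxl__nested_ao_free(child);
        }
    }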
Signed-off-by: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
Acked-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Tested-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
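
Note (not part of the patch): as the new libxl_internal.h comment below
says, the returned sub-ao can be passed to the usual gc helpers such as
libxl__ao_inprogress_gc, AO_GC and STATE_AO_GC. A minimal hypothetical
sketch with AO_GC, where fill_scratch_path() and domid are made-up names:

    /* AO_GC takes the gc from the variable named "ao"; it behaves the
     * same whether "ao" is an ordinary ao or a nested one. */
    static char *fill_scratch_path(libxl__ao *ao, uint32_t domid)
    {
        AO_GC;
        /* allocated on the (possibly nested) ao's gc */
        return GCSPRINTF("/libxl/%"PRIu32"/device", domid);
    }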
tools/libxl/libxl_event.c | 35 +++++++++++++++++++++++++++++++++++
tools/libxl/libxl_internal.h | 24 +++++++++++++++++++++++-
2 files changed, 58 insertions(+), 1 deletions(-)
diff --git a/tools/libxl/libxl_event.c b/tools/libxl/libxl_event.c
index 0fe4428..54d15db 100644
--- a/tools/libxl/libxl_event.c
+++ b/tools/libxl/libxl_event.c
@@ -1572,6 +1572,7 @@ void libxl__ao_complete(libxl__egc *egc, libxl__ao *ao, int rc)
LOG(DEBUG,"ao %p: complete, rc=%d",ao,rc);
assert(ao->magic == LIBXL__AO_MAGIC);
assert(!ao->complete);
+ assert(!ao->nested);
ao->complete = 1;
ao->rc = rc;
@@ -1736,6 +1737,7 @@ void libxl__ao_progress_report(libxl__egc *egc, libxl__ao *ao,
const libxl_asyncprogress_how *how, libxl_event *ev)
{
AO_GC;
+ assert(!ao->nested);
if (how->callback == dummy_asyncprogress_callback_ignore) {
LOG(DEBUG,"ao %p: progress report: ignored",ao);
libxl_event_free(CTX,ev);
@@ -1756,6 +1758,39 @@ void libxl__ao_progress_report(libxl__egc *egc, libxl__ao *ao,
}
+/* nested ao */
+
+_hidden libxl__ao *libxl__nested_ao_create(libxl__ao *parent)
+{
+ /* We only use the parent to get the ctx. However, we require the
+ * caller to provide us with an ao, not just a ctx, to prove that
+ * they are already in an asynchronous operation. That will avoid
+ * people using this to (for example) make an ao in a non-ao_how
+ * function somewhere in the middle of libxl. */
+ libxl__ao *child = NULL;
+ libxl_ctx *ctx = libxl__gc_owner(&parent->gc);
+
+ assert(parent->magic == LIBXL__AO_MAGIC);
+
+ child = libxl__zalloc(&ctx->nogc_gc, sizeof(*child));
+ child->magic = LIBXL__AO_MAGIC;
+ child->nested = 1;
+ LIBXL_INIT_GC(child->gc, ctx);
+ libxl__gc *gc = &child->gc;
+
+ LOG(DEBUG,"ao %p: nested ao, parent %p", child, parent);
+ return child;
+}
+
+_hidden void libxl__nested_ao_free(libxl__ao *child)
+{
+ assert(child->magic == LIBXL__AO_MAGIC);
+ assert(child->nested);
+ libxl_ctx *ctx = libxl__gc_owner(&child->gc);
+ libxl__ao__destroy(ctx, child);
+}
+
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 4729566..6ccd826 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -427,7 +427,7 @@ struct libxl__ao {
* only in libxl__ao_complete.)
*/
uint32_t magic;
- unsigned constructing:1, in_initiator:1, complete:1, notified:1;
+ unsigned constructing:1, in_initiator:1, complete:1, notified:1, nested:1;
int progress_reports_outstanding;
int rc;
libxl__gc gc;
@@ -1780,6 +1780,28 @@ _hidden void libxl__ao_complete_check_progress_reports(libxl__egc*, libxl__ao*);
/*
+ * Short-lived sub-ao, aka "nested ao".
+ *
+ * Some asynchronous operations are very long-running. Generally,
+ * since an ao has a gc, any allocations made in that ao will live
+ * until the ao is completed. When this is not desirable, these
+ * functions may be used to manage a "sub-ao".
+ *
+ * The returned sub-ao is suitable for passing to gc-related functions
+ * and macros such as libxl__ao_inprogress_gc, AO_GC, and STATE_AO_GC.
+ *
+ * It MUST NOT be used with AO_INPROGRESS, AO_ABORT,
+ * libxl__ao_complete, libxl__ao_progress_report, and so on.
+ *
+ * The caller must ensure that all of the sub-aos are freed before
+ * the parent is.
+ */
+
+_hidden libxl__ao *libxl__nested_ao_create(libxl__ao *parent); /* cannot fail */
+_hidden void libxl__nested_ao_free(libxl__ao *child);
+
+
+/*
* File descriptors and CLOEXEC
*/
--
1.7.7.5 (Apple Git-26)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel