Re: [Xen-devel] [PATCH] Fix libxenstore memory leak when USE_PTHREAD is not defined
On Thu, 2012-09-13 at 17:09 +0100, Andres Lagar-Cavilla wrote:
> tools/xenstore/xs.c | 22 ++++++----------------
> 1 files changed, 6 insertions(+), 16 deletions(-)
>
>
> Remove usage of pthread_cleanup_push and _pop, and explicitly call free for
> heap objects in error paths. Also remove cleanup_p* for a mutex unlock path.
> By the way, set a suitable errno value for an error path that had none.
>
> Resend due to a small fix spotted; please ignore the previous one.
>
> Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Does this reintroduce the same issue as 21353:2dd3141b3e3e was supposed
to solve (i.e. leaking memory or mutexes if you pthread_cancel the thread
in the middle of the operation)?
Can we keep cleanup_push/pop for use with the mutexes, and for the
malloc/free do something like:
#ifdef USE_PTHREAD
#define cleanup_push... as currently
#define cleanup_pop... as currently
#define cleanup_malloc(x) cleanup_push(free, x)
#define cleanup_free(doit, x) cleanup_pop(doit)
#else
#define cleanup_push... nop as now
#define cleanup_pop... nop as now
#define cleanup_malloc... NOP
#define cleanup_free(doit, x) if (doit) free(x)
#endif
Does that work?
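For concreteness, a rough sketch of what that split might look like, keeping
the cleanup_push/cleanup_pop definitions currently being removed and layering
the suggested cleanup_malloc/cleanup_free on top. The two new macro names are
only the ones proposed above, nothing that exists in the tree yet:

#ifdef USE_PTHREAD

#define cleanup_push(f, a) \
    pthread_cleanup_push((void (*)(void *))(f), (void *)(a))
/*
 * Some definitions of pthread_cleanup_pop() are a macro starting with an
 * end-brace. GCC then complains if we immediately precede that with a label.
 * Hence we insert a dummy statement to appease the compiler in this
 * situation.
 */
#define cleanup_pop(run)        ((void)0); pthread_cleanup_pop(run)

/* Arm a cancellation handler which frees the buffer if the thread is
 * cancelled at a cancellation point before the matching cleanup_free(). */
#define cleanup_malloc(x)       cleanup_push(free, (x))
/* Disarm it again; "doit" says whether to free, i.e. the error path. */
#define cleanup_free(doit, x)   cleanup_pop(doit)

#else /* !USE_PTHREAD */

#define cleanup_push(f, a)      ((void)0)
#define cleanup_pop(run)        ((void)0)
#define cleanup_malloc(x)       ((void)0)
/* No cancellation without pthreads, so just free on the error path. */
#define cleanup_free(doit, x)   do { if (doit) free(x); } while (0)

#endif

The do/while wrapper on the !USE_PTHREAD cleanup_free is only there to keep
the macro statement-safe when it follows a label or an unbraced if.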
>
> diff -r 588d0dc298a4 -r 9bfaf86e061f tools/xenstore/xs.c
> --- a/tools/xenstore/xs.c
> +++ b/tools/xenstore/xs.c
> @@ -99,14 +99,6 @@ struct xs_handle {
> #define mutex_unlock(m) pthread_mutex_unlock(m)
> #define condvar_signal(c) pthread_cond_signal(c)
> #define condvar_wait(c,m) pthread_cond_wait(c,m)
> -#define cleanup_push(f, a) \
> - pthread_cleanup_push((void (*)(void *))(f), (void *)(a))
> -/*
> - * Some definitions of pthread_cleanup_pop() are a macro starting with an
> - * end-brace. GCC then complains if we immediately precede that with a label.
> - * Hence we insert a dummy statement to appease the compiler in this
> - * situation.
> - */
> -#define cleanup_pop(run) ((void)0); pthread_cleanup_pop(run)
>
> #define read_thread_exists(h) (h->read_thr_exists)
>
> @@ -126,8 +118,6 @@ struct xs_handle {
> #define mutex_unlock(m) ((void)0)
> #define condvar_signal(c) ((void)0)
> #define condvar_wait(c,m) ((void)0)
> -#define cleanup_push(f, a) ((void)0)
> -#define cleanup_pop(run) ((void)0)
> #define read_thread_exists(h) (0)
>
> #endif
> @@ -1059,7 +1049,6 @@ static int read_message(struct xs_handle
> msg = malloc(sizeof(*msg));
> if (msg == NULL)
> goto error;
> - cleanup_push(free, msg);
> if (!read_all(h->fd, &msg->hdr, sizeof(msg->hdr), nonblocking)) { /* Cancellation point */
> saved_errno = errno;
> goto error_freemsg;
> @@ -1069,7 +1058,6 @@ static int read_message(struct xs_handle
> body = msg->body = malloc(msg->hdr.len + 1);
> if (body == NULL)
> goto error_freemsg;
> - cleanup_push(free, body);
> if (!read_all(h->fd, body, msg->hdr.len, 0)) { /* Cancellation point */
> saved_errno = errno;
> goto error_freebody;
> @@ -1079,7 +1067,6 @@ static int read_message(struct xs_handle
>
> if (msg->hdr.type == XS_WATCH_EVENT) {
> mutex_lock(&h->watch_mutex);
> - cleanup_push(pthread_mutex_unlock, &h->watch_mutex);
>
> /* Kick users out of their select() loop. */
> if (list_empty(&h->watch_list) &&
> @@ -1091,13 +1078,14 @@ static int read_message(struct xs_handle
>
> condvar_signal(&h->watch_condvar);
>
> - cleanup_pop(1);
> + mutex_unlock(&h->watch_mutex);
> } else {
> mutex_lock(&h->reply_mutex);
>
> /* There should only ever be one response pending! */
> if (!list_empty(&h->reply_list)) {
> mutex_unlock(&h->reply_mutex);
> + saved_errno = EEXIST;
> goto error_freebody;
> }
>
> @@ -1110,9 +1098,11 @@ static int read_message(struct xs_handle
> ret = 0;
>
> error_freebody:
> - cleanup_pop(ret == -1);
> + if (ret)
> + free(body);
> error_freemsg:
> - cleanup_pop(ret == -1);
> + if (ret)
> + free(msg);
> error:
> errno = saved_errno;
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel