
[Xen-devel] [PATCH XEN v6 30/32] tools/libs/call: linux: avoid forking between mmap and madvise



Use pthread_atfork to prevent the application from forking between the
mmap() and the madvise(), which would result in CoW mappings getting
passed to hypercalls.

(largely cribbed from libxl_fork.c)
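
For reference, the prepare/parent/child hook pattern relied on below looks
roughly like this standalone sketch (illustrative only; names such as
hold/release/alloc_dontfork_page and the fixed 4096-byte size are not from
this series):

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    static pthread_mutex_t fork_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* prepare hook: runs in the parent before fork(), so a fork() blocks
     * while any thread holds fork_mutex. */
    static void hold(void)
    {
        int r = pthread_mutex_lock(&fork_mutex);
        assert(!r);
    }

    /* parent and child hooks: drop the lock on both sides after fork(). */
    static void release(void)
    {
        int r = pthread_mutex_unlock(&fork_mutex);
        assert(!r);
    }

    /* Map a page and mark it MADV_DONTFORK without a fork() being able
     * to slip in between the two calls. */
    static void *alloc_dontfork_page(void)
    {
        void *p;

        hold();
        p = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if ( p != MAP_FAILED && madvise(p, 4096, MADV_DONTFORK) )
        {
            munmap(p, 4096);
            p = MAP_FAILED;
        }
        release();

        return p == MAP_FAILED ? NULL : p;
    }

    int main(void)
    {
        int r = pthread_atfork(hold, release, release);
        assert(!r);
        return alloc_dontfork_page() ? EXIT_SUCCESS : EXIT_FAILURE;
    }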

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
v6: New
---
 tools/libs/call/buffer.c |  2 +-
 tools/libs/call/linux.c  | 66 +++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 64 insertions(+), 4 deletions(-)

diff --git a/tools/libs/call/buffer.c b/tools/libs/call/buffer.c
index 2d8fc29..d2acdd4 100644
--- a/tools/libs/call/buffer.c
+++ b/tools/libs/call/buffer.c
@@ -24,7 +24,7 @@
 
 #define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
 
-pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static void cache_lock(xencall_handle *xcall)
 {
diff --git a/tools/libs/call/linux.c b/tools/libs/call/linux.c
index 1485424..64b7be8 100644
--- a/tools/libs/call/linux.c
+++ b/tools/libs/call/linux.c
@@ -20,13 +20,57 @@
 #include <errno.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <pthread.h>
+#include <assert.h>
 
 #include <sys/mman.h>
 #include <sys/ioctl.h>
 
 #include "private.h"
 
-static int set_cloexec(int fd)
+static pthread_mutex_t fork_mutex = PTHREAD_MUTEX_INITIALIZER;
+static int atfork_registered = 0;
+
+static void atfork_lock(void)
+{
+    int r = pthread_mutex_lock(&fork_mutex);
+    assert(!r);
+}
+
+static void atfork_unlock(void)
+{
+    int r = pthread_mutex_unlock(&fork_mutex);
+    assert(!r);
+}
+
+static int atfork_init(void)
+{
+    int rc;
+
+    atfork_lock();
+
+    if ( atfork_registered )
+    {
+        rc = 0;
+        goto out;
+    }
+
+    rc = pthread_atfork(atfork_lock, atfork_unlock, atfork_unlock);
+    if ( rc )
+    {
+        rc = -1;
+        goto out;
+    }
+
+    atfork_registered = 1;
+    rc = 0;
+
+ out:
+    atfork_unlock();
+    return rc;
+}
+
+static int set_cloexec(xencall_handle *xcall, int fd)
 {
     int flags;
 
@@ -68,7 +112,7 @@ int osdep_xencall_open(xencall_handle *xcall)
     {
         /* Fallback to /proc/xen/privcmd */
         fd = open("/proc/xen/privcmd", O_RDWR);
-        if ( fd > -1 && set_cloexec(fd) < 0 )
+        if ( fd > -1 && set_cloexec(xcall, fd) < 0 )
             goto error;
     }
 
@@ -78,6 +122,12 @@ int osdep_xencall_open(xencall_handle *xcall)
         return -1;
     }
 
+    if ( atfork_init() < 0 )
+    {
+        PERROR("Failed to setup atfork");
+        goto error;
+    }
+
     xcall->fd = fd;
     return 0;
 
@@ -107,12 +157,14 @@ void *osdep_alloc_pages(xencall_handle *xcall, size_t npages)
     void *p;
     int rc, saved_errno;
 
+    atfork_lock(); /* Avoid forking between mmap and madvise */
+
     /* Address returned by mmap is page aligned. */
     p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_LOCKED, -1, 0);
     if ( p == MAP_FAILED )
     {
         PERROR("alloc_pages: mmap failed");
-        return NULL;
+        goto out_nomap;
     }
 
     /* Do not copy the VMA to child process on fork. Avoid the page being COW
@@ -124,24 +176,32 @@ void *osdep_alloc_pages(xencall_handle *xcall, size_t npages)
         goto out;
     }
 
+    atfork_unlock();
     return p;
 
 out:
     saved_errno = errno;
     (void)munmap(p, size);
     errno = saved_errno;
+out_nomap:
+    atfork_unlock();
     return NULL;
 }
 
 void osdep_free_pages(xencall_handle *xcall, void *ptr, size_t npages)
 {
     int saved_errno = errno;
+
+    atfork_lock();
+
     /* Recover the VMA flags. Maybe it's not necessary */
     madvise(ptr, npages * PAGE_SIZE, MADV_DOFORK);
 
     munmap(ptr, npages * PAGE_SIZE);
     /* We MUST propagate the hypercall errno, not unmap call's. */
     errno = saved_errno;
+
+    atfork_unlock();
 }
 
 /*
-- 
2.1.4

