
[Xen-changelog] [xen-unstable] libxc: New hcall_buf_{prep,release} pre-mlock interface
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1264157943 0
# Node ID fbe8f32fa257e6d8e02cd4c84ccbe61dd31f1906
# Parent  c06732ac23921ed257106b8ba582008c69b8c9f1
libxc: New hcall_buf_{prep,release} pre-mlock interface

Allow certain performance-critical hypercall wrappers to register their data
buffers via a new interface that 'bounces' them into a pre-mlock'ed,
page-sized, per-thread data area. This saves the cost of an mlock/munlock
pair on every such hypercall, which can be very expensive on modern kernels.
(A sketch of the resulting calling convention follows the patch.)

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/libxc/xc_domain_restore.c |    2 
 tools/libxc/xc_domain_save.c    |    6 -
 tools/libxc/xc_misc.c           |   74 ++++++++++-----------
 tools/libxc/xc_physdev.c        |    6 -
 tools/libxc/xc_private.c        |  135 ++++++++++++++++++++++++++++++++++++----
 tools/libxc/xc_private.h        |   58 +++++++++--------
 tools/libxc/xg_private.c        |   16 ----
 tools/libxc/xg_private.h        |    2 
 8 files changed, 199 insertions(+), 100 deletions(-)

diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xc_domain_restore.c   Fri Jan 22 10:59:03 2010 +0000
@@ -1424,7 +1424,7 @@ int xc_domain_restore(int xc_handle, int
     ctx->p2m   = calloc(dinfo->p2m_size, sizeof(xen_pfn_t));
     pfn_type   = calloc(dinfo->p2m_size, sizeof(unsigned long));
 
-    region_mfn = xg_memalign(PAGE_SIZE, ROUNDUP(
+    region_mfn = xc_memalign(PAGE_SIZE, ROUNDUP(
                               MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
 
     if ( (ctx->p2m == NULL) || (pfn_type == NULL) ||
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xc_domain_save.c      Fri Jan 22 10:59:03 2010 +0000
@@ -1012,9 +1012,9 @@ int xc_domain_save(int xc_handle, int io
     sent_last_iter = dinfo->p2m_size;
 
     /* Setup to_send / to_fix and to_skip bitmaps */
-    to_send = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
+    to_send = xc_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
     to_fix  = calloc(1, BITMAP_SIZE);
-    to_skip = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
+    to_skip = xc_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
 
     if ( !to_send || !to_fix || !to_skip )
     {
@@ -1056,7 +1056,7 @@ int xc_domain_save(int xc_handle, int io
 
     analysis_phase(xc_handle, dom, ctx, to_skip, 0);
 
-    pfn_type   = xg_memalign(PAGE_SIZE, ROUNDUP(
+    pfn_type   = xc_memalign(PAGE_SIZE, ROUNDUP(
                               MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
     pfn_batch  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
     if ( (pfn_type == NULL) || (pfn_batch == NULL) )
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xc_misc.c     Fri Jan 22 10:59:03 2010 +0000
@@ -175,29 +175,29 @@ int xc_hvm_set_pci_intx_level(
     unsigned int level)
 {
     DECLARE_HYPERCALL;
-    struct xen_hvm_set_pci_intx_level arg;
-    int rc;
+    struct xen_hvm_set_pci_intx_level _arg, *arg = &_arg;
+    int rc;
+
+    if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
+    {
+        PERROR("Could not lock memory");
+        return rc;
+    }
 
     hypercall.op     = __HYPERVISOR_hvm_op;
     hypercall.arg[0] = HVMOP_set_pci_intx_level;
-    hypercall.arg[1] = (unsigned long)&arg;
-
-    arg.domid  = dom;
-    arg.domain = domain;
-    arg.bus    = bus;
-    arg.device = device;
-    arg.intx   = intx;
-    arg.level  = level;
-
-    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
-    {
-        PERROR("Could not lock memory");
-        return rc;
-    }
-
-    rc = do_xen_hypercall(xc_handle, &hypercall);
-
-    unlock_pages(&arg, sizeof(arg));
+    hypercall.arg[1] = (unsigned long)arg;
+
+    arg->domid  = dom;
+    arg->domain = domain;
+    arg->bus    = bus;
+    arg->device = device;
+    arg->intx   = intx;
+    arg->level  = level;
+
+    rc = do_xen_hypercall(xc_handle, &hypercall);
+
+    hcall_buf_release((void **)&arg, sizeof(*arg));
 
     return rc;
 }
@@ -208,26 +208,26 @@ int xc_hvm_set_isa_irq_level(
     unsigned int level)
 {
     DECLARE_HYPERCALL;
-    struct xen_hvm_set_isa_irq_level arg;
-    int rc;
+    struct xen_hvm_set_isa_irq_level _arg, *arg = &_arg;
+    int rc;
+
+    if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
+    {
+        PERROR("Could not lock memory");
+        return rc;
+    }
 
     hypercall.op     = __HYPERVISOR_hvm_op;
     hypercall.arg[0] = HVMOP_set_isa_irq_level;
-    hypercall.arg[1] = (unsigned long)&arg;
-
-    arg.domid   = dom;
-    arg.isa_irq = isa_irq;
-    arg.level   = level;
-
-    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
-    {
-        PERROR("Could not lock memory");
-        return rc;
-    }
-
-    rc = do_xen_hypercall(xc_handle, &hypercall);
-
-    unlock_pages(&arg, sizeof(arg));
+    hypercall.arg[1] = (unsigned long)arg;
+
+    arg->domid   = dom;
+    arg->isa_irq = isa_irq;
+    arg->level   = level;
+
+    rc = do_xen_hypercall(xc_handle, &hypercall);
+
+    hcall_buf_release((void **)&arg, sizeof(*arg));
 
     return rc;
 }
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xc_physdev.c
--- a/tools/libxc/xc_physdev.c  Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xc_physdev.c  Fri Jan 22 10:59:03 2010 +0000
@@ -36,7 +36,7 @@ int xc_physdev_map_pirq(int xc_handle,
     map.index = index;
     map.pirq = *pirq;
 
-    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map);
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map, sizeof(map));
 
     if ( !rc )
         *pirq = map.pirq;
@@ -68,7 +68,7 @@ int xc_physdev_map_pirq_msi(int xc_handl
     map.entry_nr = entry_nr;
     map.table_base = table_base;
 
-    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map);
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map, sizeof(map));
 
     if ( !rc )
         *pirq = map.pirq;
@@ -86,7 +86,7 @@ int xc_physdev_unmap_pirq(int xc_handle,
     unmap.domid = domid;
     unmap.pirq = pirq;
 
-    rc = do_physdev_op(xc_handle, PHYSDEVOP_unmap_pirq, &unmap);
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_unmap_pirq, &unmap, sizeof(unmap));
 
     return rc;
 }
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xc_private.c  Fri Jan 22 10:59:03 2010 +0000
@@ -8,6 +8,9 @@
 #include "xc_private.h"
 #include "xg_private.h"
 #include <stdarg.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <unistd.h>
 #include <pthread.h>
 
 static pthread_key_t last_error_pkey;
@@ -126,27 +129,119 @@ void xc_set_error(int code, const char *
     }
 }
 
+#ifdef __sun__
+
+int lock_pages(void *addr, size_t len) { return 0; }
+void unlock_pages(void *addr, size_t len) { }
+
+int hcall_buf_prep(void **addr, size_t len) { return 0; }
+void hcall_buf_release(void **addr, size_t len) { }
+
+#else /* !__sun__ */
+
 int lock_pages(void *addr, size_t len)
 {
-      int e = 0;
-#ifndef __sun__
+      int e;
       void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
       size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
                      PAGE_SIZE - 1) & PAGE_MASK;
       e = mlock(laddr, llen);
-#endif
       return e;
 }
 
 void unlock_pages(void *addr, size_t len)
 {
-#ifndef __sun__
     void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
     size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
                    PAGE_SIZE - 1) & PAGE_MASK;
     safe_munlock(laddr, llen);
+}
+
+static pthread_key_t hcall_buf_pkey;
+static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT;
+struct hcall_buf {
+    void *buf;
+    void *oldbuf;
+};
+
+static void _xc_clean_hcall_buf(void *m)
+{
+    struct hcall_buf *hcall_buf = m;
+
+    if ( hcall_buf )
+    {
+        if ( hcall_buf->buf )
+        {
+            unlock_pages(hcall_buf->buf, PAGE_SIZE);
+            free(hcall_buf->buf);
+        }
+
+        free(hcall_buf);
+    }
+
+    pthread_setspecific(hcall_buf_pkey, NULL);
+}
+
+static void _xc_init_hcall_buf(void)
+{
+    pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf);
+}
+
+int hcall_buf_prep(void **addr, size_t len)
+{
+    struct hcall_buf *hcall_buf;
+
+    pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf);
+
+    hcall_buf = pthread_getspecific(hcall_buf_pkey);
+    if ( !hcall_buf )
+    {
+        hcall_buf = calloc(1, sizeof(*hcall_buf));
+        if ( !hcall_buf )
+            goto out;
+        pthread_setspecific(hcall_buf_pkey, hcall_buf);
+    }
+
+    if ( !hcall_buf->buf )
+    {
+        hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE);
+        if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) )
+        {
+            free(hcall_buf->buf);
+            hcall_buf->buf = NULL;
+            goto out;
+        }
+    }
+
+    if ( (len < PAGE_SIZE) && !hcall_buf->oldbuf )
+    {
+        memcpy(hcall_buf->buf, *addr, len);
+        hcall_buf->oldbuf = *addr;
+        *addr = hcall_buf->buf;
+        return 0;
+    }
+
+ out:
+    return lock_pages(*addr, len);
+}
+
+void hcall_buf_release(void **addr, size_t len)
+{
+    struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey);
+
+    if ( hcall_buf && (hcall_buf->buf == *addr) )
+    {
+        memcpy(hcall_buf->oldbuf, *addr, len);
+        *addr = hcall_buf->oldbuf;
+        hcall_buf->oldbuf = NULL;
+    }
+    else
+    {
+        unlock_pages(*addr, len);
+    }
+}
+
 #endif
-}
 
 /* NB: arr must be locked */
 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
@@ -169,21 +264,21 @@ int xc_mmuext_op(
     DECLARE_HYPERCALL;
     long ret = -EINVAL;
 
+    if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
+
     hypercall.op     = __HYPERVISOR_mmuext_op;
     hypercall.arg[0] = (unsigned long)op;
     hypercall.arg[1] = (unsigned long)nr_ops;
     hypercall.arg[2] = (unsigned long)0;
     hypercall.arg[3] = (unsigned long)dom;
 
-    if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out1;
-    }
-
     ret = do_xen_hypercall(xc_handle, &hypercall);
 
-    unlock_pages(op, nr_ops*sizeof(*op));
+    hcall_buf_release((void **)&op, nr_ops*sizeof(*op));
 
  out1:
     return ret;
@@ -656,6 +751,22 @@ int xc_ffs64(uint64_t x)
     return l ? xc_ffs32(l) : h ? xc_ffs32(h) + 32 : 0;
 }
 
+void *xc_memalign(size_t alignment, size_t size)
+{
+#if defined(_POSIX_C_SOURCE) && !defined(__sun__)
+    int ret;
+    void *ptr;
+    ret = posix_memalign(&ptr, alignment, size);
+    if (ret != 0)
+        return NULL;
+    return ptr;
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+    return valloc(size);
+#else
+    return memalign(alignment, size);
+#endif
+}
+
 /*
  * Local variables:
  * mode: C
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xc_private.h  Fri Jan 22 10:59:03 2010 +0000
@@ -78,8 +78,13 @@ void xc_set_error(int code, const char *
 #define PERROR(_m, _a...) xc_set_error(XC_INTERNAL_ERROR, _m " (%d = %s)", \
                                        ## _a , errno, safe_strerror(errno))
 
+void *xc_memalign(size_t alignment, size_t size);
+
 int lock_pages(void *addr, size_t len);
 void unlock_pages(void *addr, size_t len);
+
+int hcall_buf_prep(void **addr, size_t len);
+void hcall_buf_release(void **addr, size_t len);
 
 static inline void safe_munlock(const void *addr, size_t len)
 {
@@ -101,21 +106,22 @@ static inline int do_xen_version(int xc_
     return do_xen_hypercall(xc_handle, &hypercall);
 }
 
-static inline int do_physdev_op(int xc_handle, int cmd, void *op)
-{
-    int ret = -1;
-
-    DECLARE_HYPERCALL;
+static inline int do_physdev_op(int xc_handle, int cmd, void *op, size_t len)
+{
+    int ret = -1;
+
+    DECLARE_HYPERCALL;
+
+    if ( hcall_buf_prep(&op, len) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
+
     hypercall.op = __HYPERVISOR_physdev_op;
     hypercall.arg[0] = (unsigned long) cmd;
     hypercall.arg[1] = (unsigned long) op;
 
-    if ( lock_pages(op, sizeof(*op)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out1;
-    }
-
     if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
     {
         if ( errno == EACCES )
@@ -123,7 +129,7 @@ static inline int do_physdev_op(int xc_h
                     " rebuild the user-space tool set?\n");
     }
 
-    unlock_pages(op, sizeof(*op));
+    hcall_buf_release(&op, len);
 
 out1:
     return ret;
@@ -133,18 +139,18 @@ static inline int do_domctl(int xc_handl
 {
     int ret = -1;
     DECLARE_HYPERCALL;
+
+    if ( hcall_buf_prep((void **)&domctl, sizeof(*domctl)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
 
     domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
 
     hypercall.op     = __HYPERVISOR_domctl;
     hypercall.arg[0] = (unsigned long)domctl;
 
-    if ( lock_pages(domctl, sizeof(*domctl)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out1;
-    }
-
     if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
     {
         if ( errno == EACCES )
@@ -152,7 +158,7 @@ static inline int do_domctl(int xc_handl
                     " rebuild the user-space tool set?\n");
     }
 
-    unlock_pages(domctl, sizeof(*domctl));
+    hcall_buf_release((void **)&domctl, sizeof(*domctl));
 
  out1:
     return ret;
@@ -162,18 +168,18 @@ static inline int do_sysctl(int xc_handl
 {
     int ret = -1;
     DECLARE_HYPERCALL;
+
+    if ( hcall_buf_prep((void **)&sysctl, sizeof(*sysctl)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
 
     sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 
     hypercall.op     = __HYPERVISOR_sysctl;
     hypercall.arg[0] = (unsigned long)sysctl;
 
-    if ( lock_pages(sysctl, sizeof(*sysctl)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out1;
-    }
-
     if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
     {
         if ( errno == EACCES )
@@ -181,7 +187,7 @@ static inline int do_sysctl(int xc_handl
                     " rebuild the user-space tool set?\n");
     }
 
-    unlock_pages(sysctl, sizeof(*sysctl));
+    hcall_buf_release((void **)&sysctl, sizeof(*sysctl));
 
  out1:
     return ret;
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xg_private.c  Fri Jan 22 10:59:03 2010 +0000
@@ -183,22 +183,6 @@ __attribute__((weak))
     return -1;
 }
 
-void *xg_memalign(size_t alignment, size_t size)
-{
-#if defined(_POSIX_C_SOURCE) && !defined(__sun__)
-    int ret;
-    void *ptr;
-    ret = posix_memalign(&ptr, alignment, size);
-    if (ret != 0)
-        return NULL;
-    return ptr;
-#elif defined(__NetBSD__) || defined(__OpenBSD__)
-    return valloc(size);
-#else
-    return memalign(alignment, size);
-#endif
-}
-
 /*
  * Local variables:
  * mode: C
diff -r c06732ac2392 -r fbe8f32fa257 tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h  Thu Jan 21 15:13:00 2010 +0000
+++ b/tools/libxc/xg_private.h  Fri Jan 22 10:59:03 2010 +0000
@@ -177,6 +177,4 @@ int pin_table(int xc_handle, unsigned in
 int pin_table(int xc_handle, unsigned int type, unsigned long mfn,
               domid_t dom);
 
-void *xg_memalign(size_t alignment, size_t size);
-
 #endif /* XG_PRIVATE_H */
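
For reference, the calling convention this patch establishes in the converted
wrappers looks roughly as follows. This is a minimal sketch modelled on the
xc_hvm_set_isa_irq_level() hunk above: 'xc_do_foo', 'struct xen_foo_args' and
'HVMOP_foo' are hypothetical stand-ins, while hcall_buf_prep/hcall_buf_release,
DECLARE_HYPERCALL, PERROR and do_xen_hypercall are the real libxc machinery
touched by this patch.

/* Sketch of the pre-mlock'ed bounce-buffer pattern introduced above. */
#include "xc_private.h"

struct xen_foo_args { uint32_t domid; };   /* hypothetical argument struct */
#define HVMOP_foo 0xfff                    /* hypothetical hypercall sub-op */

int xc_do_foo(int xc_handle, uint32_t dom)
{
    DECLARE_HYPERCALL;
    struct xen_foo_args _arg, *arg = &_arg;
    int rc;

    /* Redirect 'arg' at the thread's pre-mlock'ed bounce page if the
     * argument fits; otherwise this degenerates to plain lock_pages(). */
    if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    arg->domid = dom;        /* fill in via the (possibly bounced) pointer */

    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_foo;
    hypercall.arg[1] = (unsigned long)arg;

    rc = do_xen_hypercall(xc_handle, &hypercall);

    /* Copy any output back to the caller's struct and release the slot
     * (or munlock, if the fallback path was taken). */
    hcall_buf_release((void **)&arg, sizeof(*arg));

    return rc;
}

Note that hcall_buf_prep() only bounces a buffer smaller than PAGE_SIZE, and
only one per thread at a time (oldbuf doubles as the in-use flag); larger or
nested buffers transparently fall back to the old mlock/munlock path, so
unconverted callers remain correct.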
