[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCHv2] valgrind support for Xen privcmd ioctls / hypercalls



Below is an updated version of my patch to valgrind to allow it to
understand privcmd ioctls and some hypercalls. I have implemented a
bunch more hypercalls, including all those I saw when creating a pv or
hvm domain with my particular configuration.

As before, I bailed on the normal valgrind policy of duplicating the
interface headers, so usage is still:

        ./configure --with-xen=/path/to/headers

The path needs to be to an installed set of xen headers, such that
"#include <xen/xen.h>" is valid e.g. dist/install/usr/include in your
built Xen tree or /usr/include or something equivalent (e.g. libxen-dev
installed under Debian etc.)

Not extensively tested, caveat emptor etc etc.

diff --git a/configure.in b/configure.in
index 62e1837..e71ecd6 100644
--- a/configure.in
+++ b/configure.in
@@ -1558,6 +1558,11 @@ elif test x$VGCONF_PLATFORM_SEC_CAPS = xPPC32_AIX5 ; then
   mflag_secondary=-q32
 fi

+AC_ARG_WITH(xen,
+   [  --with-xen=             Specify location of Xen headers],
+   XEN_CFLAGS=-I$withval
+)
+AC_SUBST(XEN_CFLAGS)

 AC_ARG_WITH(mpicc,
    [  --with-mpicc=           Specify name of MPI2-ised C compiler],
diff --git a/coregrind/Makefile.am b/coregrind/Makefile.am
index d9d1bca..d7216f9 100644
--- a/coregrind/Makefile.am
+++ b/coregrind/Makefile.am
@@ -211,6 +211,7 @@ noinst_HEADERS = \
        m_syswrap/priv_syswrap-aix5.h \
        m_syswrap/priv_syswrap-darwin.h \
        m_syswrap/priv_syswrap-main.h \
+       m_syswrap/priv_syswrap-xen.h \
        m_ume/priv_ume.h

 #----------------------------------------------------------------------------
@@ -338,6 +339,7 @@ COREGRIND_SOURCES_COMMON = \
        m_syswrap/syswrap-ppc64-aix5.c \
        m_syswrap/syswrap-x86-darwin.c \
        m_syswrap/syswrap-amd64-darwin.c \
+       m_syswrap/syswrap-xen.c \
        m_ume/elf.c \
        m_ume/macho.c \
        m_ume/main.c \
@@ -350,7 +352,7 @@ nodist_libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_SOURCES 
= \
 libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CPPFLAGS = \
     $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
 libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CFLAGS = \
-    $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+    $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) @XEN_CFLAGS@
 libcoregrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CCASFLAGS = \
     $(AM_CCASFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
 if VGCONF_HAVE_PLATFORM_SEC
diff --git a/coregrind/m_debuginfo/debuginfo.c 
b/coregrind/m_debuginfo/debuginfo.c
index 08babd0..5272fae 100644
--- a/coregrind/m_debuginfo/debuginfo.c
+++ b/coregrind/m_debuginfo/debuginfo.c
@@ -637,6 +637,11 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV )
    if (!filename)
       return 0;

+   if (strncmp(filename, "/proc/xen/", 10) == 0) {
+      //VG_(printf)("ignoring mmap of %s\n", filename);
+      return 0;
+   }
+
    if (debug)
       VG_(printf)("di_notify_mmap-2: %s\n", filename);

diff --git a/coregrind/m_syswrap/priv_syswrap-xen.h 
b/coregrind/m_syswrap/priv_syswrap-xen.h
new file mode 100644
index 0000000..42505bb
--- /dev/null
+++ b/coregrind/m_syswrap/priv_syswrap-xen.h
@@ -0,0 +1,13 @@
+#ifndef __PRIV_SYSWRAP_XEN_H
+#define __PRIV_SYSWRAP_XEN_H
+
+DECL_TEMPLATE(xen, ioctl_privcmd_hypercall);
+DECL_TEMPLATE(xen, ioctl_privcmd_mmap);
+DECL_TEMPLATE(xen, ioctl_privcmd_mmapbatch);
+DECL_TEMPLATE(xen, ioctl_privcmd_mmapbatch_v2);
+
+#endif   // __PRIV_SYSWRAP_XEN_H
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_syswrap/syswrap-linux.c 
b/coregrind/m_syswrap/syswrap-linux.c
index 247402d..baa33c2 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -57,7 +57,7 @@
 #include "priv_types_n_macros.h"
 #include "priv_syswrap-generic.h"
 #include "priv_syswrap-linux.h"
-
+#include "priv_syswrap-xen.h"

 // Run a thread from beginning to end and return the thread's
 // scheduler-return-code.
@@ -4821,6 +4821,20 @@ PRE(sys_ioctl)
       }
       break;

+
+   case VKI_XEN_IOCTL_PRIVCMD_HYPERCALL:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_hypercall)(tid, layout, arrghs, 
status, flags);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAP:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_mmap)(tid, layout, arrghs, status, 
flags);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_mmapbatch)(tid, layout, arrghs, 
status, flags);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2:
+      WRAPPER_PRE_NAME(xen, ioctl_privcmd_mmapbatch_v2)(tid, layout, arrghs, 
status, flags);
+      break;
+
    default:
       /* EVIOC* are variable length and return size written on success */
       switch (ARG2 & ~(_VKI_IOC_SIZEMASK << _VKI_IOC_SIZESHIFT)) {
@@ -5633,6 +5647,19 @@ POST(sys_ioctl)
       }
       break;

+   case VKI_XEN_IOCTL_PRIVCMD_HYPERCALL:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_hypercall)(tid, arrghs, status);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAP:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_mmap)(tid, arrghs, status);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_mmapbatch)(tid, arrghs, status);
+      break;
+   case VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2:
+      WRAPPER_POST_NAME(xen, ioctl_privcmd_mmapbatch_v2)(tid, arrghs, status);
+      break;
+
    default:
       /* EVIOC* are variable length and return size written on success */
       switch (ARG2 & ~(_VKI_IOC_SIZEMASK << _VKI_IOC_SIZESHIFT)) {
diff --git a/coregrind/m_syswrap/syswrap-xen.c 
b/coregrind/m_syswrap/syswrap-xen.c
new file mode 100644
index 0000000..5e87f8e
--- /dev/null
+++ b/coregrind/m_syswrap/syswrap-xen.c
@@ -0,0 +1,751 @@
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
+#include "pub_core_transtab.h"     // VG_(discard_translations)
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_options.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_signals.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h"
+#include "priv_syswrap-xen.h"
+
+#include <stdint.h>
+
+#define __XEN_TOOLS__
+
+#include <xen/xen.h>
+#include <xen/sysctl.h>
+#include <xen/domctl.h>
+#include <xen/memory.h>
+#include <xen/event_channel.h>
+#include <xen/version.h>
+
+#include <xen/hvm/hvm_op.h>
+
+#define PRE(name)       DEFN_PRE_TEMPLATE(xen, name)
+#define POST(name)      DEFN_POST_TEMPLATE(xen, name)
+
+PRE(ioctl_privcmd_hypercall)
+{
+   struct vki_xen_privcmd_hypercall *args = (struct vki_xen_privcmd_hypercall 
*)(ARG3);
+
+   if (!args)
+      return;
+
+
+   switch (args->op) {
+   case __HYPERVISOR_memory_op:
+      PRINT("__HYPERVISOR_memory_op ( %lld, %llx )", args->arg[0], 
args->arg[1]);
+
+      switch (args->arg[0]) {
+      case XENMEM_set_memory_map: {
+        xen_foreign_memory_map_t *arg = (xen_foreign_memory_map_t *)(unsigned 
int)args->arg[1];
+        PRE_MEM_READ("XENMEM_set_memory_map", (Addr)&arg->domid, 
sizeof(arg->domid));
+        PRE_MEM_READ("XENMEM_set_memory_map", (Addr)&arg->map, 
sizeof(arg->map));
+        break;
+      }
+      case XENMEM_increase_reservation:
+      case XENMEM_decrease_reservation:
+      case XENMEM_populate_physmap: {
+        struct xen_memory_reservation *memory_reservation = (struct 
xen_memory_reservation *)(unsigned int)args->arg[1];
+        char *which;
+
+        switch (args->arg[0]) {
+        case XENMEM_increase_reservation:
+           which = "XENMEM_increase_reservation";
+           break;
+        case XENMEM_decrease_reservation:
+           which = "XENMEM_decrease_reservation";
+           PRE_MEM_READ(which, (Addr)memory_reservation->extent_start.p, 
sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+           break;
+        case XENMEM_populate_physmap:
+           which = "XENMEM_populate_physmap";
+           PRE_MEM_READ(which, (Addr)memory_reservation->extent_start.p, 
sizeof(xen_pfn_t) * memory_reservation->nr_extents);
+           break;
+        default:
+           which = "XENMEM_unknown";
+           break;
+        }
+
+        PRE_MEM_READ(which, (Addr)&memory_reservation->extent_start, 
sizeof(memory_reservation->extent_start));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->nr_extents, 
sizeof(memory_reservation->nr_extents));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->extent_order, 
sizeof(memory_reservation->extent_order));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->mem_flags, 
sizeof(memory_reservation->mem_flags));
+        PRE_MEM_READ(which, (Addr)&memory_reservation->domid, 
sizeof(memory_reservation->domid));
+
+        break;
+      }
+
+      default:
+        VG_(printf)("pre __HYPERVISOR_memory_op unknown command %lld\n", 
args->arg[0]);
+        break;
+      }
+      break;
+
+   case __HYPERVISOR_mmuext_op: {
+          mmuext_op_t *ops = (void *)(unsigned int)args->arg[0];
+          unsigned int i, nr = args->arg[1];
+          //unsigned int *pdone = (void *)(unsigned int)args->arg[2];
+          //unsigned int foreigndom = args->arg[3];
+          //VG_(printf)("HYPERVISOR_mmuext_op %d ops at %p on dom%d done at 
%p\n", nr, ops, foreigndom, pdone);
+          for (i=0; i<nr; i++) {
+                  mmuext_op_t *op = ops + i;
+                  PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP", (Addr)&op->cmd, 
sizeof(op->cmd));
+                  switch(op->cmd) {
+                  case MMUEXT_PIN_L1_TABLE:
+                  case MMUEXT_PIN_L2_TABLE:
+                  case MMUEXT_PIN_L3_TABLE:
+                  case MMUEXT_PIN_L4_TABLE:
+                  case MMUEXT_UNPIN_TABLE:
+                  case MMUEXT_NEW_BASEPTR:
+                  case MMUEXT_CLEAR_PAGE:
+                  case MMUEXT_COPY_PAGE:
+                  case MMUEXT_MARK_SUPER:
+                  case MMUEXT_UNMARK_SUPER:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn", 
(Addr)&op->arg1.mfn, sizeof(op->arg1.mfn));
+                          break;
+
+                  case MMUEXT_INVLPG_LOCAL:
+                  case MMUEXT_INVLPG_ALL:
+                  case MMUEXT_SET_LDT:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn", 
(Addr)&op->arg1.linear_addr, sizeof(op->arg1.linear_addr));
+                          break;
+
+                  case MMUEXT_TLB_FLUSH_LOCAL:
+                  case MMUEXT_TLB_FLUSH_MULTI:
+                  case MMUEXT_INVLPG_MULTI:
+                  case MMUEXT_TLB_FLUSH_ALL:
+                  case MMUEXT_FLUSH_CACHE:
+                  case MMUEXT_NEW_USER_BASEPTR:
+                  case MMUEXT_FLUSH_CACHE_GLOBAL:
+                          /* None */
+                          break;
+                  }
+
+                  switch(op->cmd) {
+                  case MMUEXT_SET_LDT:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents", 
(Addr)&op->arg2.nr_ents, sizeof(op->arg2.nr_ents));
+                          break;
+
+                  case MMUEXT_TLB_FLUSH_MULTI:
+                  case MMUEXT_INVLPG_MULTI:
+                          /* How many??? */
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask", 
(Addr)&op->arg2.vcpumask, sizeof(op->arg2.vcpumask));
+                          break;
+
+                  case MMUEXT_COPY_PAGE:
+                          PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn", 
(Addr)&op->arg2.src_mfn, sizeof(op->arg2.src_mfn));
+                          break;
+
+                  case MMUEXT_PIN_L1_TABLE:
+                  case MMUEXT_PIN_L2_TABLE:
+                  case MMUEXT_PIN_L3_TABLE:
+                  case MMUEXT_PIN_L4_TABLE:
+                  case MMUEXT_UNPIN_TABLE:
+                  case MMUEXT_NEW_BASEPTR:
+                  case MMUEXT_TLB_FLUSH_LOCAL:
+                  case MMUEXT_INVLPG_LOCAL:
+                  case MMUEXT_TLB_FLUSH_ALL:
+                  case MMUEXT_INVLPG_ALL:
+                  case MMUEXT_FLUSH_CACHE:
+                  case MMUEXT_NEW_USER_BASEPTR:
+                  case MMUEXT_CLEAR_PAGE:
+                  case MMUEXT_FLUSH_CACHE_GLOBAL:
+                  case MMUEXT_MARK_SUPER:
+                  case MMUEXT_UNMARK_SUPER:
+                          /* None */
+                          break;
+                  }
+          }
+          break;
+   }
+
+   case __HYPERVISOR_event_channel_op:
+   case __HYPERVISOR_event_channel_op_compat: {
+      __vki_u32 cmd;
+      void *arg;
+      int compat = 0;
+
+      if (args->op == __HYPERVISOR_event_channel_op) {
+        cmd = args->arg[0];
+        arg = (void *)(unsigned int)args->arg[1];
+      } else {
+        struct evtchn_op *evtchn = (struct evtchn_op *)(unsigned 
int)args->arg[0];
+        cmd = evtchn->cmd;
+        arg = &evtchn->u;
+        compat = 1;
+      }
+      PRINT("__HYPERVISOR_event_channel_op ( %d, %p )%s", cmd, arg, compat ? " 
compat" : "");
+
+      switch (cmd) {
+      case EVTCHNOP_alloc_unbound: {
+        struct evtchn_alloc_unbound *alloc_unbound = arg;
+        PRE_MEM_READ("EVTCHNOP_alloc_unbound", (Addr)&alloc_unbound->dom, 
sizeof(alloc_unbound->dom));
+        PRE_MEM_READ("EVTCHNOP_alloc_unbound", 
(Addr)&alloc_unbound->remote_dom, sizeof(alloc_unbound->remote_dom));
+        break;
+      }
+      default:
+        VG_(printf)("pre __HYPERVISOR_event_channel_op unknown command %d\n", 
cmd);
+        break;
+      }
+      break;
+   }
+
+   case __HYPERVISOR_xen_version:
+      PRINT("__HYPERVISOR_xen_version ( %lld, %llx )", args->arg[0], 
args->arg[1]);
+
+      switch (args->arg[0]) {
+      case XENVER_version:
+      case XENVER_extraversion:
+      case XENVER_compile_info:
+      case XENVER_capabilities:
+      case XENVER_changeset:
+      case XENVER_platform_parameters:
+      case XENVER_get_features:
+      case XENVER_pagesize:
+      case XENVER_guest_handle:
+      case XENVER_commandline:
+        /* No inputs */
+        break;
+
+      default:
+        VG_(printf)("pre __HYPERVISOR_xen_version unknown command %lld\n", 
args->arg[0]);
+        break;
+      }
+      break;
+   case __HYPERVISOR_sysctl: {
+      struct xen_sysctl *sysctl = (struct xen_sysctl *)(unsigned 
int)args->arg[0];
+
+      PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);
+
+      /* Single argument hypercall */
+      PRE_MEM_READ("hypercall", ARG3, 8 + ( 8 * 1 ) );
+
+      /*
+       * Common part of xen_sysctl:
+       *    uint32_t cmd;
+       *    uint32_t interface_version;
+       */
+      PRE_MEM_READ("__HYPERVISOR_sysctl", args->arg[0], sizeof(uint32_t) + 
sizeof(uint32_t));
+
+      if (!sysctl || sysctl->interface_version != XEN_SYSCTL_INTERFACE_VERSION)
+        /* BUG ? */
+        return;
+
+#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field) 
PRE_MEM_READ("XEN_SYSCTL_" # _sysctl, \
+                                                        
(Addr)&sysctl->u._union._field, \
+                                                        
sizeof(sysctl->u._union._field))
+#define PRE_XEN_SYSCTL_READ(_sysctl, _field) __PRE_XEN_SYSCTL_READ(_sysctl, 
_sysctl, _field)
+      switch (sysctl->cmd) {
+      case XEN_SYSCTL_getdomaininfolist:
+        PRE_XEN_SYSCTL_READ(getdomaininfolist, first_domain);
+        PRE_XEN_SYSCTL_READ(getdomaininfolist, max_domains);
+        PRE_XEN_SYSCTL_READ(getdomaininfolist, buffer);
+        break;
+
+      case XEN_SYSCTL_cpupool_op:
+        /* yes the interface is this fucking barking */
+        PRE_XEN_SYSCTL_READ(cpupool_op, op);
+
+        switch(sysctl->u.cpupool_op.op) {
+        case XEN_SYSCTL_CPUPOOL_OP_CREATE:
+        case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
+        case XEN_SYSCTL_CPUPOOL_OP_INFO:
+        case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
+        case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
+        case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
+           PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
+        }
+
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_CREATE)
+           PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);
+
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
+           PRE_XEN_SYSCTL_READ(cpupool_op, domid);
+
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
+            sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_RMCPU)
+           PRE_XEN_SYSCTL_READ(cpupool_op, cpu);
+
+        break;
+
+      case XEN_SYSCTL_physinfo:
+        /* No input params */
+        break;
+
+      default:
+        VG_(printf)("pre sysctl version %x unknown cmd %d\n",
+                    sysctl->interface_version, sysctl->cmd);
+        break;
+      }
+#undef PRE_XEN_SYSCTL_READ
+#undef __PRE_XEN_SYSCTL_READ
+   }
+      break;
+
+   case __HYPERVISOR_domctl: {
+      struct xen_domctl *domctl = (struct xen_domctl *)(unsigned 
int)args->arg[0];
+
+      PRINT("__HYPERVISOR_domctl ( %d )", domctl->cmd);
+
+      /* Single argument hypercall */
+      PRE_MEM_READ("hypercall", ARG3, 8 + ( 8 * 1 ) );
+
+      /*
+       * Common part of xen_domctl:
+       *    uint32_t cmd;
+       *    uint32_t interface_version;
+       *    domid_t  domain;
+       */
+      PRE_MEM_READ("__HYPERVISOR_domctl", args->arg[0], sizeof(uint32_t) + 
sizeof(uint32_t) + sizeof(domid_t));
+
+      if (!domctl || domctl->interface_version != XEN_DOMCTL_INTERFACE_VERSION)
+        /* BUG ? */
+        return;
+
+      //PRE_REG_READ1(long, "__HYPERVISOR_domctl",);
+#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field) 
PRE_MEM_READ("XEN_DOMCTL_" # _domctl, \
+                                                        
(Addr)&domctl->u._union._field, \
+                                                        
sizeof(domctl->u._union._field))
+#define PRE_XEN_DOMCTL_READ(_domctl, _field) __PRE_XEN_DOMCTL_READ(_domctl, 
_domctl, _field)
+
+      switch (domctl->cmd) {
+      case XEN_DOMCTL_destroydomain:
+      case XEN_DOMCTL_pausedomain:
+      case XEN_DOMCTL_max_vcpus:
+      case XEN_DOMCTL_get_address_size:
+      case XEN_DOMCTL_gettscinfo:
+      case XEN_DOMCTL_getdomaininfo:
+      case XEN_DOMCTL_unpausedomain:
+        /* No input fields. */
+        break;
+
+      case XEN_DOMCTL_createdomain:
+        PRE_XEN_DOMCTL_READ(createdomain, ssidref);
+        PRE_XEN_DOMCTL_READ(createdomain, handle);
+        PRE_XEN_DOMCTL_READ(createdomain, flags);
+        break;
+
+      case XEN_DOMCTL_max_mem:
+        PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
+        break;
+
+      case XEN_DOMCTL_set_address_size:
+        __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
+        break;
+
+      case XEN_DOMCTL_settscinfo:
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
+        __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
+        break;
+
+      case XEN_DOMCTL_hypercall_init:
+        PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
+        break;
+
+      case XEN_DOMCTL_getvcpuinfo:
+        PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
+        break;
+
+      case XEN_DOMCTL_getvcpuaffinity:
+        __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
+        break;
+
+      case XEN_DOMCTL_setvcpuaffinity:
+        __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
+        PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity",
+                     (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
+                       domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+        break;
+
+      case XEN_DOMCTL_getvcpucontext:
+        __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
+        break;
+
+      case XEN_DOMCTL_setvcpucontext:
+        __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
+        __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
+        break;
+
+      case XEN_DOMCTL_set_cpuid:
+        PRE_MEM_READ("XEN_DOMCTL_set_cpuid", (Addr)&domctl->u.cpuid, 
sizeof(domctl->u.cpuid));
+        break;
+      default:
+        VG_(printf)("pre domctl version %x unknown cmd %d on domain %d\n",
+                    domctl->interface_version, domctl->cmd, domctl->domain);
+        break;
+      }
+#undef PRE_XEN_DOMCTL_READ
+#undef __PRE_XEN_DOMCTL_READ
+   }
+      break;
+
+   case __HYPERVISOR_hvm_op: {
+      unsigned long op = args->arg[0];
+      void *arg = (void *)(unsigned long)args->arg[1];
+
+      PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);
+
+      //PRE_REG_READ1(long, "__HYPERVISOR_hvm_op",);
+#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field) PRE_MEM_READ("XEN_HVMOP_" 
# _hvm_op, \
+                                                                  
(Addr)&((_type*)arg)->_field, \
+                                                        
sizeof(((_type*)arg)->_field))
+#define PRE_XEN_HVMOP_READ(_hvm_op, _field) __PRE_XEN_HVMOP_READ(_hvm_op, 
"xen_hvm_" # _hvm_op "_t", _field)
+
+      switch (op) {
+      case HVMOP_set_param:
+        __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, domid);
+        __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, index);
+        __PRE_XEN_HVMOP_READ(set_param, xen_hvm_param_t, value);
+        break;
+
+      case HVMOP_get_param:
+        __PRE_XEN_HVMOP_READ(get_param, xen_hvm_param_t, domid);
+        __PRE_XEN_HVMOP_READ(get_param, xen_hvm_param_t, index);
+        break;
+
+      default:
+        VG_(printf)("pre hvm_op unknown OP %ld\n", op);
+        break;
+#undef __PRE_XEN_HVMOP_READ
+#undef PRE_XEN_HVMOP_READ
+      }
+   }
+      break;
+
+   default:
+      VG_(printf)("pre unknown hypercall %lld ( %#llx, %#llx, %#llx, %#llx, 
%#llx )\n",
+                 args->op, args->arg[0], args->arg[1], args->arg[2], 
args->arg[3], args->arg[4]);
+   }
+}
+
+POST(ioctl_privcmd_hypercall)
+{
+   struct vki_xen_privcmd_hypercall *args = (struct vki_xen_privcmd_hypercall 
*)(ARG3);
+
+   if (!args)
+      return;
+
+   switch (args->op) {
+   case __HYPERVISOR_memory_op:
+      switch (args->arg[0]) {
+      case XENMEM_set_memory_map:
+      case XENMEM_decrease_reservation:
+        /* No outputs */
+        break;
+      case XENMEM_increase_reservation:
+      case XENMEM_populate_physmap: {
+        struct xen_memory_reservation *memory_reservation = (struct 
xen_memory_reservation *)(unsigned int)args->arg[1];
+
+        POST_MEM_WRITE((Addr)memory_reservation->extent_start.p, 
sizeof(xen_pfn_t) * ARG1);
+      }
+        break;
+
+      default:
+        VG_(printf)("post __HYPERVISOR_memory_op unknown command %lld\n", 
args->arg[0]);
+        break;
+      }
+      break;
+
+   case __HYPERVISOR_mmuext_op: {
+          //mmuext_op_t *ops = (void *)(unsigned int)args->arg[0];
+          //unsigned int nr = args->arg[1];
+          unsigned int *pdone = (void *)(unsigned int)args->arg[2];
+          //unsigned int foreigndom = args->arg[3];
+          /* simplistic */
+          POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
+          break;
+   }
+
+   case __HYPERVISOR_event_channel_op:
+   case __HYPERVISOR_event_channel_op_compat: {
+      __vki_u32 cmd;
+      void *arg;
+
+      if (args->op == __HYPERVISOR_event_channel_op) {
+        cmd = args->arg[0];
+        arg = (void *)(unsigned int)args->arg[1];
+      } else {
+        struct evtchn_op *evtchn = (struct evtchn_op *)(unsigned 
int)args->arg[0];
+        cmd = evtchn->cmd;
+        arg = &evtchn->u;
+      }
+      switch (cmd) {
+      case EVTCHNOP_alloc_unbound: {
+        struct evtchn_alloc_unbound *alloc_unbound = arg;
+        POST_MEM_WRITE((Addr)&alloc_unbound->port, 
sizeof(alloc_unbound->port));
+        break;
+      }
+      default:
+        VG_(printf)("post __HYPERVISOR_event_channel_op unknown command %d\n", 
cmd);
+        break;
+      }
+      break;
+
+   }
+
+   case __HYPERVISOR_xen_version:
+      switch (args->arg[0]) {
+      case XENVER_version:
+        /* No outputs */
+        break;
+      case XENVER_extraversion:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_extraversion_t));
+        break;
+      case XENVER_compile_info:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_compile_info_t));
+        break;
+      case XENVER_capabilities:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_capabilities_info_t));
+        break;
+      case XENVER_changeset:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_changeset_info_t));
+        break;
+      case XENVER_platform_parameters:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_platform_parameters_t));
+        break;
+      case XENVER_get_features:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_feature_info_t));
+        break;
+      case XENVER_pagesize:
+        /* No outputs */
+        break;
+      case XENVER_guest_handle:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_domain_handle_t));
+        break;
+      case XENVER_commandline:
+        POST_MEM_WRITE((Addr)args->arg[1], sizeof(xen_commandline_t));
+        break;
+      default:
+        VG_(printf)("post __HYPERVISOR_xen_version unknown command %lld\n", 
args->arg[0]);
+        break;
+      }
+      break;
+
+   case __HYPERVISOR_sysctl: {
+      struct xen_sysctl *sysctl = (struct xen_sysctl *)(unsigned 
int)args->arg[0];
+
+      if (!sysctl || sysctl->interface_version != XEN_SYSCTL_INTERFACE_VERSION)
+        return;
+
+#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field) 
POST_MEM_WRITE((Addr)&sysctl->u._union._field, sizeof(sysctl->u._union._field));
+#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) 
__POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
+      switch (sysctl->cmd) {
+      case XEN_SYSCTL_getdomaininfolist:
+        POST_XEN_SYSCTL_WRITE(getdomaininfolist, num_domains);
+        POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist.buffer.p,
+                       sizeof(xen_domctl_getdomaininfo_t) * 
sysctl->u.getdomaininfolist.num_domains);
+        break;
+
+      case XEN_SYSCTL_cpupool_op:
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_CREATE ||
+            sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO)
+           POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO) {
+           POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
+           POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
+        }
+        if (sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_INFO ||
+            sysctl->u.cpupool_op.op == XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
+           POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
+        break;
+
+      case XEN_SYSCTL_physinfo:
+        POST_XEN_SYSCTL_WRITE(physinfo, threads_per_core);
+        POST_XEN_SYSCTL_WRITE(physinfo, cores_per_socket);
+        POST_XEN_SYSCTL_WRITE(physinfo, nr_cpus);
+        POST_XEN_SYSCTL_WRITE(physinfo, max_cpu_id);
+        POST_XEN_SYSCTL_WRITE(physinfo, nr_nodes);
+        POST_XEN_SYSCTL_WRITE(physinfo, max_node_id);
+ POST_XEN_SYSCTL_WRITE(physinfo, cpu_khz);
+        POST_XEN_SYSCTL_WRITE(physinfo, total_pages);
+        POST_XEN_SYSCTL_WRITE(physinfo, free_pages);
+        POST_XEN_SYSCTL_WRITE(physinfo, scrub_pages);
+        POST_XEN_SYSCTL_WRITE(physinfo, hw_cap[8]);
+        POST_XEN_SYSCTL_WRITE(physinfo, capabilities);
+        break;
+
+      default:
+        VG_(printf)("post sysctl version %x cmd %d\n",
+                    sysctl->interface_version, sysctl->cmd);
+        break;
+      }
+#undef POST_XEN_SYSCTL_WRITE
+#undef __POST_XEN_SYSCTL_WRITE
+      break;
+   }
+
+   case __HYPERVISOR_domctl: {
+      struct xen_domctl *domctl = (struct xen_domctl *)(unsigned 
int)args->arg[0];
+
+      if (!domctl || domctl->interface_version != XEN_DOMCTL_INTERFACE_VERSION)
+        return;
+
+#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field) 
POST_MEM_WRITE((Addr)&domctl->u._union._field, sizeof(domctl->u._union._field));
+#define POST_XEN_DOMCTL_WRITE(_domctl, _field) 
__POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
+      switch (domctl->cmd) {
+      case XEN_DOMCTL_createdomain:
+      case XEN_DOMCTL_destroydomain:
+      case XEN_DOMCTL_pausedomain:
+      case XEN_DOMCTL_max_mem:
+      case XEN_DOMCTL_set_address_size:
+      case XEN_DOMCTL_settscinfo:
+      case XEN_DOMCTL_hypercall_init:
+      case XEN_DOMCTL_setvcpuaffinity:
+      case XEN_DOMCTL_setvcpucontext:
+      case XEN_DOMCTL_set_cpuid:
+      case XEN_DOMCTL_unpausedomain:
+        /* No output fields */
+        break;
+
+      case XEN_DOMCTL_max_vcpus:
+        POST_XEN_DOMCTL_WRITE(max_vcpus, max);
+        break;
+
+      case XEN_DOMCTL_get_address_size:
+        __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
+        break;
+
+      case XEN_DOMCTL_gettscinfo:
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
+        __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
+        break;
+
+      case XEN_DOMCTL_getvcpuinfo:
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
+        POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
+        break;
+
+      case XEN_DOMCTL_getvcpuaffinity:
+        POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
+                       domctl->u.vcpuaffinity.cpumap.nr_cpus / 8);
+        break;
+
+      case XEN_DOMCTL_getdomaininfo:
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, domain);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, flags);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, tot_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, max_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, shr_pages);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, shared_info_frame);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, cpu_time);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, nr_online_vcpus);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, max_vcpu_id);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, ssidref);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, handle);
+        POST_XEN_DOMCTL_WRITE(getdomaininfo, cpupool);
+        break;
+
+      case XEN_DOMCTL_getvcpucontext:
+        __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
+        break;
+
+      default:
+        VG_(printf)("post domctl version %x cmd %d on domain %d\n",
+                    domctl->interface_version, domctl->cmd, domctl->domain);
+        break;
+      }
+#undef POST_XEN_DOMCTL_WRITE
+#undef __POST_XEN_DOMCTL_WRITE
+      break;
+   }
+
+
+   case __HYPERVISOR_hvm_op: {
+      unsigned long op = args->arg[0];
+      void *arg = (void *)(unsigned long)args->arg[1];
+
+#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field) 
POST_MEM_WRITE((Addr)&((_type*)arg)->_field, \
+                                                                      
sizeof(((_type*)arg)->_field))
+#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) __POST_XEN_HVMOP_WRITE(_hvm_op, 
"xen_hvm_" # _hvm_op "_t", _field)
+      switch (op) {
+      case HVMOP_set_param:
+        /* No output parameters */
+        break;
+
+      case HVMOP_get_param:
+        __POST_XEN_HVMOP_WRITE(get_param, xen_hvm_param_t, value);
+        break;
+
+      default:
+        VG_(printf)("post hvm_op unknown OP %ld\n", op);
+        break;
+#undef __POST_XEN_HVMOP_WRITE
+#undef POST_XEN_HVMOP_WRITE
+      }
+   }
+      break;
+
+   default:
+      VG_(printf)("post unknown hypercall %lld ( %#llx, %#llx, %#llx, %#llx, 
%#llx )\n",
+                 args->op, args->arg[0], args->arg[1], args->arg[2], 
args->arg[3], args->arg[4]);
+      break;
+   }
+}
+
+
+PRE(ioctl_privcmd_mmap)
+{
+   struct vki_xen_privcmd_mmap *args = (struct vki_xen_privcmd_mmap *)(ARG3);
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP", (Addr)&args->num, 
sizeof(args->num));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP", (Addr)&args->dom, 
sizeof(args->dom));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAP", (Addr)args->entry, 
sizeof(*(args->entry)) * args->num);
+}
+
+PRE(ioctl_privcmd_mmapbatch)
+{
+   struct vki_xen_privcmd_mmapbatch *args = (struct vki_xen_privcmd_mmapbatch 
*)(ARG3);
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)&args->num, 
sizeof(args->num));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)&args->dom, 
sizeof(args->dom));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)&args->addr, 
sizeof(args->addr));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH", (Addr)args->arr, 
sizeof(*(args->arr)) * args->num);
+}
+
+PRE(ioctl_privcmd_mmapbatch_v2)
+{
+   struct vki_xen_privcmd_mmapbatch_v2 *args = (struct 
vki_xen_privcmd_mmapbatch_v2 *)(ARG3);
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)&args->num, 
sizeof(args->num));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)&args->dom, 
sizeof(args->dom));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)&args->addr, 
sizeof(args->addr));
+   PRE_MEM_READ("VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2", (Addr)args->arr, 
sizeof(*(args->arr)) * args->num);
+}
+
+POST(ioctl_privcmd_mmap)
+{
+   //struct vki_xen_privcmd_mmap *args = (struct vki_xen_privcmd_mmap *)(ARG3);
+}
+
+POST(ioctl_privcmd_mmapbatch)
+{
+   struct vki_xen_privcmd_mmapbatch *args = (struct vki_xen_privcmd_mmapbatch 
*)(ARG3);
+   POST_MEM_WRITE((Addr)args->arr, sizeof(*(args->arr)) * args->num);
+}
+
+POST(ioctl_privcmd_mmapbatch_v2)
+{
+   struct vki_xen_privcmd_mmapbatch_v2 *args = (struct 
vki_xen_privcmd_mmapbatch_v2 *)(ARG3);
+   POST_MEM_WRITE((Addr)args->err, sizeof(*(args->err)) * args->num);
+}
diff --git a/include/Makefile.am b/include/Makefile.am
index 33d0857..22bffa7 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -54,7 +54,8 @@ nobase_pkginclude_HEADERS = \
        vki/vki-scnums-ppc64-linux.h    \
        vki/vki-scnums-x86-linux.h      \
        vki/vki-scnums-arm-linux.h      \
-       vki/vki-scnums-darwin.h
+       vki/vki-scnums-darwin.h \
+       vki/vki-xen.h

 noinst_HEADERS = \
        vki/vki-ppc32-aix5.h            \
diff --git a/include/pub_tool_vki.h b/include/pub_tool_vki.h
index 73a4174..c4c117f 100644
--- a/include/pub_tool_vki.h
+++ b/include/pub_tool_vki.h
@@ -47,6 +47,7 @@

 #if defined(VGO_linux)
 #  include "vki/vki-linux.h"
+#  include "vki/vki-xen.h"
 #elif defined(VGP_ppc32_aix5)
 #  include "vki/vki-ppc32-aix5.h"
 #elif defined(VGP_ppc64_aix5)
diff --git a/include/vki/vki-linux.h b/include/vki/vki-linux.h
index beff378..1214300 100644
--- a/include/vki/vki-linux.h
+++ b/include/vki/vki-linux.h
@@ -2709,6 +2709,51 @@ struct vki_getcpu_cache {
 #define VKI_EV_MAX             0x1f
 #define VKI_EV_CNT             (VKI_EV_MAX+1)

+//----------------------------------------------------------------------
+// Xen privcmd IOCTL
+//----------------------------------------------------------------------
+
+typedef unsigned long __vki_xen_pfn_t;
+
+/* Argument to IOCTL_PRIVCMD_HYPERCALL: hypercall number plus up to
+ * five arguments, as taken by the privcmd driver. */
+struct vki_xen_privcmd_hypercall {
+       __vki_u64 op;
+       __vki_u64 arg[5];
+};
+
+struct vki_xen_privcmd_mmap_entry {
+        __vki_u64 va;     /* destination virtual address */
+        __vki_u64 mfn;
+        __vki_u64 npages;
+};
+
+struct vki_xen_privcmd_mmap {
+        int num;       /* number of elements in 'entry' */
+        __vki_u16 dom; /* target domain */
+        struct vki_xen_privcmd_mmap_entry *entry;
+};
+
+struct vki_xen_privcmd_mmapbatch {
+        int num;     /* number of pages to populate */
+        __vki_u16 dom; /* target domain */
+        __vki_u64 addr;  /* virtual address */
+        __vki_xen_pfn_t *arr; /* array of mfns - top nibble set on err */
+};
+
+struct vki_xen_privcmd_mmapbatch_v2 {
+        unsigned int num; /* number of pages to populate */
+        __vki_u16 dom;      /* target domain */
+        __vki_u64 addr;       /* virtual address */
+        const __vki_xen_pfn_t *arr; /* array of mfns */
+        int *err;  /* array of error codes; the kernel's '__user' sparse
+                      annotation is deliberately dropped here since it is
+                      not defined in the VKI headers */
+};
+
+#define VKI_XEN_IOCTL_PRIVCMD_HYPERCALL    _VKI_IOC(_VKI_IOC_NONE, 'P', 0, sizeof(struct vki_xen_privcmd_hypercall))
+#define VKI_XEN_IOCTL_PRIVCMD_MMAP         _VKI_IOC(_VKI_IOC_NONE, 'P', 2, sizeof(struct vki_xen_privcmd_mmap))
+
+#define VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH    _VKI_IOC(_VKI_IOC_NONE, 'P', 3, sizeof(struct vki_xen_privcmd_mmapbatch))
+#define VKI_XEN_IOCTL_PRIVCMD_MMAPBATCH_V2 _VKI_IOC(_VKI_IOC_NONE, 'P', 4, sizeof(struct vki_xen_privcmd_mmapbatch_v2))
+
+
 #endif // __VKI_LINUX_H

 /*--------------------------------------------------------------------*/
diff --git a/include/vki/vki-xen.h b/include/vki/vki-xen.h
new file mode 100644
index 0000000..7842cc9
--- /dev/null
+++ b/include/vki/vki-xen.h
@@ -0,0 +1,8 @@
+#ifndef __VKI_XEN_H
+#define __VKI_XEN_H
+
+#endif // __VKI_XEN_H
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/



-- 
Ian Campbell
Current Noise: Iron Monkey - 666 Pack

Ask not for whom the telephone bell tolls...
if thou art in the bathtub, it tolls for thee.


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.