
[Xen-devel] [PATCH for-4.5 v6 01/17] xen: Relocate mem_access and mem_event into common.



In preparation for adding ARM LPAE mem_event support, relocate mem_access,
mem_event and auxiliary functions into common Xen code.
This patch makes no functional changes on the x86 side; on the ARM side,
the mem_event and mem_access functions are only defined as placeholder stubs
and are actually enabled later in the series.

Edits that are only header path adjustments:
   xen/arch/x86/domctl.c
   xen/arch/x86/mm/hap/nested_ept.c
   xen/arch/x86/mm/hap/nested_hap.c
   xen/arch/x86/mm/mem_paging.c
   xen/arch/x86/mm/mem_sharing.c
   xen/arch/x86/mm/p2m-pod.c
   xen/arch/x86/mm/p2m-pt.c
   xen/arch/x86/mm/p2m.c
   xen/arch/x86/x86_64/compat/mm.c
   xen/arch/x86/x86_64/mm.c

Makefile adjustments for new/removed code:
   xen/common/Makefile
   xen/arch/x86/mm/Makefile

Relocated prepare_ring_for_helper and destroy_ring_for_helper functions:
   xen/include/xen/mm.h
   xen/common/memory.c
   xen/include/asm-x86/hvm/hvm.h
   xen/arch/x86/hvm/hvm.c

Code movement of mem_event and mem_access:
    xen/arch/x86/mm/mem_access.c -> xen/common/mem_access.c
    xen/arch/x86/mm/mem_event.c -> xen/common/mem_event.c
    xen/include/asm-x86/mem_access.h -> xen/include/xen/mem_access.h
    xen/include/asm-x86/mem_event.h -> xen/include/xen/mem_event.h

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v5: Make <xen/mem_event.h> include <xen/sched.h> by default.
    Style fix with grouping of #includes.

v4: Make <xen/mem_access.h> include <public/memory.h> by default.

v3: Replace asm/domain.h with xen/sched.h in mem_event.c to better
    accommodate the new code location.
    Replace #ifdef CONFIG_X86 wrappers with HAS_MEM_ACCESS flags.

v2: Update MAINTAINERS.
    More descriptive commit message to aid in the review process.
---
 MAINTAINERS                      |   6 +
 xen/Rules.mk                     |   1 +
 xen/arch/x86/Rules.mk            |   1 +
 xen/arch/x86/domctl.c            |   2 +-
 xen/arch/x86/hvm/hvm.c           |  63 +---
 xen/arch/x86/mm/Makefile         |   2 -
 xen/arch/x86/mm/hap/nested_ept.c |   6 +-
 xen/arch/x86/mm/hap/nested_hap.c |   6 +-
 xen/arch/x86/mm/mem_access.c     | 133 --------
 xen/arch/x86/mm/mem_event.c      | 705 ---------------------------------------
 xen/arch/x86/mm/mem_paging.c     |   2 +-
 xen/arch/x86/mm/mem_sharing.c    |   4 +-
 xen/arch/x86/mm/p2m-pod.c        |   8 +-
 xen/arch/x86/mm/p2m-pt.c         |  10 +-
 xen/arch/x86/mm/p2m.c            |   8 +-
 xen/arch/x86/x86_64/compat/mm.c  |   4 +-
 xen/arch/x86/x86_64/mm.c         |   4 +-
 xen/common/Makefile              |   2 +
 xen/common/domain.c              |   1 +
 xen/common/mem_access.c          | 133 ++++++++
 xen/common/mem_event.c           | 705 +++++++++++++++++++++++++++++++++++++++
 xen/common/memory.c              |  63 ++++
 xen/include/asm-arm/mm.h         |   1 -
 xen/include/asm-x86/config.h     |   3 +
 xen/include/asm-x86/hvm/hvm.h    |   6 -
 xen/include/asm-x86/mem_access.h |  39 ---
 xen/include/asm-x86/mem_event.h  |  82 -----
 xen/include/asm-x86/mm.h         |   2 -
 xen/include/xen/mem_access.h     |  60 ++++
 xen/include/xen/mem_event.h      | 143 ++++++++
 xen/include/xen/mm.h             |   6 +
 31 files changed, 1154 insertions(+), 1057 deletions(-)
 delete mode 100644 xen/arch/x86/mm/mem_access.c
 delete mode 100644 xen/arch/x86/mm/mem_event.c
 create mode 100644 xen/common/mem_access.c
 create mode 100644 xen/common/mem_event.c
 delete mode 100644 xen/include/asm-x86/mem_access.h
 delete mode 100644 xen/include/asm-x86/mem_event.h
 create mode 100644 xen/include/xen/mem_access.h
 create mode 100644 xen/include/xen/mem_event.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 266e47b..f659180 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -337,6 +337,12 @@ F: xen/arch/x86/mm/mem_sharing.c
 F:     xen/arch/x86/mm/mem_paging.c
 F:     tools/memshr
 
+MEMORY EVENT AND ACCESS
+M:     Tim Deegan <tim@xxxxxxx>
+S:     Supported
+F:     xen/common/mem_event.c
+F:     xen/common/mem_access.c
+
 XENTRACE
 M:     George Dunlap <george.dunlap@xxxxxxxxxxxxx>
 S:     Supported
diff --git a/xen/Rules.mk b/xen/Rules.mk
index b49f3c8..dc15b09 100644
--- a/xen/Rules.mk
+++ b/xen/Rules.mk
@@ -57,6 +57,7 @@ CFLAGS-$(HAS_ACPI)      += -DHAS_ACPI
 CFLAGS-$(HAS_GDBSX)     += -DHAS_GDBSX
 CFLAGS-$(HAS_PASSTHROUGH) += -DHAS_PASSTHROUGH
 CFLAGS-$(HAS_DEVICE_TREE) += -DHAS_DEVICE_TREE
+CFLAGS-$(HAS_MEM_ACCESS)  += -DHAS_MEM_ACCESS
 CFLAGS-$(HAS_PCI)       += -DHAS_PCI
 CFLAGS-$(HAS_IOPORTS)   += -DHAS_IOPORTS
 CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
diff --git a/xen/arch/x86/Rules.mk b/xen/arch/x86/Rules.mk
index 576985e..bd4e342 100644
--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -12,6 +12,7 @@ HAS_NS16550 := y
 HAS_EHCI := y
 HAS_KEXEC := y
 HAS_GDBSX := y
+HAS_MEM_ACCESS := y
 xenoprof := y
 
 #
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 7a5de43..26a3ea1 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -30,7 +30,7 @@
 #include <xen/hypercall.h> /* for arch_do_domctl */
 #include <xsm/xsm.h>
 #include <xen/iommu.h>
-#include <asm/mem_event.h>
+#include <xen/mem_event.h>
 #include <public/mem_event.h>
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8d905d3..7649d36 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -35,6 +35,9 @@
 #include <xen/paging.h>
 #include <xen/cpu.h>
 #include <xen/wait.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <xen/rangeset.h>
 #include <asm/shadow.h>
 #include <asm/hap.h>
 #include <asm/current.h>
@@ -63,10 +66,7 @@
 #include <public/hvm/ioreq.h>
 #include <public/version.h>
 #include <public/memory.h>
-#include <asm/mem_event.h>
-#include <asm/mem_access.h>
 #include <public/mem_event.h>
-#include <xen/rangeset.h>
 #include <public/arch-x86/cpuid.h>
 
 bool_t __read_mostly hvm_enabled;
@@ -484,19 +484,6 @@ static void hvm_free_ioreq_gmfn(struct domain *d, unsigned long gmfn)
     clear_bit(i, &d->arch.hvm_domain.ioreq_gmfn.mask);
 }
 
-void destroy_ring_for_helper(
-    void **_va, struct page_info *page)
-{
-    void *va = *_va;
-
-    if ( va != NULL )
-    {
-        unmap_domain_page_global(va);
-        put_page_and_type(page);
-        *_va = NULL;
-    }
-}
-
 static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
 {
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
@@ -504,50 +491,6 @@ static void hvm_unmap_ioreq_page(struct hvm_ioreq_server *s, bool_t buf)
     destroy_ring_for_helper(&iorp->va, iorp->page);
 }
 
-int prepare_ring_for_helper(
-    struct domain *d, unsigned long gmfn, struct page_info **_page,
-    void **_va)
-{
-    struct page_info *page;
-    p2m_type_t p2mt;
-    void *va;
-
-    page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
-    if ( p2m_is_paging(p2mt) )
-    {
-        if ( page )
-            put_page(page);
-        p2m_mem_paging_populate(d, gmfn);
-        return -ENOENT;
-    }
-    if ( p2m_is_shared(p2mt) )
-    {
-        if ( page )
-            put_page(page);
-        return -ENOENT;
-    }
-    if ( !page )
-        return -EINVAL;
-
-    if ( !get_page_type(page, PGT_writable_page) )
-    {
-        put_page(page);
-        return -EINVAL;
-    }
-
-    va = __map_domain_page_global(page);
-    if ( va == NULL )
-    {
-        put_page_and_type(page);
-        return -ENOMEM;
-    }
-
-    *_va = va;
-    *_page = page;
-
-    return 0;
-}
-
 static int hvm_map_ioreq_page(
     struct hvm_ioreq_server *s, bool_t buf, unsigned long gmfn)
 {
diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 73dcdf4..ed4b1f8 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -6,10 +6,8 @@ obj-y += p2m.o p2m-pt.o p2m-ept.o p2m-pod.o
 obj-y += guest_walk_2.o
 obj-y += guest_walk_3.o
 obj-$(x86_64) += guest_walk_4.o
-obj-$(x86_64) += mem_event.o
 obj-$(x86_64) += mem_paging.o
 obj-$(x86_64) += mem_sharing.o
-obj-$(x86_64) += mem_access.o
 
 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
index 0d044bc..cbbc4e9 100644
--- a/xen/arch/x86/mm/hap/nested_ept.c
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -17,14 +17,14 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  */
+#include <xen/mem_event.h>
+#include <xen/event.h>
+#include <public/mem_event.h>
 #include <asm/domain.h>
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
-#include <asm/mem_event.h>
-#include <public/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <xen/event.h>
 #include <asm/hap.h>
 #include <asm/hvm/support.h>
 
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 137a87c..a4bb835 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -19,14 +19,14 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <xen/mem_event.h>
+#include <xen/event.h>
+#include <public/mem_event.h>
 #include <asm/domain.h>
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
-#include <asm/mem_event.h>
-#include <public/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <xen/event.h>
 #include <asm/hap.h>
 #include <asm/hvm/support.h>
 
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
deleted file mode 100644
index e8465a5..0000000
--- a/xen/arch/x86/mm/mem_access.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/******************************************************************************
- * arch/x86/mm/mem_access.c
- *
- * Memory access support.
- *
- * Copyright (c) 2011 Virtuata, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-
-#include <xen/sched.h>
-#include <xen/guest_access.h>
-#include <xen/hypercall.h>
-#include <asm/p2m.h>
-#include <asm/mem_event.h>
-#include <xsm/xsm.h>
-
-
-int mem_access_memop(unsigned long cmd,
-                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
-{
-    long rc;
-    xen_mem_access_op_t mao;
-    struct domain *d;
-
-    if ( copy_from_guest(&mao, arg, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_live_remote_domain_by_id(mao.domid, &d);
-    if ( rc )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
-    if ( rc )
-        goto out;
-
-    rc = -ENODEV;
-    if ( unlikely(!d->mem_event->access.ring_page) )
-        goto out;
-
-    switch ( mao.op )
-    {
-    case XENMEM_access_op_resume:
-        p2m_mem_access_resume(d);
-        rc = 0;
-        break;
-
-    case XENMEM_access_op_set_access:
-    {
-        unsigned long start_iter = cmd & ~MEMOP_CMD_MASK;
-
-        rc = -EINVAL;
-        if ( (mao.pfn != ~0ull) &&
-             (mao.nr < start_iter ||
-              ((mao.pfn + mao.nr - 1) < mao.pfn) ||
-              ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
-            break;
-
-        rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
-                                MEMOP_CMD_MASK, mao.access);
-        if ( rc > 0 )
-        {
-            ASSERT(!(rc & MEMOP_CMD_MASK));
-            rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
-                                               XENMEM_access_op | rc, arg);
-        }
-        break;
-    }
-
-    case XENMEM_access_op_get_access:
-    {
-        xenmem_access_t access;
-
-        rc = -EINVAL;
-        if ( (mao.pfn > domain_get_maximum_gpfn(d)) && mao.pfn != ~0ull )
-            break;
-
-        rc = p2m_get_mem_access(d, mao.pfn, &access);
-        if ( rc != 0 )
-            break;
-
-        mao.access = access;
-        rc = __copy_field_to_guest(arg, &mao, access) ? -EFAULT : 0;
-
-        break;
-    }
-
-    default:
-        rc = -ENOSYS;
-        break;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-int mem_access_send_req(struct domain *d, mem_event_request_t *req)
-{
-    int rc = mem_event_claim_slot(d, &d->mem_event->access);
-    if ( rc < 0 )
-        return rc;
-
-    mem_event_put_request(d, &d->mem_event->access, req);
-
-    return 0;
-} 
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
deleted file mode 100644
index ba7e71e..0000000
--- a/xen/arch/x86/mm/mem_event.c
+++ /dev/null
@@ -1,705 +0,0 @@
-/******************************************************************************
- * arch/x86/mm/mem_event.c
- *
- * Memory event support.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-
-#include <asm/domain.h>
-#include <xen/event.h>
-#include <xen/wait.h>
-#include <asm/p2m.h>
-#include <asm/mem_event.h>
-#include <asm/mem_paging.h>
-#include <asm/mem_access.h>
-#include <asm/mem_sharing.h>
-#include <xsm/xsm.h>
-
-/* for public/io/ring.h macros */
-#define xen_mb()   mb()
-#define xen_rmb()  rmb()
-#define xen_wmb()  wmb()
-
-#define mem_event_ring_lock_init(_med)  spin_lock_init(&(_med)->ring_lock)
-#define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
-#define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
-
-static int mem_event_enable(
-    struct domain *d,
-    xen_domctl_mem_event_op_t *mec,
-    struct mem_event_domain *med,
-    int pause_flag,
-    int param,
-    xen_event_channel_notification_t notification_fn)
-{
-    int rc;
-    unsigned long ring_gfn = d->arch.hvm_domain.params[param];
-
-    /* Only one helper at a time. If the helper crashed,
-     * the ring is in an undefined state and so is the guest.
-     */
-    if ( med->ring_page )
-        return -EBUSY;
-
-    /* The parameter defaults to zero, and it should be 
-     * set to something */
-    if ( ring_gfn == 0 )
-        return -ENOSYS;
-
-    mem_event_ring_lock_init(med);
-    mem_event_ring_lock(med);
-
-    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct, 
-                                    &med->ring_page);
-    if ( rc < 0 )
-        goto err;
-
-    /* Set the number of currently blocked vCPUs to 0. */
-    med->blocked = 0;
-
-    /* Allocate event channel */
-    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
-                                         current->domain->domain_id,
-                                         notification_fn);
-    if ( rc < 0 )
-        goto err;
-
-    med->xen_port = mec->port = rc;
-
-    /* Prepare ring buffer */
-    FRONT_RING_INIT(&med->front_ring,
-                    (mem_event_sring_t *)med->ring_page,
-                    PAGE_SIZE);
-
-    /* Save the pause flag for this particular ring. */
-    med->pause_flag = pause_flag;
-
-    /* Initialize the last-chance wait queue. */
-    init_waitqueue_head(&med->wq);
-
-    mem_event_ring_unlock(med);
-    return 0;
-
- err:
-    destroy_ring_for_helper(&med->ring_page, 
-                            med->ring_pg_struct);
-    mem_event_ring_unlock(med);
-
-    return rc;
-}
-
-static unsigned int mem_event_ring_available(struct mem_event_domain *med)
-{
-    int avail_req = RING_FREE_REQUESTS(&med->front_ring);
-    avail_req -= med->target_producers;
-    avail_req -= med->foreign_producers;
-
-    BUG_ON(avail_req < 0);
-
-    return avail_req;
-}
-
-/*
- * mem_event_wake_blocked() will wakeup vcpus waiting for room in the
- * ring. These vCPUs were paused on their way out after placing an event,
- * but need to be resumed where the ring is capable of processing at least
- * one event from them.
- */
-static void mem_event_wake_blocked(struct domain *d, struct mem_event_domain *med)
-{
-    struct vcpu *v;
-    int online = d->max_vcpus;
-    unsigned int avail_req = mem_event_ring_available(med);
-
-    if ( avail_req == 0 || med->blocked == 0 )
-        return;
-
-    /*
-     * We ensure that we only have vCPUs online if there are enough free slots
-     * for their memory events to be processed.  This will ensure that no
-     * memory events are lost (due to the fact that certain types of events
-     * cannot be replayed, we need to ensure that there is space in the ring
-     * for when they are hit).
-     * See comment below in mem_event_put_request().
-     */
-    for_each_vcpu ( d, v )
-        if ( test_bit(med->pause_flag, &v->pause_flags) )
-            online--;
-
-    ASSERT(online == (d->max_vcpus - med->blocked));
-
-    /* We remember which vcpu last woke up to avoid scanning always linearly
-     * from zero and starving higher-numbered vcpus under high load */
-    if ( d->vcpu )
-    {
-        int i, j, k;
-
-        for (i = med->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++)
-        {
-            k = i % d->max_vcpus;
-            v = d->vcpu[k];
-            if ( !v )
-                continue;
-
-            if ( !(med->blocked) || online >= avail_req )
-               break;
-
-            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
-            {
-                vcpu_unpause(v);
-                online++;
-                med->blocked--;
-                med->last_vcpu_wake_up = k;
-            }
-        }
-    }
-}
-
-/*
- * In the event that a vCPU attempted to place an event in the ring and
- * was unable to do so, it is queued on a wait queue.  These are woken as
- * needed, and take precedence over the blocked vCPUs.
- */
-static void mem_event_wake_queued(struct domain *d, struct mem_event_domain *med)
-{
-    unsigned int avail_req = mem_event_ring_available(med);
-
-    if ( avail_req > 0 )
-        wake_up_nr(&med->wq, avail_req);
-}
-
-/*
- * mem_event_wake() will wakeup all vcpus waiting for the ring to
- * become available.  If we have queued vCPUs, they get top priority. We
- * are guaranteed that they will go through code paths that will eventually
- * call mem_event_wake() again, ensuring that any blocked vCPUs will get
- * unpaused once all the queued vCPUs have made it through.
- */
-void mem_event_wake(struct domain *d, struct mem_event_domain *med)
-{
-    if (!list_empty(&med->wq.list))
-        mem_event_wake_queued(d, med);
-    else
-        mem_event_wake_blocked(d, med);
-}
-
-static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
-{
-    if ( med->ring_page )
-    {
-        struct vcpu *v;
-
-        mem_event_ring_lock(med);
-
-        if ( !list_empty(&med->wq.list) )
-        {
-            mem_event_ring_unlock(med);
-            return -EBUSY;
-        }
-
-        /* Free domU's event channel and leave the other one unbound */
-        free_xen_event_channel(d->vcpu[0], med->xen_port);
-
-        /* Unblock all vCPUs */
-        for_each_vcpu ( d, v )
-        {
-            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
-            {
-                vcpu_unpause(v);
-                med->blocked--;
-            }
-        }
-
-        destroy_ring_for_helper(&med->ring_page, 
-                                med->ring_pg_struct);
-        mem_event_ring_unlock(med);
-    }
-
-    return 0;
-}
-
-static inline void mem_event_release_slot(struct domain *d,
-                                          struct mem_event_domain *med)
-{
-    /* Update the accounting */
-    if ( current->domain == d )
-        med->target_producers--;
-    else
-        med->foreign_producers--;
-
-    /* Kick any waiters */
-    mem_event_wake(d, med);
-}
-
-/*
- * mem_event_mark_and_pause() tags vcpu and put it to sleep.
- * The vcpu will resume execution in mem_event_wake_waiters().
- */
-void mem_event_mark_and_pause(struct vcpu *v, struct mem_event_domain *med)
-{
-    if ( !test_and_set_bit(med->pause_flag, &v->pause_flags) )
-    {
-        vcpu_pause_nosync(v);
-        med->blocked++;
-    }
-}
-
-/*
- * This must be preceded by a call to claim_slot(), and is guaranteed to
- * succeed.  As a side-effect however, the vCPU may be paused if the ring is
- * overly full and its continued execution would cause stalling and excessive
- * waiting.  The vCPU will be automatically unpaused when the ring clears.
- */
-void mem_event_put_request(struct domain *d,
-                           struct mem_event_domain *med,
-                           mem_event_request_t *req)
-{
-    mem_event_front_ring_t *front_ring;
-    int free_req;
-    unsigned int avail_req;
-    RING_IDX req_prod;
-
-    if ( current->domain != d )
-    {
-        req->flags |= MEM_EVENT_FLAG_FOREIGN;
-        ASSERT( !(req->flags & MEM_EVENT_FLAG_VCPU_PAUSED) );
-    }
-
-    mem_event_ring_lock(med);
-
-    /* Due to the reservations, this step must succeed. */
-    front_ring = &med->front_ring;
-    free_req = RING_FREE_REQUESTS(front_ring);
-    ASSERT(free_req > 0);
-
-    /* Copy request */
-    req_prod = front_ring->req_prod_pvt;
-    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
-    req_prod++;
-
-    /* Update ring */
-    front_ring->req_prod_pvt = req_prod;
-    RING_PUSH_REQUESTS(front_ring);
-
-    /* We've actually *used* our reservation, so release the slot. */
-    mem_event_release_slot(d, med);
-
-    /* Give this vCPU a black eye if necessary, on the way out.
-     * See the comments above wake_blocked() for more information
-     * on how this mechanism works to avoid waiting. */
-    avail_req = mem_event_ring_available(med);
-    if( current->domain == d && avail_req < d->max_vcpus )
-        mem_event_mark_and_pause(current, med);
-
-    mem_event_ring_unlock(med);
-
-    notify_via_xen_event_channel(d, med->xen_port);
-}
-
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med, mem_event_response_t *rsp)
-{
-    mem_event_front_ring_t *front_ring;
-    RING_IDX rsp_cons;
-
-    mem_event_ring_lock(med);
-
-    front_ring = &med->front_ring;
-    rsp_cons = front_ring->rsp_cons;
-
-    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
-    {
-        mem_event_ring_unlock(med);
-        return 0;
-    }
-
-    /* Copy response */
-    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
-    rsp_cons++;
-
-    /* Update ring */
-    front_ring->rsp_cons = rsp_cons;
-    front_ring->sring->rsp_event = rsp_cons + 1;
-
-    /* Kick any waiters -- since we've just consumed an event,
-     * there may be additional space available in the ring. */
-    mem_event_wake(d, med);
-
-    mem_event_ring_unlock(med);
-
-    return 1;
-}
-
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
-{
-    mem_event_ring_lock(med);
-    mem_event_release_slot(d, med);
-    mem_event_ring_unlock(med);
-}
-
-static int mem_event_grab_slot(struct mem_event_domain *med, int foreign)
-{
-    unsigned int avail_req;
-
-    if ( !med->ring_page )
-        return -ENOSYS;
-
-    mem_event_ring_lock(med);
-
-    avail_req = mem_event_ring_available(med);
-    if ( avail_req == 0 )
-    {
-        mem_event_ring_unlock(med);
-        return -EBUSY;
-    }
-
-    if ( !foreign )
-        med->target_producers++;
-    else
-        med->foreign_producers++;
-
-    mem_event_ring_unlock(med);
-
-    return 0;
-}
-
-/* Simple try_grab wrapper for use in the wait_event() macro. */
-static int mem_event_wait_try_grab(struct mem_event_domain *med, int *rc)
-{
-    *rc = mem_event_grab_slot(med, 0);
-    return *rc;
-}
-
-/* Call mem_event_grab_slot() until the ring doesn't exist, or is available. */
-static int mem_event_wait_slot(struct mem_event_domain *med)
-{
-    int rc = -EBUSY;
-    wait_event(med->wq, mem_event_wait_try_grab(med, &rc) != -EBUSY);
-    return rc;
-}
-
-bool_t mem_event_check_ring(struct mem_event_domain *med)
-{
-    return (med->ring_page != NULL);
-}
-
-/*
- * Determines whether or not the current vCPU belongs to the target domain,
- * and calls the appropriate wait function.  If it is a guest vCPU, then we
- * use mem_event_wait_slot() to reserve a slot.  As long as there is a ring,
- * this function will always return 0 for a guest.  For a non-guest, we check
- * for space and return -EBUSY if the ring is not available.
- *
- * Return codes: -ENOSYS: the ring is not yet configured
- *               -EBUSY: the ring is busy
- *               0: a spot has been reserved
- *
- */
-int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
-                            bool_t allow_sleep)
-{
-    if ( (current->domain == d) && allow_sleep )
-        return mem_event_wait_slot(med);
-    else
-        return mem_event_grab_slot(med, (current->domain != d));
-}
-
-/* Registered with Xen-bound event channel for incoming notifications. */
-static void mem_paging_notification(struct vcpu *v, unsigned int port)
-{
-    if ( likely(v->domain->mem_event->paging.ring_page != NULL) )
-        p2m_mem_paging_resume(v->domain);
-}
-
-/* Registered with Xen-bound event channel for incoming notifications. */
-static void mem_access_notification(struct vcpu *v, unsigned int port)
-{
-    if ( likely(v->domain->mem_event->access.ring_page != NULL) )
-        p2m_mem_access_resume(v->domain);
-}
-
-/* Registered with Xen-bound event channel for incoming notifications. */
-static void mem_sharing_notification(struct vcpu *v, unsigned int port)
-{
-    if ( likely(v->domain->mem_event->share.ring_page != NULL) )
-        mem_sharing_sharing_resume(v->domain);
-}
-
-int do_mem_event_op(int op, uint32_t domain, void *arg)
-{
-    int ret;
-    struct domain *d;
-
-    ret = rcu_lock_live_remote_domain_by_id(domain, &d);
-    if ( ret )
-        return ret;
-
-    ret = xsm_mem_event_op(XSM_DM_PRIV, d, op);
-    if ( ret )
-        goto out;
-
-    switch (op)
-    {
-        case XENMEM_paging_op:
-            ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
-            break;
-        case XENMEM_sharing_op:
-            ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
-            break;
-        default:
-            ret = -ENOSYS;
-    }
-
- out:
-    rcu_unlock_domain(d);
-    return ret;
-}
-
-/* Clean up on domain destruction */
-void mem_event_cleanup(struct domain *d)
-{
-    if ( d->mem_event->paging.ring_page ) {
-        /* Destroying the wait queue head means waking up all
-         * queued vcpus. This will drain the list, allowing
-         * the disable routine to complete. It will also drop
-         * all domain refs the wait-queued vcpus are holding.
-         * Finally, because this code path involves previously
-         * pausing the domain (domain_kill), unpausing the 
-         * vcpus causes no harm. */
-        destroy_waitqueue_head(&d->mem_event->paging.wq);
-        (void)mem_event_disable(d, &d->mem_event->paging);
-    }
-    if ( d->mem_event->access.ring_page ) {
-        destroy_waitqueue_head(&d->mem_event->access.wq);
-        (void)mem_event_disable(d, &d->mem_event->access);
-    }
-    if ( d->mem_event->share.ring_page ) {
-        destroy_waitqueue_head(&d->mem_event->share.wq);
-        (void)mem_event_disable(d, &d->mem_event->share);
-    }
-}
-
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
-                     XEN_GUEST_HANDLE_PARAM(void) u_domctl)
-{
-    int rc;
-
-    rc = xsm_mem_event_control(XSM_PRIV, d, mec->mode, mec->op);
-    if ( rc )
-        return rc;
-
-    if ( unlikely(d == current->domain) )
-    {
-        gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
-        return -EINVAL;
-    }
-
-    if ( unlikely(d->is_dying) )
-    {
-        gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
-                 d->domain_id);
-        return 0;
-    }
-
-    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
-    {
-        gdprintk(XENLOG_INFO,
-                 "Memory event op on a domain (%u) with no vcpus\n",
-                 d->domain_id);
-        return -EINVAL;
-    }
-
-    rc = -ENOSYS;
-
-    switch ( mec->mode )
-    {
-    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
-    {
-        struct mem_event_domain *med = &d->mem_event->paging;
-        rc = -EINVAL;
-
-        switch( mec->op )
-        {
-        case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE:
-        {
-            struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
-            rc = -EOPNOTSUPP;
-            /* pvh fixme: p2m_is_foreign types need addressing */
-            if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
-                break;
-
-            rc = -ENODEV;
-            /* Only HAP is supported */
-            if ( !hap_enabled(d) )
-                break;
-
-            /* No paging if iommu is used */
-            rc = -EMLINK;
-            if ( unlikely(need_iommu(d)) )
-                break;
-
-            rc = -EXDEV;
-            /* Disallow paging in a PoD guest */
-            if ( p2m->pod.entry_count )
-                break;
-
-            rc = mem_event_enable(d, mec, med, _VPF_mem_paging, 
-                                    HVM_PARAM_PAGING_RING_PFN,
-                                    mem_paging_notification);
-        }
-        break;
-
-        case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE:
-        {
-            if ( med->ring_page )
-                rc = mem_event_disable(d, med);
-        }
-        break;
-
-        default:
-            rc = -ENOSYS;
-            break;
-        }
-    }
-    break;
-
-    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
-    {
-        struct mem_event_domain *med = &d->mem_event->access;
-        rc = -EINVAL;
-
-        switch( mec->op )
-        {
-        case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
-        {
-            rc = -ENODEV;
-            /* Only HAP is supported */
-            if ( !hap_enabled(d) )
-                break;
-
-            /* Currently only EPT is supported */
-            if ( !cpu_has_vmx )
-                break;
-
-            rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
-                                    HVM_PARAM_ACCESS_RING_PFN,
-                                    mem_access_notification);
-        }
-        break;
-
-        case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
-        {
-            if ( med->ring_page )
-                rc = mem_event_disable(d, med);
-        }
-        break;
-
-        default:
-            rc = -ENOSYS;
-            break;
-        }
-    }
-    break;
-
-    case XEN_DOMCTL_MEM_EVENT_OP_SHARING: 
-    {
-        struct mem_event_domain *med = &d->mem_event->share;
-        rc = -EINVAL;
-
-        switch( mec->op )
-        {
-        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE:
-        {
-            rc = -EOPNOTSUPP;
-            /* pvh fixme: p2m_is_foreign types need addressing */
-            if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
-                break;
-
-            rc = -ENODEV;
-            /* Only HAP is supported */
-            if ( !hap_enabled(d) )
-                break;
-
-            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing, 
-                                    HVM_PARAM_SHARING_RING_PFN,
-                                    mem_sharing_notification);
-        }
-        break;
-
-        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE:
-        {
-            if ( med->ring_page )
-                rc = mem_event_disable(d, med);
-        }
-        break;
-
-        default:
-            rc = -ENOSYS;
-            break;
-        }
-    }
-    break;
-
-    default:
-        rc = -ENOSYS;
-    }
-
-    return rc;
-}
-
-void mem_event_vcpu_pause(struct vcpu *v)
-{
-    ASSERT(v == current);
-
-    atomic_inc(&v->mem_event_pause_count);
-    vcpu_pause_nosync(v);
-}
-
-void mem_event_vcpu_unpause(struct vcpu *v)
-{
-    int old, new, prev = v->mem_event_pause_count.counter;
-
-    /* All unpause requests as a result of toolstack responses.  Prevent
-     * underflow of the vcpu pause count. */
-    do
-    {
-        old = prev;
-        new = old - 1;
-
-        if ( new < 0 )
-        {
-            printk(XENLOG_G_WARNING
-                   "%pv mem_event: Too many unpause attempts\n", v);
-            return;
-        }
-
-        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
-    } while ( prev != old );
-
-    vcpu_unpause(v);
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index 235776d..65f6a3d 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -22,7 +22,7 @@
 
 
 #include <asm/p2m.h>
-#include <asm/mem_event.h>
+#include <xen/mem_event.h>
 
 
 int mem_paging_memop(struct domain *d, xen_mem_event_op_t *mec)
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 79188b9..7c0fc7d 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -27,12 +27,12 @@
 #include <xen/mm.h>
 #include <xen/grant_table.h>
 #include <xen/sched.h>
+#include <xen/rcupdate.h>
+#include <xen/mem_event.h>
 #include <asm/page.h>
 #include <asm/string.h>
 #include <asm/p2m.h>
-#include <asm/mem_event.h>
 #include <asm/atomic.h>
-#include <xen/rcupdate.h>
 #include <asm/event.h>
 #include <xsm/xsm.h>
 
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index bd4c7c8..43f507c 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -20,16 +20,16 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <xen/iommu.h>
+#include <xen/mem_event.h>
+#include <xen/event.h>
+#include <public/mem_event.h>
 #include <asm/domain.h>
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
 #include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
-#include <xen/iommu.h>
-#include <asm/mem_event.h>
-#include <public/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <xen/event.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 085ab6f..e48b63a 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -25,16 +25,16 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <xen/iommu.h>
+#include <xen/mem_event.h>
+#include <xen/event.h>
+#include <xen/trace.h>
+#include <public/mem_event.h>
 #include <asm/domain.h>
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
-#include <xen/iommu.h>
-#include <asm/mem_event.h>
-#include <public/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <xen/event.h>
-#include <xen/trace.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 32776c3..a9f120a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -24,16 +24,16 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <xen/iommu.h>
+#include <xen/mem_event.h>
+#include <xen/event.h>
+#include <public/mem_event.h>
 #include <asm/domain.h>
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
 #include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
-#include <xen/iommu.h>
-#include <asm/mem_event.h>
-#include <public/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <xen/event.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 #include <xsm/xsm.h>
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 69c6195..c079702 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -1,10 +1,10 @@
 #include <xen/event.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
 #include <xen/multicall.h>
 #include <compat/memory.h>
 #include <compat/xen.h>
-#include <asm/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <asm/mem_access.h>
 
 int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
 {
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 4937f9a..0da6ddc 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -26,6 +26,8 @@
 #include <xen/nodemask.h>
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
 #include <asm/current.h>
 #include <asm/asm_defns.h>
 #include <asm/page.h>
@@ -35,9 +37,7 @@
 #include <asm/msr.h>
 #include <asm/setup.h>
 #include <asm/numa.h>
-#include <asm/mem_event.h>
 #include <asm/mem_sharing.h>
-#include <asm/mem_access.h>
 #include <public/memory.h>
 
 /* Parameters for PFN/MADDR compression. */
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 3683ae3..b9f3387 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -51,6 +51,8 @@ obj-y += tmem_xen.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
 obj-y += lzo.o
+obj-$(HAS_MEM_ACCESS) += mem_access.o
+obj-$(HAS_MEM_ACCESS) += mem_event.o
 
 obj-bin-$(CONFIG_X86) += $(foreach n,decompress bunzip2 unxz unlzma unlzo unlz4 earlycpio,$(n).init.o)
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 62514b0..134bed6 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -15,6 +15,7 @@
 #include <xen/domain.h>
 #include <xen/mm.h>
 #include <xen/event.h>
+#include <xen/mem_event.h>
 #include <xen/time.h>
 #include <xen/console.h>
 #include <xen/softirq.h>
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
new file mode 100644
index 0000000..9a8c1a9
--- /dev/null
+++ b/xen/common/mem_access.c
@@ -0,0 +1,133 @@
+/******************************************************************************
+ * mem_access.c
+ *
+ * Memory access support.
+ *
+ * Copyright (c) 2011 Virtuata, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#include <xen/sched.h>
+#include <xen/guest_access.h>
+#include <xen/hypercall.h>
+#include <xen/mem_event.h>
+#include <public/memory.h>
+#include <asm/p2m.h>
+#include <xsm/xsm.h>
+
+int mem_access_memop(unsigned long cmd,
+                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
+{
+    long rc;
+    xen_mem_access_op_t mao;
+    struct domain *d;
+
+    if ( copy_from_guest(&mao, arg, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_live_remote_domain_by_id(mao.domid, &d);
+    if ( rc )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
+    if ( rc )
+        goto out;
+
+    rc = -ENODEV;
+    if ( unlikely(!d->mem_event->access.ring_page) )
+        goto out;
+
+    switch ( mao.op )
+    {
+    case XENMEM_access_op_resume:
+        p2m_mem_access_resume(d);
+        rc = 0;
+        break;
+
+    case XENMEM_access_op_set_access:
+    {
+        unsigned long start_iter = cmd & ~MEMOP_CMD_MASK;
+
+        rc = -EINVAL;
+        if ( (mao.pfn != ~0ull) &&
+             (mao.nr < start_iter ||
+              ((mao.pfn + mao.nr - 1) < mao.pfn) ||
+              ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
+            break;
+
+        rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
+                                MEMOP_CMD_MASK, mao.access);
+        if ( rc > 0 )
+        {
+            ASSERT(!(rc & MEMOP_CMD_MASK));
+            rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+                                               XENMEM_access_op | rc, arg);
+        }
+        break;
+    }
+
+    case XENMEM_access_op_get_access:
+    {
+        xenmem_access_t access;
+
+        rc = -EINVAL;
+        if ( (mao.pfn > domain_get_maximum_gpfn(d)) && mao.pfn != ~0ull )
+            break;
+
+        rc = p2m_get_mem_access(d, mao.pfn, &access);
+        if ( rc != 0 )
+            break;
+
+        mao.access = access;
+        rc = __copy_field_to_guest(arg, &mao, access) ? -EFAULT : 0;
+
+        break;
+    }
+
+    default:
+        rc = -ENOSYS;
+        break;
+    }
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+int mem_access_send_req(struct domain *d, mem_event_request_t *req)
+{
+    int rc = mem_event_claim_slot(d, &d->mem_event->access);
+    if ( rc < 0 )
+        return rc;
+
+    mem_event_put_request(d, &d->mem_event->access, req);
+
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
new file mode 100644
index 0000000..336b638
--- /dev/null
+++ b/xen/common/mem_event.c
@@ -0,0 +1,705 @@
+/******************************************************************************
+ * mem_event.c
+ *
+ * Memory event support.
+ *
+ * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/wait.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <asm/p2m.h>
+#include <asm/mem_paging.h>
+#include <asm/mem_sharing.h>
+#include <xsm/xsm.h>
+
+/* for public/io/ring.h macros */
+#define xen_mb()   mb()
+#define xen_rmb()  rmb()
+#define xen_wmb()  wmb()
+
+#define mem_event_ring_lock_init(_med)  spin_lock_init(&(_med)->ring_lock)
+#define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
+#define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
+
+static int mem_event_enable(
+    struct domain *d,
+    xen_domctl_mem_event_op_t *mec,
+    struct mem_event_domain *med,
+    int pause_flag,
+    int param,
+    xen_event_channel_notification_t notification_fn)
+{
+    int rc;
+    unsigned long ring_gfn = d->arch.hvm_domain.params[param];
+
+    /* Only one helper at a time. If the helper crashed,
+     * the ring is in an undefined state and so is the guest.
+     */
+    if ( med->ring_page )
+        return -EBUSY;
+
+    /* The parameter defaults to zero, and it should be 
+     * set to something */
+    if ( ring_gfn == 0 )
+        return -ENOSYS;
+
+    mem_event_ring_lock_init(med);
+    mem_event_ring_lock(med);
+
+    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct, 
+                                    &med->ring_page);
+    if ( rc < 0 )
+        goto err;
+
+    /* Set the number of currently blocked vCPUs to 0. */
+    med->blocked = 0;
+
+    /* Allocate event channel */
+    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
+                                         current->domain->domain_id,
+                                         notification_fn);
+    if ( rc < 0 )
+        goto err;
+
+    med->xen_port = mec->port = rc;
+
+    /* Prepare ring buffer */
+    FRONT_RING_INIT(&med->front_ring,
+                    (mem_event_sring_t *)med->ring_page,
+                    PAGE_SIZE);
+
+    /* Save the pause flag for this particular ring. */
+    med->pause_flag = pause_flag;
+
+    /* Initialize the last-chance wait queue. */
+    init_waitqueue_head(&med->wq);
+
+    mem_event_ring_unlock(med);
+    return 0;
+
+ err:
+    destroy_ring_for_helper(&med->ring_page, 
+                            med->ring_pg_struct);
+    mem_event_ring_unlock(med);
+
+    return rc;
+}
+
+static unsigned int mem_event_ring_available(struct mem_event_domain *med)
+{
+    int avail_req = RING_FREE_REQUESTS(&med->front_ring);
+    avail_req -= med->target_producers;
+    avail_req -= med->foreign_producers;
+
+    BUG_ON(avail_req < 0);
+
+    return avail_req;
+}
+
+/*
+ * mem_event_wake_blocked() will wakeup vcpus waiting for room in the
+ * ring. These vCPUs were paused on their way out after placing an event,
+ * but need to be resumed where the ring is capable of processing at least
+ * one event from them.
+ */
+static void mem_event_wake_blocked(struct domain *d, struct mem_event_domain *med)
+{
+    struct vcpu *v;
+    int online = d->max_vcpus;
+    unsigned int avail_req = mem_event_ring_available(med);
+
+    if ( avail_req == 0 || med->blocked == 0 )
+        return;
+
+    /*
+     * We ensure that we only have vCPUs online if there are enough free slots
+     * for their memory events to be processed.  This will ensure that no
+     * memory events are lost (due to the fact that certain types of events
+     * cannot be replayed, we need to ensure that there is space in the ring
+     * for when they are hit).
+     * See comment below in mem_event_put_request().
+     */
+    for_each_vcpu ( d, v )
+        if ( test_bit(med->pause_flag, &v->pause_flags) )
+            online--;
+
+    ASSERT(online == (d->max_vcpus - med->blocked));
+
+    /* We remember which vcpu last woke up to avoid scanning always linearly
+     * from zero and starving higher-numbered vcpus under high load */
+    if ( d->vcpu )
+    {
+        int i, j, k;
+
+        for (i = med->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++)
+        {
+            k = i % d->max_vcpus;
+            v = d->vcpu[k];
+            if ( !v )
+                continue;
+
+            if ( !(med->blocked) || online >= avail_req )
+               break;
+
+            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
+            {
+                vcpu_unpause(v);
+                online++;
+                med->blocked--;
+                med->last_vcpu_wake_up = k;
+            }
+        }
+    }
+}
+
+/*
+ * In the event that a vCPU attempted to place an event in the ring and
+ * was unable to do so, it is queued on a wait queue.  These are woken as
+ * needed, and take precedence over the blocked vCPUs.
+ */
+static void mem_event_wake_queued(struct domain *d, struct mem_event_domain *med)
+{
+    unsigned int avail_req = mem_event_ring_available(med);
+
+    if ( avail_req > 0 )
+        wake_up_nr(&med->wq, avail_req);
+}
+
+/*
+ * mem_event_wake() will wakeup all vcpus waiting for the ring to
+ * become available.  If we have queued vCPUs, they get top priority. We
+ * are guaranteed that they will go through code paths that will eventually
+ * call mem_event_wake() again, ensuring that any blocked vCPUs will get
+ * unpaused once all the queued vCPUs have made it through.
+ */
+void mem_event_wake(struct domain *d, struct mem_event_domain *med)
+{
+    if (!list_empty(&med->wq.list))
+        mem_event_wake_queued(d, med);
+    else
+        mem_event_wake_blocked(d, med);
+}
+
+static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
+{
+    if ( med->ring_page )
+    {
+        struct vcpu *v;
+
+        mem_event_ring_lock(med);
+
+        if ( !list_empty(&med->wq.list) )
+        {
+            mem_event_ring_unlock(med);
+            return -EBUSY;
+        }
+
+        /* Free domU's event channel and leave the other one unbound */
+        free_xen_event_channel(d->vcpu[0], med->xen_port);
+
+        /* Unblock all vCPUs */
+        for_each_vcpu ( d, v )
+        {
+            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
+            {
+                vcpu_unpause(v);
+                med->blocked--;
+            }
+        }
+
+        destroy_ring_for_helper(&med->ring_page, 
+                                med->ring_pg_struct);
+        mem_event_ring_unlock(med);
+    }
+
+    return 0;
+}
+
+static inline void mem_event_release_slot(struct domain *d,
+                                          struct mem_event_domain *med)
+{
+    /* Update the accounting */
+    if ( current->domain == d )
+        med->target_producers--;
+    else
+        med->foreign_producers--;
+
+    /* Kick any waiters */
+    mem_event_wake(d, med);
+}
+
+/*
+ * mem_event_mark_and_pause() tags vcpu and put it to sleep.
+ * The vcpu will resume execution in mem_event_wake_waiters().
+ */
+void mem_event_mark_and_pause(struct vcpu *v, struct mem_event_domain *med)
+{
+    if ( !test_and_set_bit(med->pause_flag, &v->pause_flags) )
+    {
+        vcpu_pause_nosync(v);
+        med->blocked++;
+    }
+}
+
+/*
+ * This must be preceded by a call to claim_slot(), and is guaranteed to
+ * succeed.  As a side-effect however, the vCPU may be paused if the ring is
+ * overly full and its continued execution would cause stalling and excessive
+ * waiting.  The vCPU will be automatically unpaused when the ring clears.
+ */
+void mem_event_put_request(struct domain *d,
+                           struct mem_event_domain *med,
+                           mem_event_request_t *req)
+{
+    mem_event_front_ring_t *front_ring;
+    int free_req;
+    unsigned int avail_req;
+    RING_IDX req_prod;
+
+    if ( current->domain != d )
+    {
+        req->flags |= MEM_EVENT_FLAG_FOREIGN;
+        ASSERT( !(req->flags & MEM_EVENT_FLAG_VCPU_PAUSED) );
+    }
+
+    mem_event_ring_lock(med);
+
+    /* Due to the reservations, this step must succeed. */
+    front_ring = &med->front_ring;
+    free_req = RING_FREE_REQUESTS(front_ring);
+    ASSERT(free_req > 0);
+
+    /* Copy request */
+    req_prod = front_ring->req_prod_pvt;
+    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
+    req_prod++;
+
+    /* Update ring */
+    front_ring->req_prod_pvt = req_prod;
+    RING_PUSH_REQUESTS(front_ring);
+
+    /* We've actually *used* our reservation, so release the slot. */
+    mem_event_release_slot(d, med);
+
+    /* Give this vCPU a black eye if necessary, on the way out.
+     * See the comments above wake_blocked() for more information
+     * on how this mechanism works to avoid waiting. */
+    avail_req = mem_event_ring_available(med);
+    if( current->domain == d && avail_req < d->max_vcpus )
+        mem_event_mark_and_pause(current, med);
+
+    mem_event_ring_unlock(med);
+
+    notify_via_xen_event_channel(d, med->xen_port);
+}
+
+int mem_event_get_response(struct domain *d, struct mem_event_domain *med, mem_event_response_t *rsp)
+{
+    mem_event_front_ring_t *front_ring;
+    RING_IDX rsp_cons;
+
+    mem_event_ring_lock(med);
+
+    front_ring = &med->front_ring;
+    rsp_cons = front_ring->rsp_cons;
+
+    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
+    {
+        mem_event_ring_unlock(med);
+        return 0;
+    }
+
+    /* Copy response */
+    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
+    rsp_cons++;
+
+    /* Update ring */
+    front_ring->rsp_cons = rsp_cons;
+    front_ring->sring->rsp_event = rsp_cons + 1;
+
+    /* Kick any waiters -- since we've just consumed an event,
+     * there may be additional space available in the ring. */
+    mem_event_wake(d, med);
+
+    mem_event_ring_unlock(med);
+
+    return 1;
+}
+
+void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
+{
+    mem_event_ring_lock(med);
+    mem_event_release_slot(d, med);
+    mem_event_ring_unlock(med);
+}
+
+static int mem_event_grab_slot(struct mem_event_domain *med, int foreign)
+{
+    unsigned int avail_req;
+
+    if ( !med->ring_page )
+        return -ENOSYS;
+
+    mem_event_ring_lock(med);
+
+    avail_req = mem_event_ring_available(med);
+    if ( avail_req == 0 )
+    {
+        mem_event_ring_unlock(med);
+        return -EBUSY;
+    }
+
+    if ( !foreign )
+        med->target_producers++;
+    else
+        med->foreign_producers++;
+
+    mem_event_ring_unlock(med);
+
+    return 0;
+}
+
+/* Simple try_grab wrapper for use in the wait_event() macro. */
+static int mem_event_wait_try_grab(struct mem_event_domain *med, int *rc)
+{
+    *rc = mem_event_grab_slot(med, 0);
+    return *rc;
+}
+
+/* Call mem_event_grab_slot() until the ring doesn't exist, or is available. */
+static int mem_event_wait_slot(struct mem_event_domain *med)
+{
+    int rc = -EBUSY;
+    wait_event(med->wq, mem_event_wait_try_grab(med, &rc) != -EBUSY);
+    return rc;
+}
+
+bool_t mem_event_check_ring(struct mem_event_domain *med)
+{
+    return (med->ring_page != NULL);
+}
+
+/*
+ * Determines whether or not the current vCPU belongs to the target domain,
+ * and calls the appropriate wait function.  If it is a guest vCPU, then we
+ * use mem_event_wait_slot() to reserve a slot.  As long as there is a ring,
+ * this function will always return 0 for a guest.  For a non-guest, we check
+ * for space and return -EBUSY if the ring is not available.
+ *
+ * Return codes: -ENOSYS: the ring is not yet configured
+ *               -EBUSY: the ring is busy
+ *               0: a spot has been reserved
+ *
+ */
+int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
+                            bool_t allow_sleep)
+{
+    if ( (current->domain == d) && allow_sleep )
+        return mem_event_wait_slot(med);
+    else
+        return mem_event_grab_slot(med, (current->domain != d));
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_paging_notification(struct vcpu *v, unsigned int port)
+{
+    if ( likely(v->domain->mem_event->paging.ring_page != NULL) )
+        p2m_mem_paging_resume(v->domain);
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_access_notification(struct vcpu *v, unsigned int port)
+{
+    if ( likely(v->domain->mem_event->access.ring_page != NULL) )
+        p2m_mem_access_resume(v->domain);
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void mem_sharing_notification(struct vcpu *v, unsigned int port)
+{
+    if ( likely(v->domain->mem_event->share.ring_page != NULL) )
+        mem_sharing_sharing_resume(v->domain);
+}
+
+int do_mem_event_op(int op, uint32_t domain, void *arg)
+{
+    int ret;
+    struct domain *d;
+
+    ret = rcu_lock_live_remote_domain_by_id(domain, &d);
+    if ( ret )
+        return ret;
+
+    ret = xsm_mem_event_op(XSM_DM_PRIV, d, op);
+    if ( ret )
+        goto out;
+
+    switch (op)
+    {
+        case XENMEM_paging_op:
+            ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
+            break;
+        case XENMEM_sharing_op:
+            ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
+            break;
+        default:
+            ret = -ENOSYS;
+    }
+
+ out:
+    rcu_unlock_domain(d);
+    return ret;
+}
+
+/* Clean up on domain destruction */
+void mem_event_cleanup(struct domain *d)
+{
+    if ( d->mem_event->paging.ring_page ) {
+        /* Destroying the wait queue head means waking up all
+         * queued vcpus. This will drain the list, allowing
+         * the disable routine to complete. It will also drop
+         * all domain refs the wait-queued vcpus are holding.
+         * Finally, because this code path involves previously
+         * pausing the domain (domain_kill), unpausing the 
+         * vcpus causes no harm. */
+        destroy_waitqueue_head(&d->mem_event->paging.wq);
+        (void)mem_event_disable(d, &d->mem_event->paging);
+    }
+    if ( d->mem_event->access.ring_page ) {
+        destroy_waitqueue_head(&d->mem_event->access.wq);
+        (void)mem_event_disable(d, &d->mem_event->access);
+    }
+    if ( d->mem_event->share.ring_page ) {
+        destroy_waitqueue_head(&d->mem_event->share.wq);
+        (void)mem_event_disable(d, &d->mem_event->share);
+    }
+}
+
+int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
+                     XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+{
+    int rc;
+
+    rc = xsm_mem_event_control(XSM_PRIV, d, mec->mode, mec->op);
+    if ( rc )
+        return rc;
+
+    if ( unlikely(d == current->domain) )
+    {
+        gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
+        return -EINVAL;
+    }
+
+    if ( unlikely(d->is_dying) )
+    {
+        gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
+                 d->domain_id);
+        return 0;
+    }
+
+    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
+    {
+        gdprintk(XENLOG_INFO,
+                 "Memory event op on a domain (%u) with no vcpus\n",
+                 d->domain_id);
+        return -EINVAL;
+    }
+
+    rc = -ENOSYS;
+
+    switch ( mec->mode )
+    {
+    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
+    {
+        struct mem_event_domain *med = &d->mem_event->paging;
+        rc = -EINVAL;
+
+        switch( mec->op )
+        {
+        case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE:
+        {
+            struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+            rc = -EOPNOTSUPP;
+            /* pvh fixme: p2m_is_foreign types need addressing */
+            if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
+                break;
+
+            rc = -ENODEV;
+            /* Only HAP is supported */
+            if ( !hap_enabled(d) )
+                break;
+
+            /* No paging if iommu is used */
+            rc = -EMLINK;
+            if ( unlikely(need_iommu(d)) )
+                break;
+
+            rc = -EXDEV;
+            /* Disallow paging in a PoD guest */
+            if ( p2m->pod.entry_count )
+                break;
+
+            rc = mem_event_enable(d, mec, med, _VPF_mem_paging, 
+                                    HVM_PARAM_PAGING_RING_PFN,
+                                    mem_paging_notification);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE:
+        {
+            if ( med->ring_page )
+                rc = mem_event_disable(d, med);
+        }
+        break;
+
+        default:
+            rc = -ENOSYS;
+            break;
+        }
+    }
+    break;
+
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
+    {
+        struct mem_event_domain *med = &d->mem_event->access;
+        rc = -EINVAL;
+
+        switch( mec->op )
+        {
+        case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
+        {
+            rc = -ENODEV;
+            /* Only HAP is supported */
+            if ( !hap_enabled(d) )
+                break;
+
+            /* Currently only EPT is supported */
+            if ( !cpu_has_vmx )
+                break;
+
+            rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
+                                    HVM_PARAM_ACCESS_RING_PFN,
+                                    mem_access_notification);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
+        {
+            if ( med->ring_page )
+                rc = mem_event_disable(d, med);
+        }
+        break;
+
+        default:
+            rc = -ENOSYS;
+            break;
+        }
+    }
+    break;
+
+    case XEN_DOMCTL_MEM_EVENT_OP_SHARING: 
+    {
+        struct mem_event_domain *med = &d->mem_event->share;
+        rc = -EINVAL;
+
+        switch( mec->op )
+        {
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE:
+        {
+            rc = -EOPNOTSUPP;
+            /* pvh fixme: p2m_is_foreign types need addressing */
+            if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
+                break;
+
+            rc = -ENODEV;
+            /* Only HAP is supported */
+            if ( !hap_enabled(d) )
+                break;
+
+            rc = mem_event_enable(d, mec, med, _VPF_mem_sharing, 
+                                    HVM_PARAM_SHARING_RING_PFN,
+                                    mem_sharing_notification);
+        }
+        break;
+
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE:
+        {
+            if ( med->ring_page )
+                rc = mem_event_disable(d, med);
+        }
+        break;
+
+        default:
+            rc = -ENOSYS;
+            break;
+        }
+    }
+    break;
+
+    default:
+        rc = -ENOSYS;
+    }
+
+    return rc;
+}
+
+void mem_event_vcpu_pause(struct vcpu *v)
+{
+    ASSERT(v == current);
+
+    atomic_inc(&v->mem_event_pause_count);
+    vcpu_pause_nosync(v);
+}
+
+void mem_event_vcpu_unpause(struct vcpu *v)
+{
+    int old, new, prev = v->mem_event_pause_count.counter;
+
+    /* All unpause requests come as a result of toolstack responses.  Prevent
+     * underflow of the vcpu pause count. */
+    do
+    {
+        old = prev;
+        new = old - 1;
+
+        if ( new < 0 )
+        {
+            printk(XENLOG_G_WARNING
+                   "%pv mem_event: Too many unpause attempts\n", v);
+            return;
+        }
+
+        prev = cmpxchg(&v->mem_event_pause_count.counter, old, new);
+    } while ( prev != old );
+
+    vcpu_unpause(v);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
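
As a reference for reviewers, the calling convention that __mem_event_claim_slot(),
mem_event_put_request() and mem_event_cancel_slot() implement is sketched below.
The surrounding function and the way the request is filled in are illustrative
only; just the mem_event_* calls are the interface moved by this patch.

    /* Illustrative sketch -- not part of the patch. */
    static void example_put_event(struct domain *d, struct mem_event_domain *med,
                                  uint64_t gfn)
    {
        mem_event_request_t req;

        /* A guest vcpu may sleep here until space is available; a foreign
         * caller gets -EBUSY back immediately if the ring is full. */
        if ( mem_event_claim_slot(d, med) )
            return;

        memset(&req, 0, sizeof(req));
        req.gfn = gfn;    /* e.g. the gfn the event refers to */

        /* Guaranteed to succeed once a slot has been claimed; if the
         * request could not be built, mem_event_cancel_slot(d, med)
         * would release the reservation instead. */
        mem_event_put_request(d, med, &req);
    }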
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 2e3225d..164f1b9 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1104,6 +1104,69 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     return rc;
 }
 
+void destroy_ring_for_helper(
+    void **_va, struct page_info *page)
+{
+    void *va = *_va;
+
+    if ( va != NULL )
+    {
+        unmap_domain_page_global(va);
+        put_page_and_type(page);
+        *_va = NULL;
+    }
+}
+
+int prepare_ring_for_helper(
+    struct domain *d, unsigned long gmfn, struct page_info **_page,
+    void **_va)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    void *va;
+
+    page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
+
+#ifdef CONFIG_MEM_PAGING
+    if ( p2m_is_paging(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        p2m_mem_paging_populate(d, gmfn);
+        return -ENOENT;
+    }
+#endif
+#ifdef CONFIG_MEM_SHARING
+    if ( p2m_is_shared(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        return -ENOENT;
+    }
+#endif
+
+    if ( !page )
+        return -EINVAL;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        return -EINVAL;
+    }
+
+    va = __map_domain_page_global(page);
+    if ( va == NULL )
+    {
+        put_page_and_type(page);
+        return -ENOMEM;
+    }
+
+    *_va = va;
+    *_page = page;
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
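
For context, prepare_ring_for_helper() and destroy_ring_for_helper() are
expected to be paired roughly as below; the struct and function names are
illustrative only (the in-tree callers are mem_event_enable()/mem_event_disable()
and the HVM ioreq page setup).

    /* Illustrative sketch -- not part of the patch. */
    struct example_ring {
        struct page_info *pg;   /* backing page, typed as writable */
        void *va;               /* global mapping of that page */
    };

    static int example_ring_init(struct domain *d, unsigned long gmfn,
                                 struct example_ring *r)
    {
        /* Can return -ENOENT if the gfn is paged out (after kicking off
         * p2m_mem_paging_populate()) or shared; callers retry later. */
        return prepare_ring_for_helper(d, gmfn, &r->pg, &r->va);
    }

    static void example_ring_teardown(struct example_ring *r)
    {
        /* Unmaps the global mapping, drops the page ref/type taken by
         * prepare_ring_for_helper(), and clears r->va; a NULL r->va is
         * tolerated. */
        destroy_ring_for_helper(&r->va, r->pg);
    }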
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 9fa80a4..7fc3b97 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -301,7 +301,6 @@ struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
     })
 
 static inline void put_gfn(struct domain *d, unsigned long gfn) {}
-static inline void mem_event_cleanup(struct domain *d) {}
 static inline int relinquish_shared_pages(struct domain *d)
 {
     return 0;
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 210ff57..8a864ce 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -67,6 +67,9 @@
 #define NR_CPUS 256
 #endif
 
+#define CONFIG_MEM_SHARING 1
+#define CONFIG_MEM_PAGING 1
+
 /* Maximum we can support with current vLAPIC ID mapping. */
 #define MAX_HVM_VCPUS 128
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1123857..74e66f8 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -226,12 +226,6 @@ int hvm_vcpu_cacheattr_init(struct vcpu *v);
 void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
 void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
 
-/* Prepare/destroy a ring for a dom0 helper. Helper with talk
- * with Xen on behalf of this hvm domain. */
-int prepare_ring_for_helper(struct domain *d, unsigned long gmfn, 
-                            struct page_info **_page, void **_va);
-void destroy_ring_for_helper(void **_va, struct page_info *page);
-
 bool_t hvm_send_assist_req(ioreq_t *p);
 void hvm_broadcast_assist_req(ioreq_t *p);
 
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
deleted file mode 100644
index 5c7c5fd..0000000
--- a/xen/include/asm-x86/mem_access.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/******************************************************************************
- * include/asm-x86/mem_access.h
- *
- * Memory access support.
- *
- * Copyright (c) 2011 Virtuata, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#ifndef _XEN_ASM_MEM_ACCESS_H
-#define _XEN_ASM_MEM_ACCESS_H
-
-int mem_access_memop(unsigned long cmd,
-                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
-int mem_access_send_req(struct domain *d, mem_event_request_t *req);
-
-#endif /* _XEN_ASM_MEM_ACCESS_H */
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-x86/mem_event.h b/xen/include/asm-x86/mem_event.h
deleted file mode 100644
index ed4481a..0000000
--- a/xen/include/asm-x86/mem_event.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/******************************************************************************
- * include/asm-x86/mem_event.h
- *
- * Common interface for memory event support.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-
-#ifndef __MEM_EVENT_H__
-#define __MEM_EVENT_H__
-
-/* Returns whether a ring has been set up */
-bool_t mem_event_check_ring(struct mem_event_domain *med);
-
-/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
- * available space and the caller is a foreign domain. If the guest itself
- * is the caller, -EBUSY is avoided by sleeping on a wait queue to ensure
- * that the ring does not lose future events. 
- *
- * However, the allow_sleep flag can be set to false in cases in which it is ok
- * to lose future events, and thus -EBUSY can be returned to guest vcpus
- * (handle with care!). 
- *
- * In general, you must follow a claim_slot() call with either put_request() or
- * cancel_slot(), both of which are guaranteed to
- * succeed. 
- */
-int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
-                            bool_t allow_sleep);
-static inline int mem_event_claim_slot(struct domain *d, 
-                                        struct mem_event_domain *med)
-{
-    return __mem_event_claim_slot(d, med, 1);
-}
-
-static inline int mem_event_claim_slot_nosleep(struct domain *d,
-                                        struct mem_event_domain *med)
-{
-    return __mem_event_claim_slot(d, med, 0);
-}
-
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med);
-
-void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
-                            mem_event_request_t *req);
-
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
-                           mem_event_response_t *rsp);
-
-int do_mem_event_op(int op, uint32_t domain, void *arg);
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
-                     XEN_GUEST_HANDLE_PARAM(void) u_domctl);
-
-void mem_event_vcpu_pause(struct vcpu *v);
-void mem_event_vcpu_unpause(struct vcpu *v);
-
-#endif /* __MEM_EVENT_H__ */
-
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7b85865..ebd482d 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -611,8 +611,6 @@ unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
 
 unsigned long domain_get_maximum_gpfn(struct domain *d);
 
-void mem_event_cleanup(struct domain *d);
-
 extern struct domain *dom_xen, *dom_io, *dom_cow;      /* for vmcoreinfo */
 
 /* Definition of an mm lock: spinlock with extra fields for debugging */
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
new file mode 100644
index 0000000..19d1a2d
--- /dev/null
+++ b/xen/include/xen/mem_access.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ * mem_access.h
+ *
+ * Memory access support.
+ *
+ * Copyright (c) 2011 Virtuata, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _XEN_ASM_MEM_ACCESS_H
+#define _XEN_ASM_MEM_ACCESS_H
+
+#include <public/memory.h>
+
+#ifdef HAS_MEM_ACCESS
+
+int mem_access_memop(unsigned long cmd,
+                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
+int mem_access_send_req(struct domain *d, mem_event_request_t *req);
+
+#else
+
+static inline
+int mem_access_memop(unsigned long cmd,
+                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
+{
+    return -ENOSYS;
+}
+
+static inline
+int mem_access_send_req(struct domain *d, mem_event_request_t *req)
+{
+    return -ENOSYS;
+}
+
+#endif /* HAS_MEM_ACCESS */
+
+#endif /* _XEN_ASM_MEM_ACCESS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
new file mode 100644
index 0000000..8612b26
--- /dev/null
+++ b/xen/include/xen/mem_event.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ * mem_event.h
+ *
+ * Common interface for memory event support.
+ *
+ * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#ifndef __MEM_EVENT_H__
+#define __MEM_EVENT_H__
+
+#include <xen/sched.h>
+
+#ifdef HAS_MEM_ACCESS
+
+/* Clean up on domain destruction */
+void mem_event_cleanup(struct domain *d);
+
+/* Returns whether a ring has been set up */
+bool_t mem_event_check_ring(struct mem_event_domain *med);
+
+/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
+ * available space and the caller is a foreign domain. If the guest itself
+ * is the caller, -EBUSY is avoided by sleeping on a wait queue to ensure
+ * that the ring does not lose future events. 
+ *
+ * However, the allow_sleep flag can be set to false in cases in which it is ok
+ * to lose future events, and thus -EBUSY can be returned to guest vcpus
+ * (handle with care!). 
+ *
+ * In general, you must follow a claim_slot() call with either put_request() or
+ * cancel_slot(), both of which are guaranteed to
+ * succeed. 
+ */
+int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
+                            bool_t allow_sleep);
+static inline int mem_event_claim_slot(struct domain *d, 
+                                        struct mem_event_domain *med)
+{
+    return __mem_event_claim_slot(d, med, 1);
+}
+
+static inline int mem_event_claim_slot_nosleep(struct domain *d,
+                                        struct mem_event_domain *med)
+{
+    return __mem_event_claim_slot(d, med, 0);
+}
+
+void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med);
+
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
+                            mem_event_request_t *req);
+
+int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
+                           mem_event_response_t *rsp);
+
+int do_mem_event_op(int op, uint32_t domain, void *arg);
+int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
+                     XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+
+void mem_event_vcpu_pause(struct vcpu *v);
+void mem_event_vcpu_unpause(struct vcpu *v);
+
+#else
+
+static inline void mem_event_cleanup(struct domain *d) {}
+
+static inline bool_t mem_event_check_ring(struct mem_event_domain *med)
+{
+    return 0;
+}
+
+static inline int mem_event_claim_slot(struct domain *d,
+                                        struct mem_event_domain *med)
+{
+    return -ENOSYS;
+}
+
+static inline int mem_event_claim_slot_nosleep(struct domain *d,
+                                        struct mem_event_domain *med)
+{
+    return -ENOSYS;
+}
+
+static inline
+void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
+{}
+
+static inline
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
+                            mem_event_request_t *req)
+{}
+
+static inline
+int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
+                           mem_event_response_t *rsp)
+{
+    return -ENOSYS;
+}
+
+static inline int do_mem_event_op(int op, uint32_t domain, void *arg)
+{
+    return -ENOSYS;
+}
+
+static inline
+int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
+                     XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+{
+    return -ENOSYS;
+}
+
+static inline void mem_event_vcpu_pause(struct vcpu *v) {}
+static inline void mem_event_vcpu_unpause(struct vcpu *v) {}
+
+#endif /* HAS_MEM_ACCESS */
+
+#endif /* __MEM_EVENT_H__ */
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
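
The point of the !HAS_MEM_ACCESS stubs above is that common code can call the
mem_event interface unconditionally instead of growing arch #ifdefs; e.g. the
common domain teardown path can do something like the following and still build
on architectures that do not (yet) select HAS_MEM_ACCESS (the wrapper below is
illustrative only).

    /* Illustrative sketch -- not part of the patch. */
    static void example_domain_teardown(struct domain *d)
    {
        /* Real ring/waitqueue teardown when HAS_MEM_ACCESS is selected,
         * an empty inline otherwise -- no #ifdef at the call site. */
        mem_event_cleanup(d);
    }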
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b183189..7c0efc7 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -371,4 +371,10 @@ int guest_remove_page(struct domain *d, unsigned long gmfn);
 /* TRUE if the whole page at @mfn is of the requested RAM type(s) above. */
 int page_is_ram_type(unsigned long mfn, unsigned long mem_type);
 
+/* Prepare/destroy a ring for a dom0 helper. Helper will talk
+ * with Xen on behalf of this domain. */
+int prepare_ring_for_helper(struct domain *d, unsigned long gmfn,
+                            struct page_info **_page, void **_va);
+void destroy_ring_for_helper(void **_va, struct page_info *page);
+
 #endif /* __XEN_MM_H__ */
-- 
2.1.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

