[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH v6 09/43] altp2m: Move do_altp2m_op to common code



From: Rose Spangler <Rose.Spangler@xxxxxxxxxxxxxx>

This commit moves the altp2m HVMOP implementation to common code, making it
possible to re-use it across architectures. In this commit, the entire
implementation is gated behind CONFIG_X86, with no changes to the body of
do_altp2m_op. A stub implementation is added for ARM, which will be removed
once do_altp2m_op has been modified to add ARM support.

This commit contains only code movement, and no change in functionality is
intended.

This is commit 1/2 of the common do_altp2m_op phase.

Signed-off-by: Rose Spangler <Rose.Spangler@xxxxxxxxxxxxxx>
Signed-off-by: Aqib Javaid <Aqib.Javaid@xxxxxxxxxxxxxx>
---
v6: Introduced this patch.
---
 xen/arch/x86/hvm/hvm.c   | 400 -------------------------------------
 xen/common/altp2m.c      | 413 +++++++++++++++++++++++++++++++++++++++
 xen/include/xen/altp2m.h |  11 ++
 3 files changed, 424 insertions(+), 400 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5b6e91adce17..4adcc3c49d95 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4526,406 +4526,6 @@ static int hvmop_get_param(
     return rc;
 }
 
-/*
- * altp2m operations are envisioned as being used in several different
- * modes:
- *
- * - external: All control and decisions are made by an external agent
- *   running domain 0.
- *
- * - internal: altp2m operations are used exclusively by an in-guest
- *   agent to protect itself from the guest kernel and in-guest
- *   attackers.
- *
- * - coordinated: An in-guest agent handles #VE and VMFUNCs locally,
- *   but makes requests of an agent running outside the domain for
- *   bigger changes (such as modifying altp2m entires).
- *
- * This corresponds to the three values for HVM_PARAM_ALTP2M
- * (external, mixed, limited). All three models have advantages and
- * disadvantages.
- *
- * Normally hypercalls made by a program in domain 0 in order to
- * control a guest would be DOMCTLs rather than HVMOPs.  But in order
- * to properly enable the 'internal' use case, as well as to avoid
- * fragmentation, all altp2m subops should come under this single
- * HVMOP.
- *
- * Note that 'internal' mode (HVM_PARAM_ALTP2M == XEN_ALTP2M_mixed)
- * has not been evaluated for safety from a security perspective.
- * Before using this mode in a security-critical environment, each
- * subop should be evaluated for safety, with unsafe subops
- * blacklisted in xsm_hvm_altp2mhvm_op().
- */
-static int do_altp2m_op(
-    XEN_GUEST_HANDLE_PARAM(void) arg)
-{
-#ifdef CONFIG_ALTP2M
-    struct xen_hvm_altp2m_op a;
-    struct domain *d = NULL;
-    int rc = 0;
-    uint64_t mode;
-
-    if ( !altp2m_supported() )
-        return -EOPNOTSUPP;
-
-    if ( copy_from_guest(&a, arg, 1) )
-        return -EFAULT;
-
-    if ( a.pad1 || a.pad2 ||
-         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
-        return -EINVAL;
-
-    switch ( a.cmd )
-    {
-    case HVMOP_altp2m_get_domain_state:
-    case HVMOP_altp2m_set_domain_state:
-    case HVMOP_altp2m_vcpu_enable_notify:
-    case HVMOP_altp2m_vcpu_disable_notify:
-    case HVMOP_altp2m_create_p2m:
-    case HVMOP_altp2m_destroy_p2m:
-    case HVMOP_altp2m_switch_p2m:
-    case HVMOP_altp2m_set_suppress_ve:
-    case HVMOP_altp2m_set_suppress_ve_multi:
-    case HVMOP_altp2m_get_suppress_ve:
-    case HVMOP_altp2m_set_mem_access:
-    case HVMOP_altp2m_set_mem_access_multi:
-    case HVMOP_altp2m_get_mem_access:
-    case HVMOP_altp2m_change_gfn:
-    case HVMOP_altp2m_get_p2m_idx:
-    case HVMOP_altp2m_set_visibility:
-        break;
-
-    default:
-        return -EOPNOTSUPP;
-    }
-
-    d = rcu_lock_domain_by_any_id(a.domain);
-
-    if ( d == NULL )
-        return -ESRCH;
-
-    if ( !is_hvm_domain(d) )
-    {
-        rc = -EOPNOTSUPP;
-        goto out;
-    }
-
-    if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
-         (a.cmd != HVMOP_altp2m_set_domain_state) &&
-         !d->altp2m_active )
-    {
-        rc = -EOPNOTSUPP;
-        goto out;
-    }
-
-    mode = d->arch.hvm.params[HVM_PARAM_ALTP2M];
-
-    if ( XEN_ALTP2M_disabled == mode )
-    {
-        rc = -EINVAL;
-        goto out;
-    }
-
-    if ( d->nr_altp2m == 0 )
-    {
-        rc = -EOPNOTSUPP;
-        goto out;
-    }
-
-    if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) )
-        goto out;
-
-    switch ( a.cmd )
-    {
-    case HVMOP_altp2m_get_domain_state:
-        a.u.domain_state.state = altp2m_active(d);
-        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-        break;
-
-    case HVMOP_altp2m_set_domain_state:
-    {
-        struct vcpu *v;
-        bool ostate;
-
-        if ( nestedhvm_enabled(d) )
-        {
-            rc = -EINVAL;
-            break;
-        }
-
-        rc = domain_pause_except_self(d);
-        if ( rc )
-            break;
-
-        ostate = d->altp2m_active;
-        d->altp2m_active = !!a.u.domain_state.state;
-
-        /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->altp2m_active != ostate &&
-             (ostate || !(rc = altp2m_init_by_id(d, 0))) )
-        {
-            for_each_vcpu( d, v )
-            {
-                if ( !ostate )
-                    altp2m_vcpu_initialise(v);
-                else
-                    altp2m_vcpu_destroy(v);
-            }
-
-            if ( ostate )
-                altp2m_flush(d);
-        }
-        else if ( rc )
-            d->altp2m_active = false;
-
-        domain_unpause_except_self(d);
-        break;
-    }
-
-    case HVMOP_altp2m_vcpu_enable_notify:
-    {
-        struct vcpu *v;
-
-        if ( a.u.enable_notify.pad ||
-             a.u.enable_notify.vcpu_id >= d->max_vcpus )
-        {
-            rc = -EINVAL;
-            break;
-        }
-
-        if ( !cpu_has_vmx_virt_exceptions )
-        {
-            rc = -EOPNOTSUPP;
-            break;
-        }
-
-        v = d->vcpu[a.u.enable_notify.vcpu_id];
-
-        rc = altp2m_vcpu_enable_ve(v, _gfn(a.u.enable_notify.gfn));
-        break;
-    }
-
-    case HVMOP_altp2m_vcpu_disable_notify:
-    {
-        struct vcpu *v;
-
-        if ( a.u.disable_notify.vcpu_id >= d->max_vcpus )
-        {
-            rc = -EINVAL;
-            break;
-        }
-
-        if ( !cpu_has_vmx_virt_exceptions )
-        {
-            rc = -EOPNOTSUPP;
-            break;
-        }
-
-        v = d->vcpu[a.u.enable_notify.vcpu_id];
-
-        altp2m_vcpu_disable_ve(v);
-        break;
-    }
-
-    case HVMOP_altp2m_create_p2m:
-        rc = altp2m_init_next_available(d, &a.u.view.view,
-                                        a.u.view.hvmmem_default_access);
-        if ( !rc )
-            rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-        break;
-
-    case HVMOP_altp2m_destroy_p2m:
-        rc = altp2m_destroy_by_id(d, a.u.view.view);
-        break;
-
-    case HVMOP_altp2m_switch_p2m:
-        rc = altp2m_switch_domain_altp2m_by_id(d, a.u.view.view);
-        break;
-
-    case HVMOP_altp2m_set_suppress_ve:
-        if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
-            rc = -EINVAL;
-        else
-        {
-            gfn_t gfn = _gfn(a.u.suppress_ve.gfn);
-            unsigned int altp2m_idx = a.u.suppress_ve.view;
-            bool suppress_ve = a.u.suppress_ve.suppress_ve;
-
-            rc = p2m_set_suppress_ve(d, gfn, suppress_ve, altp2m_idx);
-        }
-        break;
-
-    case HVMOP_altp2m_set_suppress_ve_multi:
-    {
-        uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
-
-        a.u.suppress_ve_multi.last_gfn = min(a.u.suppress_ve_multi.last_gfn,
-                                             max_phys_addr);
-
-        if ( a.u.suppress_ve_multi.pad1 ||
-             a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
-            rc = -EINVAL;
-        else
-        {
-            rc = p2m_set_suppress_ve_multi(d, &a.u.suppress_ve_multi);
-            if ( (!rc || rc == -ERESTART) && __copy_to_guest(arg, &a, 1) )
-                rc = -EFAULT;
-        }
-        break;
-    }
-
-    case HVMOP_altp2m_get_suppress_ve:
-        if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
-            rc = -EINVAL;
-        else
-        {
-            gfn_t gfn = _gfn(a.u.suppress_ve.gfn);
-            unsigned int altp2m_idx = a.u.suppress_ve.view;
-            bool suppress_ve;
-
-            rc = p2m_get_suppress_ve(d, gfn, &suppress_ve, altp2m_idx);
-            if ( !rc )
-            {
-                a.u.suppress_ve.suppress_ve = suppress_ve;
-                rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-            }
-        }
-        break;
-
-    case HVMOP_altp2m_set_mem_access:
-        if ( !vm_event_is_enabled(current) )
-        {
-            rc = -EOPNOTSUPP;
-            break;
-        }
-
-        if ( a.u.mem_access.pad )
-            rc = -EINVAL;
-        else
-            rc = p2m_set_mem_access(d, _gfn(a.u.mem_access.gfn), 1, 0, 0,
-                                    a.u.mem_access.access,
-                                    a.u.mem_access.view);
-        break;
-
-    case HVMOP_altp2m_set_mem_access_multi:
-        if ( !vm_event_is_enabled(current) )
-        {
-            rc = -EOPNOTSUPP;
-            break;
-        }
-
-        if ( a.u.set_mem_access_multi.pad ||
-             a.u.set_mem_access_multi.opaque > a.u.set_mem_access_multi.nr )
-        {
-            rc = -EINVAL;
-            break;
-        }
-
-        /*
-         * Unlike XENMEM_access_op_set_access_multi, we don't need any bits of
-         * the 'continuation' counter to be zero (to stash a command in).
-         * However, 0x40 is a good 'stride' to make sure that we make
-         * a reasonable amount of forward progress before yielding,
-         * so use a mask of 0x3F here.
-         */
-        rc = p2m_set_mem_access_multi(d, a.u.set_mem_access_multi.pfn_list,
-                                      a.u.set_mem_access_multi.access_list,
-                                      a.u.set_mem_access_multi.nr,
-                                      a.u.set_mem_access_multi.opaque,
-                                      0x3F,
-                                      a.u.set_mem_access_multi.view);
-        if ( rc > 0 )
-        {
-            a.u.set_mem_access_multi.opaque = rc;
-            rc = -ERESTART;
-            if ( __copy_field_to_guest(guest_handle_cast(arg, xen_hvm_altp2m_op_t),
-                                       &a, u.set_mem_access_multi.opaque) )
-                rc = -EFAULT;
-        }
-        break;
-
-    case HVMOP_altp2m_get_mem_access:
-        if ( !vm_event_is_enabled(current) )
-        {
-            rc = -EOPNOTSUPP;
-            break;
-        }
-
-        if ( a.u.mem_access.pad )
-            rc = -EINVAL;
-        else
-        {
-            xenmem_access_t access;
-
-            rc = p2m_get_mem_access(d, _gfn(a.u.mem_access.gfn), &access,
-                                    a.u.mem_access.view);
-            if ( !rc )
-            {
-                a.u.mem_access.access = access;
-                rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-            }
-        }
-        break;
-
-    case HVMOP_altp2m_change_gfn:
-        if ( a.u.change_gfn.pad1 || a.u.change_gfn.pad2 )
-            rc = -EINVAL;
-        else
-            rc = altp2m_change_gfn(d, a.u.change_gfn.view,
-                                   _gfn(a.u.change_gfn.old_gfn),
-                                   _gfn(a.u.change_gfn.new_gfn));
-        break;
-
-    case HVMOP_altp2m_get_p2m_idx:
-    {
-        struct vcpu *v;
-
-        if ( !altp2m_active(d) )
-        {
-            rc = -EOPNOTSUPP;
-            break;
-        }
-
-        if ( (v = domain_vcpu(d, a.u.get_vcpu_p2m_idx.vcpu_id)) == NULL )
-        {
-            rc = -EINVAL;
-            break;
-        }
-
-        a.u.get_vcpu_p2m_idx.altp2m_idx = altp2m_vcpu_idx(v);
-        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-        break;
-    }
-
-    case HVMOP_altp2m_set_visibility:
-    {
-        unsigned int idx = a.u.set_visibility.altp2m_idx;
-
-        if ( a.u.set_visibility.pad )
-            rc = -EINVAL;
-        else if ( !altp2m_active(d) )
-            rc = -EOPNOTSUPP;
-        else
-            rc = altp2m_set_view_visibility(d, idx, a.u.set_visibility.visible);
-        break;
-    }
-
-    default:
-        ASSERT_UNREACHABLE();
-        rc = -EOPNOTSUPP;
-        break;
-    }
-
- out:
-    rcu_unlock_domain(d);
-
-    return rc;
-#else /* !CONFIG_ALTP2M */
-    return -EOPNOTSUPP;
-#endif /* CONFIG_ALTP2M */
-}
-
 DEFINE_XEN_GUEST_HANDLE(compat_hvm_altp2m_op_t);
 
 /*
diff --git a/xen/common/altp2m.c b/xen/common/altp2m.c
index 1a4e102324d4..a43b3a3e34b3 100644
--- a/xen/common/altp2m.c
+++ b/xen/common/altp2m.c
@@ -1,5 +1,418 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 #include <xen/altp2m.h>
+#include <xen/guest_access.h>
+#include <xen/vm_event.h>
+
+#include <asm/altp2m.h>
+
+#include <public/hvm/params.h>
+
+#include <xsm/xsm.h>
+
+#ifdef CONFIG_X86
+#include <asm/hvm/nestedhvm.h>
+/*
+ * altp2m operations are envisioned as being used in several different
+ * modes:
+ *
+ * - external: All control and decisions are made by an external agent
+ *   running domain 0.
+ *
+ * - internal: altp2m operations are used exclusively by an in-guest
+ *   agent to protect itself from the guest kernel and in-guest
+ *   attackers.
+ *
+ * - coordinated: An in-guest agent handles #VE and VMFUNCs locally,
+ *   but makes requests of an agent running outside the domain for
+ *   bigger changes (such as modifying altp2m entries).
+ *
+ * This corresponds to the three values for HVM_PARAM_ALTP2M
+ * (external, mixed, limited). All three models have advantages and
+ * disadvantages.
+ *
+ * Normally hypercalls made by a program in domain 0 in order to
+ * control a guest would be DOMCTLs rather than HVMOPs.  But in order
+ * to properly enable the 'internal' use case, as well as to avoid
+ * fragmentation, all altp2m subops should come under this single
+ * HVMOP.
+ *
+ * Note that 'internal' mode (HVM_PARAM_ALTP2M == XEN_ALTP2M_mixed)
+ * has not been evaluated for safety from a security perspective.
+ * Before using this mode in a security-critical environment, each
+ * subop should be evaluated for safety, with unsafe subops
+ * blacklisted in xsm_hvm_altp2mhvm_op().
+ */
+int do_altp2m_op(
+    XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    struct xen_hvm_altp2m_op a;
+    struct domain *d = NULL;
+    int rc = 0;
+    uint64_t mode;
+
+    if ( !altp2m_supported() )
+        return -EOPNOTSUPP;
+
+    if ( copy_from_guest(&a, arg, 1) )
+        return -EFAULT;
+
+    if ( a.pad1 || a.pad2 ||
+         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
+        return -EINVAL;
+
+    switch ( a.cmd )
+    {
+    case HVMOP_altp2m_get_domain_state:
+    case HVMOP_altp2m_set_domain_state:
+    case HVMOP_altp2m_vcpu_enable_notify:
+    case HVMOP_altp2m_vcpu_disable_notify:
+    case HVMOP_altp2m_create_p2m:
+    case HVMOP_altp2m_destroy_p2m:
+    case HVMOP_altp2m_switch_p2m:
+    case HVMOP_altp2m_set_suppress_ve:
+    case HVMOP_altp2m_set_suppress_ve_multi:
+    case HVMOP_altp2m_get_suppress_ve:
+    case HVMOP_altp2m_set_mem_access:
+    case HVMOP_altp2m_set_mem_access_multi:
+    case HVMOP_altp2m_get_mem_access:
+    case HVMOP_altp2m_change_gfn:
+    case HVMOP_altp2m_get_p2m_idx:
+    case HVMOP_altp2m_set_visibility:
+        break;
+
+    default:
+        return -EOPNOTSUPP;
+    }
+
+    d = rcu_lock_domain_by_any_id(a.domain);
+
+    if ( d == NULL )
+        return -ESRCH;
+
+    if ( !is_hvm_domain(d) )
+    {
+        rc = -EOPNOTSUPP;
+        goto out;
+    }
+
+    if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
+         (a.cmd != HVMOP_altp2m_set_domain_state) &&
+         !d->altp2m_active )
+    {
+        rc = -EOPNOTSUPP;
+        goto out;
+    }
+
+    mode = d->arch.hvm.params[HVM_PARAM_ALTP2M];
+
+    if ( XEN_ALTP2M_disabled == mode )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( d->nr_altp2m == 0 )
+    {
+        rc = -EOPNOTSUPP;
+        goto out;
+    }
+
+    if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) )
+        goto out;
+
+    switch ( a.cmd )
+    {
+    case HVMOP_altp2m_get_domain_state:
+        a.u.domain_state.state = altp2m_active(d);
+        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+        break;
+
+    case HVMOP_altp2m_set_domain_state:
+    {
+        struct vcpu *v;
+        bool ostate;
+
+        if ( nestedhvm_enabled(d) )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        rc = domain_pause_except_self(d);
+        if ( rc )
+            break;
+
+        ostate = d->altp2m_active;
+        d->altp2m_active = !!a.u.domain_state.state;
+
+        /* If the alternate p2m state has changed, handle appropriately */
+        if ( d->altp2m_active != ostate &&
+             (ostate || !(rc = altp2m_init_by_id(d, 0))) )
+        {
+            for_each_vcpu( d, v )
+            {
+                if ( !ostate )
+                    altp2m_vcpu_initialise(v);
+                else
+                    altp2m_vcpu_destroy(v);
+            }
+
+            if ( ostate )
+                altp2m_flush(d);
+        }
+        else if ( rc )
+            d->altp2m_active = false;
+
+        domain_unpause_except_self(d);
+        break;
+    }
+
+    case HVMOP_altp2m_vcpu_enable_notify:
+    {
+        struct vcpu *v;
+
+        if ( a.u.enable_notify.pad ||
+             a.u.enable_notify.vcpu_id >= d->max_vcpus )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        if ( !cpu_has_vmx_virt_exceptions )
+        {
+            rc = -EOPNOTSUPP;
+            break;
+        }
+
+        v = d->vcpu[a.u.enable_notify.vcpu_id];
+
+        rc = altp2m_vcpu_enable_ve(v, _gfn(a.u.enable_notify.gfn));
+        break;
+    }
+
+    case HVMOP_altp2m_vcpu_disable_notify:
+    {
+        struct vcpu *v;
+
+        if ( a.u.disable_notify.vcpu_id >= d->max_vcpus )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        if ( !cpu_has_vmx_virt_exceptions )
+        {
+            rc = -EOPNOTSUPP;
+            break;
+        }
+
+        v = d->vcpu[a.u.enable_notify.vcpu_id];
+
+        altp2m_vcpu_disable_ve(v);
+        break;
+    }
+
+    case HVMOP_altp2m_create_p2m:
+        rc = altp2m_init_next_available(d, &a.u.view.view,
+                                        a.u.view.hvmmem_default_access);
+        if ( !rc )
+            rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+        break;
+
+    case HVMOP_altp2m_destroy_p2m:
+        rc = altp2m_destroy_by_id(d, a.u.view.view);
+        break;
+
+    case HVMOP_altp2m_switch_p2m:
+        rc = altp2m_switch_domain_altp2m_by_id(d, a.u.view.view);
+        break;
+
+    case HVMOP_altp2m_set_suppress_ve:
+        if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
+            rc = -EINVAL;
+        else
+        {
+            gfn_t gfn = _gfn(a.u.suppress_ve.gfn);
+            unsigned int altp2m_idx = a.u.suppress_ve.view;
+            bool suppress_ve = a.u.suppress_ve.suppress_ve;
+
+            rc = p2m_set_suppress_ve(d, gfn, suppress_ve, altp2m_idx);
+        }
+        break;
+
+    case HVMOP_altp2m_set_suppress_ve_multi:
+    {
+        uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
+
+        a.u.suppress_ve_multi.last_gfn = min(a.u.suppress_ve_multi.last_gfn,
+                                             max_phys_addr);
+
+        if ( a.u.suppress_ve_multi.pad1 ||
+             a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
+            rc = -EINVAL;
+        else
+        {
+            rc = p2m_set_suppress_ve_multi(d, &a.u.suppress_ve_multi);
+            if ( (!rc || rc == -ERESTART) && __copy_to_guest(arg, &a, 1) )
+                rc = -EFAULT;
+        }
+        break;
+    }
+
+    case HVMOP_altp2m_get_suppress_ve:
+        if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
+            rc = -EINVAL;
+        else
+        {
+            gfn_t gfn = _gfn(a.u.suppress_ve.gfn);
+            unsigned int altp2m_idx = a.u.suppress_ve.view;
+            bool suppress_ve;
+
+            rc = p2m_get_suppress_ve(d, gfn, &suppress_ve, altp2m_idx);
+            if ( !rc )
+            {
+                a.u.suppress_ve.suppress_ve = suppress_ve;
+                rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+            }
+        }
+        break;
+
+    case HVMOP_altp2m_set_mem_access:
+        if ( !vm_event_is_enabled(current) )
+        {
+            rc = -EOPNOTSUPP;
+            break;
+        }
+
+        if ( a.u.mem_access.pad )
+            rc = -EINVAL;
+        else
+            rc = p2m_set_mem_access(d, _gfn(a.u.mem_access.gfn), 1, 0, 0,
+                                    a.u.mem_access.access,
+                                    a.u.mem_access.view);
+        break;
+
+    case HVMOP_altp2m_set_mem_access_multi:
+        if ( !vm_event_is_enabled(current) )
+        {
+            rc = -EOPNOTSUPP;
+            break;
+        }
+
+        if ( a.u.set_mem_access_multi.pad ||
+             a.u.set_mem_access_multi.opaque > a.u.set_mem_access_multi.nr )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        /*
+         * Unlike XENMEM_access_op_set_access_multi, we don't need any bits of
+         * the 'continuation' counter to be zero (to stash a command in).
+         * However, 0x40 is a good 'stride' to make sure that we make
+         * a reasonable amount of forward progress before yielding,
+         * so use a mask of 0x3F here.
+         */
+        rc = p2m_set_mem_access_multi(d, a.u.set_mem_access_multi.pfn_list,
+                                      a.u.set_mem_access_multi.access_list,
+                                      a.u.set_mem_access_multi.nr,
+                                      a.u.set_mem_access_multi.opaque,
+                                      0x3F,
+                                      a.u.set_mem_access_multi.view);
+        if ( rc > 0 )
+        {
+            a.u.set_mem_access_multi.opaque = rc;
+            rc = -ERESTART;
+            if ( __copy_field_to_guest(guest_handle_cast(arg, xen_hvm_altp2m_op_t),
+                                       &a, u.set_mem_access_multi.opaque) )
+                rc = -EFAULT;
+        }
+        break;
+
+    case HVMOP_altp2m_get_mem_access:
+        if ( !vm_event_is_enabled(current) )
+        {
+            rc = -EOPNOTSUPP;
+            break;
+        }
+
+        if ( a.u.mem_access.pad )
+            rc = -EINVAL;
+        else
+        {
+            xenmem_access_t access;
+
+            rc = p2m_get_mem_access(d, _gfn(a.u.mem_access.gfn), &access,
+                                    a.u.mem_access.view);
+            if ( !rc )
+            {
+                a.u.mem_access.access = access;
+                rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+            }
+        }
+        break;
+
+    case HVMOP_altp2m_change_gfn:
+        if ( a.u.change_gfn.pad1 || a.u.change_gfn.pad2 )
+            rc = -EINVAL;
+        else
+            rc = altp2m_change_gfn(d, a.u.change_gfn.view,
+                                   _gfn(a.u.change_gfn.old_gfn),
+                                   _gfn(a.u.change_gfn.new_gfn));
+        break;
+
+    case HVMOP_altp2m_get_p2m_idx:
+    {
+        struct vcpu *v;
+
+        if ( !altp2m_active(d) )
+        {
+            rc = -EOPNOTSUPP;
+            break;
+        }
+
+        if ( (v = domain_vcpu(d, a.u.get_vcpu_p2m_idx.vcpu_id)) == NULL )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        a.u.get_vcpu_p2m_idx.altp2m_idx = altp2m_vcpu_idx(v);
+        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+        break;
+    }
+
+    case HVMOP_altp2m_set_visibility:
+    {
+        unsigned int idx = a.u.set_visibility.altp2m_idx;
+
+        if ( a.u.set_visibility.pad )
+            rc = -EINVAL;
+        else if ( !altp2m_active(d) )
+            rc = -EOPNOTSUPP;
+        else
+            rc = altp2m_set_view_visibility(d, idx, a.u.set_visibility.visible);
+        break;
+    }
+
+    default:
+        ASSERT_UNREACHABLE();
+        rc = -EOPNOTSUPP;
+        break;
+    }
+
+ out:
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+#else
+int do_altp2m_op(
+    XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    return -EOPNOTSUPP;
+}
+#endif
 
 /*
  * Local variables:
diff --git a/xen/include/xen/altp2m.h b/xen/include/xen/altp2m.h
index b3b21124dd7e..be627152668a 100644
--- a/xen/include/xen/altp2m.h
+++ b/xen/include/xen/altp2m.h
@@ -2,8 +2,12 @@
 #ifndef __XEN_ALTP2M_H__
 #define __XEN_ALTP2M_H__
 
+#include <xen/errno.h>
+#include <xen/param.h>
 #include <xen/sched.h>
 
+#include <public/hvm/hvm_op.h>
+
 #ifdef CONFIG_ALTP2M
 
 /* Alternate p2m HVM on/off per domain */
@@ -12,6 +16,8 @@ static inline bool altp2m_active(const struct domain *d)
     return d->altp2m_active;
 }
 
+int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg);
+
 #else /* CONFIG_ALTP2M */
 
 static inline bool altp2m_active(const struct domain *d)
@@ -19,6 +25,11 @@ static inline bool altp2m_active(const struct domain *d)
     return false;
 }
 
+static inline int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    return -EOPNOTSUPP;
+}
+
 #endif /* CONFIG_ALTP2M */
 
 #endif /* __XEN_ALTP2M_H__ */
-- 
2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.