|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v3 13/13] x86/altp2m: XSM hooks for altp2m HVM ops
From: Ravi Sahita <ravi.sahita@xxxxxxxxx>
Signed-off-by: Ravi Sahita <ravi.sahita@xxxxxxxxx>
---
tools/flask/policy/policy/modules/xen/xen.if | 4 +-
xen/arch/x86/hvm/hvm.c | 118 ++++++++++++++++-----------
xen/include/xsm/dummy.h | 12 +++
xen/include/xsm/xsm.h | 12 +++
xen/xsm/dummy.c | 2 +
xen/xsm/flask/hooks.c | 12 +++
xen/xsm/flask/policy/access_vectors | 7 ++
7 files changed, 119 insertions(+), 48 deletions(-)
diff --git a/tools/flask/policy/policy/modules/xen/xen.if
b/tools/flask/policy/policy/modules/xen/xen.if
index f4cde11..6177fe9 100644
--- a/tools/flask/policy/policy/modules/xen/xen.if
+++ b/tools/flask/policy/policy/modules/xen/xen.if
@@ -8,7 +8,7 @@
define(`declare_domain_common', `
allow $1 $2:grant { query setup };
allow $1 $2:mmu { adjust physmap map_read map_write stat pinpage
updatemp mmuext_op };
- allow $1 $2:hvm { getparam setparam };
+ allow $1 $2:hvm { getparam setparam altp2mhvm_op };
allow $1 $2:domain2 get_vnumainfo;
')
@@ -58,7 +58,7 @@ define(`create_domain_common', `
allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage
mmuext_op updatemp };
allow $1 $2:grant setup;
allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc
- setparam pcilevel trackdirtyvram nested };
+ setparam pcilevel trackdirtyvram nested altp2mhvm
altp2mhvm_op };
')
# create_domain(priv, target)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 92c123c..cc0c7b3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5891,6 +5891,9 @@ static int hvmop_set_param(
nestedhvm_vcpu_destroy(v);
break;
case HVM_PARAM_ALTP2MHVM:
+ rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
+ if ( rc )
+ break;
if ( a.value > 1 )
rc = -EINVAL;
if ( a.value &&
@@ -6471,12 +6474,15 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( d == NULL )
return -ESRCH;
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] )
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
{
- a.state = altp2m_active(d);
- rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] )
+ {
+ a.state = altp2m_active(d);
+ rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
}
rcu_unlock_domain(d);
@@ -6497,31 +6503,34 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( d == NULL )
return -ESRCH;
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] &&
- !nestedhvm_enabled(d) )
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
{
- ostate = d->arch.altp2m_active;
- d->arch.altp2m_active = !!a.state;
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.hvm_domain.params[HVM_PARAM_ALTP2MHVM] &&
+ !nestedhvm_enabled(d) )
+ {
+ ostate = d->arch.altp2m_active;
+ d->arch.altp2m_active = !!a.state;
- rc = 0;
+ rc = 0;
- /* If the alternate p2m state has changed, handle appropriately */
- if ( d->arch.altp2m_active != ostate )
- {
- if ( ostate || !(rc = p2m_init_altp2m_by_id(d, 0)) )
+ /* If the alternate p2m state has changed, handle
appropriately */
+ if ( d->arch.altp2m_active != ostate )
{
- for_each_vcpu( d, v )
+ if ( ostate || !(rc = p2m_init_altp2m_by_id(d, 0)) )
{
- if ( !ostate )
- altp2m_vcpu_initialise(v);
- else
- altp2m_vcpu_destroy(v);
+ for_each_vcpu( d, v )
+ {
+ if ( !ostate )
+ altp2m_vcpu_initialise(v);
+ else
+ altp2m_vcpu_destroy(v);
+ }
+
+ if ( ostate )
+ p2m_flush_altp2m(d);
}
-
- if ( ostate )
- p2m_flush_altp2m(d);
}
}
}
@@ -6540,6 +6549,9 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
+ if ( (rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, curr_d)) )
+ return rc;
+
if ( !is_hvm_domain(curr_d) || !hvm_altp2m_supported() ||
!curr_d->arch.altp2m_active ||
gfn_x(vcpu_altp2m(curr).veinfo_gfn) != INVALID_GFN)
@@ -6568,11 +6580,14 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( d == NULL )
return -ESRCH;
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.altp2m_active &&
- !(rc = p2m_init_next_altp2m(d, &a.view)) )
- rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
+ {
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active &&
+ !(rc = p2m_init_next_altp2m(d, &a.view)) )
+ rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
rcu_unlock_domain(d);
break;
@@ -6590,10 +6605,13 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( d == NULL )
return -ESRCH;
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.altp2m_active )
- rc = p2m_destroy_altp2m_by_id(d, a.view);
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
+ {
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+ rc = p2m_destroy_altp2m_by_id(d, a.view);
+ }
rcu_unlock_domain(d);
break;
@@ -6611,10 +6629,13 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( d == NULL )
return -ESRCH;
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.altp2m_active )
- rc = p2m_switch_domain_altp2m_by_id(d, a.view);
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
+ {
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+ rc = p2m_switch_domain_altp2m_by_id(d, a.view);
+ }
rcu_unlock_domain(d);
break;
@@ -6631,11 +6652,13 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
d = rcu_lock_domain_by_any_id(a.domid);
if ( d == NULL )
return -ESRCH;
-
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.altp2m_active )
- rc = p2m_set_altp2m_mem_access(d, a.view, _gfn(a.gfn),
a.hvmmem_access);
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
+ {
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+ rc = p2m_set_altp2m_mem_access(d, a.view, _gfn(a.gfn),
a.hvmmem_access);
+ }
rcu_unlock_domain(d);
break;
@@ -6653,10 +6676,13 @@ long do_hvm_op(unsigned long op,
XEN_GUEST_HANDLE_PARAM(void) arg)
if ( d == NULL )
return -ESRCH;
- rc = -EINVAL;
- if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
- d->arch.altp2m_active )
- rc = p2m_change_altp2m_gfn(d, a.view, _gfn(a.old_gfn),
_gfn(a.new_gfn));
+ if ( !(rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
+ {
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) && hvm_altp2m_supported() &&
+ d->arch.altp2m_active )
+ rc = p2m_change_altp2m_gfn(d, a.view, _gfn(a.old_gfn),
_gfn(a.new_gfn));
+ }
rcu_unlock_domain(d);
break;
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index f044c0f..e0b561d 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -548,6 +548,18 @@ static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG
struct domain *d)
return xsm_default_action(action, current->domain, d);
}
+static XSM_INLINE int xsm_hvm_param_altp2mhvm(XSM_DEFAULT_ARG struct domain *d)
+{
+ XSM_ASSERT_ACTION(XSM_PRIV);
+ return xsm_default_action(action, current->domain, d);
+}
+
+static XSM_INLINE int xsm_hvm_altp2mhvm_op(XSM_DEFAULT_ARG struct domain *d)
+{
+ XSM_ASSERT_ACTION(XSM_TARGET);
+ return xsm_default_action(action, current->domain, d);
+}
+
static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d,
int mode, int op)
{
XSM_ASSERT_ACTION(XSM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index c872d44..dc48d23 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -147,6 +147,8 @@ struct xsm_operations {
int (*hvm_param) (struct domain *d, unsigned long op);
int (*hvm_control) (struct domain *d, unsigned long op);
int (*hvm_param_nested) (struct domain *d);
+ int (*hvm_param_altp2mhvm) (struct domain *d);
+ int (*hvm_altp2mhvm_op) (struct domain *d);
int (*get_vnumainfo) (struct domain *d);
int (*vm_event_control) (struct domain *d, int mode, int op);
@@ -586,6 +588,16 @@ static inline int xsm_hvm_param_nested (xsm_default_t def,
struct domain *d)
return xsm_ops->hvm_param_nested(d);
}
+static inline int xsm_hvm_param_altp2mhvm (xsm_default_t def, struct domain *d)
+{
+ return xsm_ops->hvm_param_altp2mhvm(d);
+}
+
+static inline int xsm_hvm_altp2mhvm_op (xsm_default_t def, struct domain *d)
+{
+ return xsm_ops->hvm_altp2mhvm_op(d);
+}
+
static inline int xsm_get_vnumainfo (xsm_default_t def, struct domain *d)
{
return xsm_ops->get_vnumainfo(d);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index e84b0e4..3461d4f 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -116,6 +116,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
set_to_dummy_if_null(ops, hvm_param);
set_to_dummy_if_null(ops, hvm_control);
set_to_dummy_if_null(ops, hvm_param_nested);
+ set_to_dummy_if_null(ops, hvm_param_altp2mhvm);
+ set_to_dummy_if_null(ops, hvm_altp2mhvm_op);
set_to_dummy_if_null(ops, do_xsm_op);
#ifdef CONFIG_COMPAT
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 6e37d29..2b998c9 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1170,6 +1170,16 @@ static int flask_hvm_param_nested(struct domain *d)
return current_has_perm(d, SECCLASS_HVM, HVM__NESTED);
}
+static int flask_hvm_param_altp2mhvm(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_HVM, HVM__ALTP2MHVM);
+}
+
+static int flask_hvm_altp2mhvm_op(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_HVM, HVM__ALTP2MHVM_OP);
+}
+
static int flask_vm_event_control(struct domain *d, int mode, int op)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
@@ -1654,6 +1664,8 @@ static struct xsm_operations flask_ops = {
.hvm_param = flask_hvm_param,
.hvm_control = flask_hvm_param,
.hvm_param_nested = flask_hvm_param_nested,
+ .hvm_param_altp2mhvm = flask_hvm_param_altp2mhvm,
+ .hvm_altp2mhvm_op = flask_hvm_altp2mhvm_op,
.do_xsm_op = do_flask_op,
.get_vnumainfo = flask_get_vnumainfo,
diff --git a/xen/xsm/flask/policy/access_vectors
b/xen/xsm/flask/policy/access_vectors
index 68284d5..d168de2 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -272,6 +272,13 @@ class hvm
share_mem
# HVMOP_set_param setting HVM_PARAM_NESTEDHVM
nested
+# HVMOP_set_param setting HVM_PARAM_ALTP2MHVM
+ altp2mhvm
+# HVMOP_altp2m_set_domain_state HVMOP_altp2m_get_domain_state
+# HVMOP_altp2m_vcpu_enable_notify HVMOP_altp2m_create_p2m
+# HVMOP_altp2m_destroy_p2m HVMOP_altp2m_switch_p2m
+# HVMOP_altp2m_set_mem_access HVMOP_altp2m_change_gfn
+ altp2mhvm_op
}
# Class event describes event channels. Interdomain event channels have their
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.