[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v1 6/6] xsm: add tee access policy support



As we don't want any guest to access limited resources of TEE,
we need a way to control who can work with it.

Thus, a new access vector class "tee" is added with only one operation
"call" so far. The TEE framework uses this to check if a guest has the
right to work with TEE.

Also, an example security context domU_with_tee_t was added.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
 tools/flask/policy/modules/dom0.te          |  3 +++
 tools/flask/policy/modules/domU_with_tee.te | 23 +++++++++++++++++++++++
 tools/flask/policy/modules/modules.conf     |  1 +
 tools/flask/policy/modules/xen.if           | 12 ++++++++++++
 xen/arch/arm/tee/tee.c                      | 10 ++++++++++
 xen/include/xsm/dummy.h                     | 10 ++++++++++
 xen/include/xsm/xsm.h                       | 13 +++++++++++++
 xen/xsm/dummy.c                             |  4 ++++
 xen/xsm/flask/hooks.c                       | 15 +++++++++++++++
 xen/xsm/flask/policy/access_vectors         |  7 +++++++
 xen/xsm/flask/policy/security_classes       |  1 +
 11 files changed, 99 insertions(+)
 create mode 100644 tools/flask/policy/modules/domU_with_tee.te

diff --git a/tools/flask/policy/modules/dom0.te 
b/tools/flask/policy/modules/dom0.te
index 1643b40..8bac4f3 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -27,6 +27,9 @@ allow dom0_t xen_t:version {
        xen_build_id
 };
 
+# Allow dom0 to work with TEE
+allow dom0_t xen_t:tee call;
+
 allow dom0_t xen_t:mmu memorymap;
 
 # Allow dom0 to use these domctls on itself. For domctls acting on other
diff --git a/tools/flask/policy/modules/domU_with_tee.te 
b/tools/flask/policy/modules/domU_with_tee.te
new file mode 100644
index 0000000..4d856b2
--- /dev/null
+++ b/tools/flask/policy/modules/domU_with_tee.te
@@ -0,0 +1,23 @@
+###############################################################################
+#
+# Domain creation
+#
+###############################################################################
+
+declare_domain(domU_with_tee_t)
+domain_self_comms(domU_t)
+create_domain(dom0_t, domU_with_tee_t)
+manage_domain(dom0_t, domU_with_tee_t)
+domain_comms(dom0_t, domU_with_tee_t)
+domain_comms(domU_with_tee_t, domU_with_tee_t)
+migrate_domain_out(dom0_t, domU_with_tee_t)
+domain_self_comms(domU_with_tee_t)
+
+# This is required for PCI (or other device) passthrough
+delegate_devices(dom0_t, domU_with_tee_t)
+
+# Both of these domain types can be created using the default (system) role
+role system_r types { domU_with_tee_t dm_dom_t };
+
+# Allow to work with TEE
+access_tee(domU_with_tee_t)
diff --git a/tools/flask/policy/modules/modules.conf 
b/tools/flask/policy/modules/modules.conf
index 6dba0a3..9010d91 100644
--- a/tools/flask/policy/modules/modules.conf
+++ b/tools/flask/policy/modules/modules.conf
@@ -29,6 +29,7 @@ domU = on
 isolated_domU = on
 prot_domU = on
 nomigrate = on
+domU_with_tee = on
 
 # Example device policy.  Also see policy/device_contexts.
 nic_dev = on
diff --git a/tools/flask/policy/modules/xen.if 
b/tools/flask/policy/modules/xen.if
index 5543749..e534179 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -209,3 +209,15 @@ define(`admin_device', `
 define(`delegate_devices', `
     allow $1 $2:resource { add remove };
 ')
+
+################################################################################
+#
+# Miscellaneous services
+#
+################################################################################
+
+# access_tee(domain)
+
+define(`access_tee', `
+    allow $1 xen_t:tee call;
+')
diff --git a/xen/arch/arm/tee/tee.c b/xen/arch/arm/tee/tee.c
index 6df3b09..06f5091 100644
--- a/xen/arch/arm/tee/tee.c
+++ b/xen/arch/arm/tee/tee.c
@@ -19,6 +19,7 @@
 #include <xen/errno.h>
 #include <xen/types.h>
 #include <asm/tee/tee.h>
+#include <xsm/xsm.h>
 
 extern const struct tee_mediator_desc _steemediator[], _eteemediator[];
 static const struct tee_mediator_ops *mediator_ops;
@@ -42,6 +43,9 @@ bool tee_handle_smc(struct cpu_user_regs *regs)
     if ( !mediator_ops )
         return false;
 
+    if ( xsm_tee_op(XSM_PRIV, current->domain) )
+        return false;
+
     return mediator_ops->handle_smc(regs);
 }
 
@@ -50,6 +54,9 @@ int tee_domain_create(struct domain *d)
     if ( !mediator_ops )
         return -ENODEV;
 
+    if ( xsm_tee_op(XSM_PRIV, d) )
+        return -EPERM;
+
     return mediator_ops->domain_create(d);
 }
 
@@ -58,6 +65,9 @@ void tee_domain_destroy(struct domain *d)
     if ( !mediator_ops )
         return;
 
+    if ( xsm_tee_op(XSM_PRIV, d) )
+        return;
+
     return mediator_ops->domain_destroy(d);
 }
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index b2cd56c..b8e2fa0 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -724,3 +724,13 @@ static XSM_INLINE int xsm_xen_version (XSM_DEFAULT_ARG 
uint32_t op)
         return xsm_default_action(XSM_PRIV, current->domain, NULL);
     }
 }
+
+#ifdef CONFIG_TEE
+
+static XSM_INLINE int xsm_tee_op (XSM_DEFAULT_ARG struct domain *d)
+{
+       XSM_ASSERT_ACTION(XSM_PRIV);
+       return 0;
+}
+
+#endif /* CONFIG_TEE */
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 7f7feff..aa57831 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -180,6 +180,10 @@ struct xsm_operations {
     int (*dm_op) (struct domain *d);
 #endif
     int (*xen_version) (uint32_t cmd);
+
+#ifdef CONFIG_TEE
+    int (*tee_op) (struct domain *d);
+#endif
 };
 
 #ifdef CONFIG_XSM
@@ -692,6 +696,15 @@ static inline int xsm_xen_version (xsm_default_t def, 
uint32_t op)
     return xsm_ops->xen_version(op);
 }
 
+#ifdef CONFIG_TEE
+
+static inline int xsm_tee_op (xsm_default_t def, struct domain *d)
+{
+       return xsm_ops->tee_op(d);
+}
+
+#endif /* CONFIG_TEE */
+
 #endif /* XSM_NO_WRAPPERS */
 
 #ifdef CONFIG_MULTIBOOT
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 479b103..2ef56dd 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -157,4 +157,8 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, dm_op);
 #endif
     set_to_dummy_if_null(ops, xen_version);
+
+#ifdef CONFIG_TEE
+    set_to_dummy_if_null(ops, tee_op);
+#endif
 }
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index f01b4cf..73d9c1f 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1718,6 +1718,17 @@ static int flask_xen_version (uint32_t op)
     }
 }
 
+#ifdef CONFIG_TEE
+
+static int flask_tee_op(struct domain *d)
+{
+    u32 dsid = domain_sid(d);
+
+    return avc_has_perm(dsid, SECINITSID_XEN, SECCLASS_TEE, TEE__CALL, NULL);
+}
+
+#endif  /* CONFIG_TEE */
+
 long do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op);
 int compat_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op);
 
@@ -1851,6 +1862,10 @@ static struct xsm_operations flask_ops = {
     .dm_op = flask_dm_op,
 #endif
     .xen_version = flask_xen_version,
+
+#ifdef CONFIG_TEE
+    .tee_op = flask_tee_op,
+#endif
 };
 
 void __init flask_init(const void *policy_buffer, size_t policy_size)
diff --git a/xen/xsm/flask/policy/access_vectors 
b/xen/xsm/flask/policy/access_vectors
index 3a2d863..29e20f1 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -535,3 +535,10 @@ class version
 # Xen build id
     xen_build_id
 }
+
+# Class TEE is used to control access to Trusted Execution Environments
+# on ARM platforms
+class tee
+{
+    call
+}
diff --git a/xen/xsm/flask/policy/security_classes 
b/xen/xsm/flask/policy/security_classes
index cde4e1a..d4c9482 100644
--- a/xen/xsm/flask/policy/security_classes
+++ b/xen/xsm/flask/policy/security_classes
@@ -19,5 +19,6 @@ class event
 class grant
 class security
 class version
+class tee
 
 # FLASK
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.