[Xen-devel] [RFC 7/9] x86/nested, xsm: add nested_grant_table_op hypercall

Add a nested_grant_table_op hypercall that proxies the
GNTTABOP_query_size op to the host hypervisor. Only single-element
GNTTABOP_query_size requests for DOMID_SELF are accepted; any other op
returns -EOPNOTSUPP. Access is gated by a new XSM hook,
nested_grant_query_size, with a matching FLASK 'grant query' permission
on nestedxen_t granted to dom0.
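
For illustration, a minimal caller sketch of how an L1 dom0 could use
the new op to read the L0 grant table limits. This is not part of the
patch: the HYPERVISOR_nested_grant_table_op() wrapper and the
query_l0_grant_table_size() helper are assumed/illustrative names only,
with the wrapper presumed to issue __HYPERVISOR_nested_grant_table_op
via the hypercall page entry added below.

    #include <public/grant_table.h> /* struct gnttab_query_size, GNTST_okay */

    /* Illustrative sketch only -- wrapper name is an assumption. */
    static int query_l0_grant_table_size(uint32_t *nr_frames,
                                         uint32_t *max_nr_frames)
    {
        struct gnttab_query_size op = {
            .dom = DOMID_SELF,    /* the proxy rejects any other domid */
        };
        int rc = HYPERVISOR_nested_grant_table_op(GNTTABOP_query_size,
                                                  &op, 1);

        if ( rc )
            return rc;

        if ( op.status != GNTST_okay )
            return -EIO;

        *nr_frames = op.nr_frames;
        *max_nr_frames = op.max_nr_frames;

        return 0;
    }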

Signed-off-by: Christopher Clark <christopher.clark@xxxxxxxxxx>
---
 tools/flask/policy/modules/dom0.te  |  1 +
 xen/arch/x86/guest/hypercall_page.S |  1 +
 xen/arch/x86/guest/xen-nested.c     | 37 +++++++++++++++++++++++++++++
 xen/arch/x86/hypercall.c            |  1 +
 xen/arch/x86/pv/hypercall.c         |  1 +
 xen/include/public/xen.h            |  1 +
 xen/include/xen/hypercall.h         |  5 ++++
 xen/include/xsm/dummy.h             |  7 ++++++
 xen/include/xsm/xsm.h               |  7 ++++++
 xen/xsm/dummy.c                     |  1 +
 xen/xsm/flask/hooks.c               |  6 +++++
 11 files changed, 68 insertions(+)

diff --git a/tools/flask/policy/modules/dom0.te b/tools/flask/policy/modules/dom0.te
index 7d0f29f082..03c93a3093 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -47,6 +47,7 @@ allow dom0_t dom0_t:resource { add remove };
 allow dom0_t nestedxen_t:version { xen_version xen_get_features };
 allow dom0_t nestedxen_t:mmu physmap;
 allow dom0_t nestedxen_t:hvm { setparam getparam };
+allow dom0_t nestedxen_t:grant query;
 
 # These permissions allow using the FLASK security server to compute access
 # checks locally, which could be used by a domain or service (such as xenstore)
diff --git a/xen/arch/x86/guest/hypercall_page.S b/xen/arch/x86/guest/hypercall_page.S
index adbb82f4ec..33403714ce 100644
--- a/xen/arch/x86/guest/hypercall_page.S
+++ b/xen/arch/x86/guest/hypercall_page.S
@@ -63,6 +63,7 @@ DECLARE_HYPERCALL(xenpmu_op)
 DECLARE_HYPERCALL(nested_xen_version)
 DECLARE_HYPERCALL(nested_memory_op)
 DECLARE_HYPERCALL(nested_hvm_op)
+DECLARE_HYPERCALL(nested_grant_table_op)
 
 DECLARE_HYPERCALL(arch_0)
 DECLARE_HYPERCALL(arch_1)
diff --git a/xen/arch/x86/guest/xen-nested.c b/xen/arch/x86/guest/xen-nested.c
index 82bd6885e6..a4049e366f 100644
--- a/xen/arch/x86/guest/xen-nested.c
+++ b/xen/arch/x86/guest/xen-nested.c
@@ -22,6 +22,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 
+#include <public/grant_table.h>
 #include <public/hvm/hvm_op.h>
 #include <public/memory.h>
 #include <public/version.h>
@@ -202,3 +203,39 @@ long do_nested_hvm_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return -EOPNOTSUPP;
     }
 }
+
+long do_nested_grant_table_op(unsigned int cmd,
+                              XEN_GUEST_HANDLE_PARAM(void) uop,
+                              unsigned int count)
+{
+    struct gnttab_query_size op;
+    long ret;
+
+    if ( !xen_nested )
+        return -ENOSYS;
+
+    if ( cmd != GNTTABOP_query_size )
+    {
+        gprintk(XENLOG_ERR, "Nested grant table op %u not supported.\n", cmd);
+        return -EOPNOTSUPP;
+    }
+
+    if ( count != 1 )
+        return -EINVAL;
+
+    if ( copy_from_guest(&op, uop, 1) )
+        return -EFAULT;
+
+    if ( op.dom != DOMID_SELF )
+        return -EPERM;
+
+    ret = xsm_nested_grant_query_size(XSM_PRIV, current->domain);
+    if ( ret )
+        return ret;
+
+    ret = xen_hypercall_grant_table_op(cmd, &op, 1);
+    if ( !ret && __copy_to_guest(uop, &op, 1) )
+        return -EFAULT;
+
+    return ret;
+}
diff --git a/xen/arch/x86/hypercall.c b/xen/arch/x86/hypercall.c
index 268cc9450a..1b9f4c6050 100644
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -77,6 +77,7 @@ const hypercall_args_t hypercall_args_table[NR_hypercalls] =
     ARGS(nested_xen_version, 2),
     COMP(nested_memory_op, 2, 2),
     ARGS(nested_hvm_op, 2),
+    ARGS(nested_grant_table_op, 3),
 #endif
     ARGS(mca, 1),
     ARGS(arch_1, 1),
diff --git a/xen/arch/x86/pv/hypercall.c b/xen/arch/x86/pv/hypercall.c
index e88ecce222..efa1bd0830 100644
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -88,6 +88,7 @@ const hypercall_table_t pv_hypercall_table[] = {
     HYPERCALL(nested_xen_version),
     COMPAT_CALL(nested_memory_op),
     HYPERCALL(nested_hvm_op),
+    HYPERCALL(nested_grant_table_op),
 #endif
     HYPERCALL(mca),
     HYPERCALL(arch_1),
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 1731409eb8..000b7fc9d0 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -124,6 +124,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
 #define __HYPERVISOR_nested_xen_version   42
 #define __HYPERVISOR_nested_memory_op     43
 #define __HYPERVISOR_nested_hvm_op        44
+#define __HYPERVISOR_nested_grant_table_op 45
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
diff --git a/xen/include/xen/hypercall.h b/xen/include/xen/hypercall.h
index b09070539e..102b20fd5f 100644
--- a/xen/include/xen/hypercall.h
+++ b/xen/include/xen/hypercall.h
@@ -162,6 +162,11 @@ extern long do_nested_memory_op(
 extern long do_nested_hvm_op(
     int cmd,
     XEN_GUEST_HANDLE_PARAM(void) arg);
+
+extern long do_nested_grant_table_op(
+    unsigned int cmd,
+    XEN_GUEST_HANDLE_PARAM(void) uop,
+    unsigned int count);
 #endif
 
 #ifdef CONFIG_COMPAT
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 238b425c49..f5871ef05a 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -761,6 +761,13 @@ static XSM_INLINE int xsm_nested_hvm_op(XSM_DEFAULT_ARG const struct domain *d,
     XSM_ASSERT_ACTION(XSM_PRIV);
     return xsm_default_action(action, d, NULL);
 }
+
+static XSM_INLINE int xsm_nested_grant_query_size(XSM_DEFAULT_ARG
+                                                  const struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_PRIV);
+    return xsm_default_action(action, d, NULL);
+}
 #endif
 
 #include <public/version.h>
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index cc02bf18c7..e12001c401 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -191,6 +191,7 @@ struct xsm_operations {
     int (*nested_xen_version) (const struct domain *d, unsigned int cmd);
     int (*nested_add_to_physmap) (const struct domain *d);
     int (*nested_hvm_op) (const struct domain *d, unsigned int cmd);
+    int (*nested_grant_query_size) (const struct domain *d);
 #endif
 };
 
@@ -748,6 +749,12 @@ static inline int xsm_nested_hvm_op(xsm_default_t def, const struct domain *d,
     return xsm_ops->nested_hvm_op(d, cmd);
 }
 
+static inline int xsm_nested_grant_query_size(xsm_default_t def,
+                                              const struct domain *d)
+{
+    return xsm_ops->nested_grant_query_size(d);
+}
+
 #endif /* CONFIG_XEN_NESTED */
 
 #endif /* XSM_NO_WRAPPERS */
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 909d41a81b..8c213c258f 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -161,5 +161,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, nested_xen_version);
     set_to_dummy_if_null(ops, nested_add_to_physmap);
     set_to_dummy_if_null(ops, nested_hvm_op);
+    set_to_dummy_if_null(ops, nested_grant_query_size);
 #endif
 }
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index f8d247e28f..2988df2cd1 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1789,6 +1789,11 @@ static int flask_nested_hvm_op(const struct domain *d, unsigned int op)
     return domain_has_nested_perm(d, SECCLASS_HVM, perm);
 }
 
+static int flask_nested_grant_query_size(const struct domain *d)
+{
+    return domain_has_nested_perm(d, SECCLASS_GRANT, GRANT__QUERY);
+}
+
 #endif
 
 long do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op);
@@ -1934,6 +1939,7 @@ static struct xsm_operations flask_ops = {
     .nested_xen_version = flask_nested_xen_version,
     .nested_add_to_physmap = flask_nested_add_to_physmap,
     .nested_hvm_op = flask_nested_hvm_op,
+    .nested_grant_query_size = flask_nested_grant_query_size,
 #endif
 };
 
-- 
2.17.1

