
[Xen-devel] [PATCH v8 2/9] xsm bits for vNUMA hypercalls



Define the xsm_get_vnumainfo hook, used when a domain wishes
to retrieve its vNUMA topology. Add an XSM hook for
XEN_DOMCTL_setvnumainfo. Also add basic FLASK policies.

Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
 tools/flask/policy/policy/modules/xen/xen.if |    3 ++-
 tools/flask/policy/policy/modules/xen/xen.te |    2 +-
 xen/common/memory.c                          |    7 +++++++
 xen/include/xsm/dummy.h                      |    6 ++++++
 xen/include/xsm/xsm.h                        |    7 +++++++
 xen/xsm/dummy.c                              |    1 +
 xen/xsm/flask/hooks.c                        |   10 ++++++++++
 xen/xsm/flask/policy/access_vectors          |    4 ++++
 8 files changed, 38 insertions(+), 2 deletions(-)
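
Note for reviewers (not part of the commit message): with XSM compiled
out, the new xsm_get_vnumainfo(XSM_TARGET, d) check falls through to the
dummy policy's default action. The sketch below approximates what that
XSM_TARGET action allows, based on xsm_default_action() in
xen/include/xsm/dummy.h; the helper name vnuma_target_check is made up
purely for illustration, and the real helper folds these cases into a
single switch over the action type.

    #include <xen/sched.h>   /* struct domain */
    #include <xen/errno.h>   /* EPERM */

    /* Approximate XSM_TARGET semantics for the get_vnumainfo check. */
    static int vnuma_target_check(struct domain *src, struct domain *tgt)
    {
        if ( src == tgt )          /* a domain may query its own topology */
            return 0;
        if ( src->target == tgt )  /* its device-model domain may as well */
            return 0;
        if ( src->is_privileged )  /* a privileged domain may query anyone */
            return 0;
        return -EPERM;             /* all other callers are denied */
    }

In other words, a guest can always retrieve its own vNUMA topology,
while setting it (XEN_DOMCTL_setvnumainfo) remains a domctl and, under
FLASK, is limited to domains granted set_vnumainfo by the policy
changes in this patch.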

diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
index dedc035..32b51b6 100644
--- a/tools/flask/policy/policy/modules/xen/xen.if
+++ b/tools/flask/policy/policy/modules/xen/xen.if
@@ -49,7 +49,7 @@ define(`create_domain_common', `
                        getdomaininfo hypercall setvcpucontext setextvcpucontext
                        getscheduler getvcpuinfo getvcpuextstate getaddrsize
                        getaffinity setaffinity };
-       allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn };
+       allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn set_vnumainfo get_vnumainfo };
        allow $1 $2:security check_context;
        allow $1 $2:shadow enable;
        allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
@@ -81,6 +81,7 @@ define(`manage_domain', `
        allow $1 $2:domain { getdomaininfo getvcpuinfo getaffinity
                        getaddrsize pause unpause trigger shutdown destroy
                        setaffinity setdomainmaxmem getscheduler };
+       allow $1 $2:domain2 set_vnumainfo;
 ')
 
 # migrate_domain_out(priv, target)
diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
index bb59fe8..1937883 100644
--- a/tools/flask/policy/policy/modules/xen/xen.te
+++ b/tools/flask/policy/policy/modules/xen/xen.te
@@ -76,7 +76,7 @@ allow dom0_t dom0_t:domain {
        getpodtarget setpodtarget set_misc_info set_virq_handler
 };
 allow dom0_t dom0_t:domain2 {
-       set_cpuid gettsc settsc setscheduler set_max_evtchn
+       set_cpuid gettsc settsc setscheduler set_max_evtchn set_vnumainfo get_vnumainfo
 };
 allow dom0_t dom0_t:resource { add remove };
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 58c919c..5d017c9 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -986,6 +986,13 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( (d = rcu_lock_domain_by_any_id(topology.domid)) == NULL )
             return -ESRCH;
 
+        rc = xsm_get_vnumainfo(XSM_TARGET, d);
+        if ( rc )
+        {
+            rcu_unlock_domain(d);
+            return rc;
+        }
+
         read_lock(&d->vnuma_rwlock);
 
         if ( d->vnuma == NULL )
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index c5aa316..df55e70 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -317,6 +317,12 @@ static XSM_INLINE int xsm_set_pod_target(XSM_DEFAULT_ARG struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
+static XSM_INLINE int xsm_get_vnumainfo(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_TARGET);
+    return xsm_default_action(action, current->domain, d);
+}
+
 #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
 static XSM_INLINE int xsm_get_device_group(XSM_DEFAULT_ARG uint32_t machine_bdf)
 {
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index a85045d..6c1c079 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -139,6 +139,7 @@ struct xsm_operations {
     int (*hvm_param) (struct domain *d, unsigned long op);
     int (*hvm_control) (struct domain *d, unsigned long op);
     int (*hvm_param_nested) (struct domain *d);
+    int (*get_vnumainfo) (struct domain *d);
 
 #ifdef CONFIG_X86
     int (*do_mca) (void);
@@ -534,6 +535,11 @@ static inline int xsm_hvm_param_nested (xsm_default_t def, struct domain *d)
     return xsm_ops->hvm_param_nested(d);
 }
 
+static inline int xsm_get_vnumainfo (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->get_vnumainfo(d);
+}
+
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
 {
@@ -653,6 +659,7 @@ static inline int xsm_ioport_mapping (xsm_default_t def, struct domain *d, uint3
 {
     return xsm_ops->ioport_mapping(d, s, e, allow);
 }
+
 #endif /* CONFIG_X86 */
 
 #endif /* XSM_NO_WRAPPERS */
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index c95c803..0826a8b 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -85,6 +85,7 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, iomem_permission);
     set_to_dummy_if_null(ops, iomem_mapping);
     set_to_dummy_if_null(ops, pci_config_permission);
+    set_to_dummy_if_null(ops, get_vnumainfo);
 
 #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
     set_to_dummy_if_null(ops, get_device_group);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index f2f59ea..00efba1 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -404,6 +404,11 @@ static int flask_claim_pages(struct domain *d)
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM);
 }
 
+static int flask_get_vnumainfo(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__GET_VNUMAINFO);
+}
+
 static int flask_console_io(struct domain *d, int cmd)
 {
     u32 perm;
@@ -715,6 +720,9 @@ static int flask_domctl(struct domain *d, int cmd)
     case XEN_DOMCTL_cacheflush:
         return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__CACHEFLUSH);
 
+    case XEN_DOMCTL_setvnumainfo:
+        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_VNUMAINFO);
+
     default:
         printk("flask_domctl: Unknown op %d\n", cmd);
         return -EPERM;
@@ -1552,6 +1560,8 @@ static struct xsm_operations flask_ops = {
     .hvm_param_nested = flask_hvm_param_nested,
 
     .do_xsm_op = do_flask_op,
+    .get_vnumainfo = flask_get_vnumainfo,
+
 #ifdef CONFIG_COMPAT
     .do_compat_op = compat_flask_op,
 #endif
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 32371a9..d279841 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -200,6 +200,10 @@ class domain2
     cacheflush
 # Creation of the hardware domain when it is not dom0
     create_hardware_domain
+# XEN_DOMCTL_setvnumainfo
+    set_vnumainfo
+# XENMEM_getvnumainfo
+    get_vnumainfo
 }
 
# Similar to class domain, but primarily contains domctls related to HVM domains
-- 
1.7.10.4

