[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 5/8] xsm: Add missing access checks



Actions requiring IS_PRIV should also require some XSM access control in
order for XSM to be useful in confining multiple privileged domains. Add
XSM hooks for new hypercalls and sub-commands that are under IS_PRIV but
not currently under any access checks.

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 tools/flask/policy/policy/flask/access_vectors |   14 ++
 xen/arch/ia64/xen/mm.c                         |    8 +
 xen/arch/x86/cpu/mcheck/mce.c                  |    4 +
 xen/arch/x86/domctl.c                          |   17 ++-
 xen/arch/x86/hvm/hvm.c                         |   26 +++
 xen/arch/x86/mm.c                              |   10 +-
 xen/arch/x86/msi.c                             |    6 +
 xen/arch/x86/physdev.c                         |    9 +
 xen/arch/x86/platform_hypercall.c              |   28 +++
 xen/arch/x86/sysctl.c                          |   14 ++
 xen/common/domctl.c                            |   10 +-
 xen/common/grant_table.c                       |   10 +
 xen/common/sysctl.c                            |   17 ++
 xen/drivers/passthrough/iommu.c                |    8 +
 xen/drivers/passthrough/pci.c                  |   17 ++-
 xen/include/xsm/xsm.h                          |  122 +++++++++++++
 xen/xsm/flask/hooks.c                          |  221 +++++++++++++++++++++++-
 xen/xsm/flask/include/av_perm_to_string.h      |   14 ++
 xen/xsm/flask/include/av_permissions.h         |   14 ++
 19 files changed, 556 insertions(+), 13 deletions(-)

diff --git a/tools/flask/policy/policy/flask/access_vectors 
b/tools/flask/policy/policy/flask/access_vectors
index 38036d0..644f2e1 100644
--- a/tools/flask/policy/policy/flask/access_vectors
+++ b/tools/flask/policy/policy/flask/access_vectors
@@ -45,6 +45,11 @@ class xen
        debug
        getcpuinfo
        heap
+       pm_op
+       mca_op
+       lockprof
+       cpupool_op
+       sched_op
 }
 
 class domain
@@ -77,6 +82,9 @@ class domain
        setextvcpucontext
        getvcpuextstate
        setvcpuextstate
+       getpodtarget
+       setpodtarget
+       set_misc_info
 }
 
 class hvm
@@ -91,6 +99,9 @@ class hvm
        bind_irq
        cacheattr
     trackdirtyvram
+    hvmctl
+    mem_event
+    mem_sharing
 }
 
 class event
@@ -152,6 +163,9 @@ class resource
        stat_device
        add_device
        remove_device
+       plug
+       unplug
+       setup
 }
 
 class security
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
index d440e4d..694bf95 100644
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -3496,6 +3496,14 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             return rc;
 
         if ( op == XENMEM_set_pod_target )
+            rc = xsm_set_pod_target(d);
+        else
+            rc = xsm_get_pod_target(d);
+
+        if ( rc != 0 )
+            goto pod_target_out_unlock;
+
+        if ( op == XENMEM_set_pod_target )
         {
             /* if -ENOSYS is returned,
                domain builder aborts domain creation. */
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 6af93c0..b592041 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1384,6 +1384,10 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
     if (!IS_PRIV(v->domain) )
         return x86_mcerr(NULL, -EPERM);
 
+    ret = xsm_do_mca();
+    if ( ret )
+        return x86_mcerr(NULL, ret);
+
     if ( copy_from_guest(op, u_xen_mc, 1) )
         return x86_mcerr("do_mca: failed copyin of xen_mc_t", -EFAULT);
 
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 4e258f3..9c9d5d1 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1441,8 +1441,10 @@ long arch_do_domctl(
         d = rcu_lock_domain_by_id(domctl->domain);
         if ( d != NULL )
         {
-            ret = mem_event_domctl(d, &domctl->u.mem_event_op,
-                                   guest_handle_cast(u_domctl, void));
+            ret = xsm_mem_event(d);
+            if ( !ret )
+                ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+                                       guest_handle_cast(u_domctl, void));
             rcu_unlock_domain(d);
             copy_to_guest(u_domctl, domctl, 1);
         } 
@@ -1457,7 +1459,9 @@ long arch_do_domctl(
         d = rcu_lock_domain_by_id(domctl->domain);
         if ( d != NULL )
         {
-            ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
+            ret = xsm_mem_sharing(d);
+            if ( !ret )
+                ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
             rcu_unlock_domain(d);
             copy_to_guest(u_domctl, domctl, 1);
         } 
@@ -1498,8 +1502,11 @@ long arch_do_domctl(
         d = rcu_lock_domain_by_id(domctl->domain);
         if ( d != NULL )
         {
-            p2m = p2m_get_hostp2m(d);
-            p2m->access_required = domctl->u.access_required.access_required;
+            ret = xsm_mem_event(d);
+            if ( !ret ) {
+                p2m = p2m_get_hostp2m(d);
+                p2m->access_required = 
domctl->u.access_required.access_required;
+            }
             rcu_unlock_domain(d);
         } 
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 901132d..bae3696 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3869,6 +3869,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
         if ( rc != 0 )
             return rc;
 
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail_getmemtype;
+
         rc = -EINVAL;
         if ( is_hvm_domain(d) )
         {
@@ -3883,6 +3887,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
                 a.mem_type =  HVMMEM_mmio_dm;
             rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
         }
+
+    param_fail_getmemtype:
         rcu_unlock_domain(d);
         break;
     }
@@ -3911,6 +3917,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
         if ( !is_hvm_domain(d) )
             goto param_fail4;
 
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail4;
+
         rc = -EINVAL;
         if ( (a.first_pfn > domain_get_maximum_gpfn(d)) ||
              ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
@@ -3986,6 +3996,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
         if ( !is_hvm_domain(d) )
             goto param_fail5;
 
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail5;
+
         rc = -EINVAL;
         if ( (a.first_pfn > domain_get_maximum_gpfn(d)) ||
              ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
@@ -4016,6 +4030,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
         if ( !is_hvm_domain(d) )
             goto param_fail6;
 
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail6;
+
         rc = -EINVAL;
         if ( (a.pfn > domain_get_maximum_gpfn(d)) && a.pfn != ~0ull )
             goto param_fail6;
@@ -4048,6 +4066,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
         if ( !is_hvm_domain(d) || !paging_mode_shadow(d) )
             goto param_fail7;
 
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail7;
+
         rc = 0;
         pagetable_dying(d, a.gpa);
 
@@ -4098,6 +4120,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) 
arg)
         if ( !is_hvm_domain(d) )
             goto param_fail8;
 
+        rc = xsm_hvm_param(d, op);
+        if ( rc )
+            goto param_fail8;
+
         rc = -ENOENT;
         if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
             goto param_fail8;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 19391fc..67f5630 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5041,7 +5041,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 
         /* Support DOMID_SELF? */
         if ( !IS_PRIV(current->domain) )
-            return -EINVAL;
+            return -EPERM;
 
         if ( copy_from_guest(&target, arg, 1) )
             return -EFAULT;
@@ -5051,6 +5051,14 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             return rc;
 
         if ( op == XENMEM_set_pod_target )
+            rc = xsm_set_pod_target(d);
+        else
+            rc = xsm_get_pod_target(d);
+
+        if ( rc != 0 )
+            goto pod_target_out_unlock;
+
+        if ( op == XENMEM_set_pod_target )
         {
             if ( target.target_pages > d->max_pages )
             {
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index 2d86006..782b84b 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -29,6 +29,7 @@
 #include <io_ports.h>
 #include <public/physdev.h>
 #include <xen/iommu.h>
+#include <xsm/xsm.h>
 
 /* bitmap indicate which fixed map is free */
 DEFINE_SPINLOCK(msix_fixmap_lock);
@@ -992,6 +993,7 @@ int pci_restore_msi_state(struct pci_dev *pdev)
 {
     unsigned long flags;
     int irq;
+    int ret;
     struct msi_desc *entry, *tmp;
     struct irq_desc *desc;
 
@@ -1000,6 +1002,10 @@ int pci_restore_msi_state(struct pci_dev *pdev)
     if (!pdev)
         return -EINVAL;
 
+    ret = xsm_resource_setup_pci((pdev->seg << 16) | (pdev->bus << 8) | 
pdev->devfn);
+    if ( ret )
+        return ret;
+
     list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
     {
         irq = entry->irq;
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index c1c7e54..ca4fb59 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -600,6 +600,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
         if ( !IS_PRIV(current->domain) )
             break;
 
+        ret = xsm_resource_setup_misc();
+        if ( ret )
+            break;
+
         ret = -EFAULT;
         if ( copy_from_guest(&info, arg, 1) )
             break;
@@ -662,6 +666,11 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
         ret = -EINVAL;
         if ( setup_gsi.gsi < 0 || setup_gsi.gsi >= nr_irqs_gsi )
             break;
+
+        ret = xsm_resource_setup_gsi(setup_gsi.gsi);
+        if ( ret )
+            break;
+
         ret = mp_register_gsi(setup_gsi.gsi, setup_gsi.triggering,
                               setup_gsi.polarity);
         break; 
diff --git a/xen/arch/x86/platform_hypercall.c 
b/xen/arch/x86/platform_hypercall.c
index a0d23ba..f9a836a 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -390,6 +390,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) 
u_xenpf_op)
     break;
 
     case XENPF_set_processor_pminfo:
+        ret = xsm_setpminfo();
+        if ( ret )
+            break;
+
         switch ( op->u.set_pminfo.type )
         {
         case XEN_PM_PX:
@@ -440,6 +444,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) 
u_xenpf_op)
 
         g_info = &op->u.pcpu_info;
 
+        ret = xsm_getcpuinfo();
+        if ( ret )
+            break;
+
         if ( !get_cpu_maps() )
         {
             ret = -EBUSY;
@@ -509,6 +517,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) 
u_xenpf_op)
     {
         int cpu = op->u.cpu_ol.cpuid;
 
+        ret = xsm_resource_plug_core();
+        if ( ret )
+            break;
+
         if ( cpu >= nr_cpu_ids || !cpu_present(cpu) )
         {
             ret = -EINVAL;
@@ -521,6 +533,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) 
u_xenpf_op)
             break;
         }
 
+        ret = xsm_resource_plug_core();
+        if ( ret )
+            break;
+
         ret = continue_hypercall_on_cpu(
             0, cpu_up_helper, (void *)(unsigned long)cpu);
         break;
@@ -530,6 +546,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) 
u_xenpf_op)
     {
         int cpu = op->u.cpu_ol.cpuid;
 
+        ret = xsm_resource_unplug_core();
+        if ( ret )
+            break;
+
         if ( cpu == 0 )
         {
             ret = -EOPNOTSUPP;
@@ -555,12 +575,20 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) 
u_xenpf_op)
     break;
 
     case XENPF_cpu_hotadd:
+        ret = xsm_resource_plug_core();
+        if ( ret )
+            break;
+
         ret = cpu_add(op->u.cpu_add.apic_id,
                       op->u.cpu_add.acpi_id,
                       op->u.cpu_add.pxm);
     break;
 
     case XENPF_mem_hotadd:
+        ret = xsm_resource_plug_core();
+        if ( ret )
+            break;
+
         ret = memory_add(op->u.mem_add.spfn,
                       op->u.mem_add.epfn,
                       op->u.mem_add.pxm);
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 738e517..379f071 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -103,6 +103,10 @@ long arch_do_sysctl(
         uint32_t i, max_cpu_index, last_online_cpu;
         xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
 
+        ret = xsm_physinfo();
+        if ( ret )
+            break;
+
         last_online_cpu = cpumask_last(&cpu_online_map);
         max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
         ti->max_cpu_index = last_online_cpu;
@@ -139,6 +143,10 @@ long arch_do_sysctl(
         uint32_t i, j, max_node_index, last_online_node;
         xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
 
+        ret = xsm_physinfo();
+        if ( ret )
+            break;
+
         last_online_node = last_node(node_online_map);
         max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
         ni->max_node_index = last_online_node;
@@ -189,10 +197,16 @@ long arch_do_sysctl(
         switch ( sysctl->u.cpu_hotplug.op )
         {
         case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
+            ret = xsm_resource_plug_core();
+            if ( ret )
+                break;
             ret = continue_hypercall_on_cpu(
                 0, cpu_up_helper, (void *)(unsigned long)cpu);
             break;
         case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
+            ret = xsm_resource_unplug_core();
+            if ( ret )
+                break;
             ret = continue_hypercall_on_cpu(
                 0, cpu_down_helper, (void *)(unsigned long)cpu);
             break;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 06594a0..d6ae09b 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -972,9 +972,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
         d = rcu_lock_domain_by_id(op->domain);
         if ( d != NULL )
         {
-            d->suspend_evtchn = op->u.subscribe.port;
+            ret = xsm_domctl(d, op->cmd);
+            if ( !ret )
+                d->suspend_evtchn = op->u.subscribe.port;
             rcu_unlock_domain(d);
-            ret = 0;
         }
     }
     break;
@@ -985,9 +986,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
         ret = -ESRCH;
         if ( (d = rcu_lock_domain_by_id(op->domain)) != NULL )
         {
-            d->disable_migrate = op->u.disable_migrate.disable;
+            ret = xsm_domctl(d, op->cmd);
+            if ( !ret )
+                d->disable_migrate = op->u.disable_migrate.disable;
             rcu_unlock_domain(d);
-            ret = 0;
         }
     }
     break;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index e2b103b..fefa838 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2208,6 +2208,11 @@ 
gnttab_get_status_frames(XEN_GUEST_HANDLE(gnttab_get_status_frames_t) uop,
             op.status = GNTST_general_error;
         goto out1;
     }
+    rc = xsm_grant_setup(current->domain, d);
+    if ( rc ) {
+        op.status = GNTST_permission_denied;
+        goto out1;
+    }
 
     gt = d->grant_table;
 
@@ -2259,6 +2264,11 @@ gnttab_get_version(XEN_GUEST_HANDLE(gnttab_get_version_t 
uop))
         rcu_unlock_domain(d);
         return -EPERM;
     }
+    if ( xsm_grant_query_size(current->domain, d) )
+    {
+        rcu_unlock_domain(d);
+        return -EPERM;
+    }
     spin_lock(&d->grant_table->lock);
     op.version = d->grant_table->gt_version;
     spin_unlock(&d->grant_table->lock);
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index ccfdb22..f8f7cf8 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -152,6 +152,10 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 #ifdef LOCK_PROFILE
     case XEN_SYSCTL_lockprof_op:
     {
+        ret = xsm_lockprof();
+        if ( ret )
+            break;
+
         ret = spinlock_profile_control(&op->u.lockprof_op);
         if ( copy_to_guest(u_sysctl, op, 1) )
             ret = -EFAULT;
@@ -260,6 +265,10 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         uint32_t *status, *ptr;
         unsigned long pfn;
 
+        ret = xsm_page_offline(op->u.page_offline.cmd);
+        if ( ret )
+            break;
+
         ptr = status = xmalloc_bytes( sizeof(uint32_t) *
                                 (op->u.page_offline.end -
                                   op->u.page_offline.start + 1));
@@ -314,6 +323,10 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 
     case XEN_SYSCTL_cpupool_op:
     {
+        ret = xsm_cpupool_op();
+        if ( ret )
+            break;
+
         ret = cpupool_do_sysctl(&op->u.cpupool_op);
         if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
             ret = -EFAULT;
@@ -322,6 +335,10 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 
     case XEN_SYSCTL_scheduler_op:
     {
+        ret = xsm_sched_op();
+        if ( ret )
+            break;
+
         ret = sched_adjust_global(&op->u.scheduler_op);
         if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
             ret = -EFAULT;
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index bacca11..d88e9d7 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -425,12 +425,16 @@ static int iommu_get_device_group(
              ((pdev->bus == bus) && (pdev->devfn == devfn)) )
             continue;
 
+        if ( xsm_get_device_group((seg << 16) | (pdev->bus << 8) | 
pdev->devfn) )
+            continue;
+
         sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
         if ( (sdev_id == group_id) && (i < max_sdevs) )
         {
             bdf = 0;
             bdf |= (pdev->bus & 0xff) << 16;
             bdf |= (pdev->devfn & 0xff) << 8;
+
             if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
             {
                 spin_unlock(&pcidevs_lock);
@@ -519,6 +523,10 @@ int iommu_do_domctl(
         u32 max_sdevs;
         XEN_GUEST_HANDLE_64(uint32) sdevs;
 
+        ret = xsm_get_device_group(domctl->u.get_device_group.machine_sbdf);
+        if ( ret )
+            break;
+
         ret = -EINVAL;
         if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
             break;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index b7f87d0..50d337a 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -28,6 +28,7 @@
 #include <xen/keyhandler.h>
 #include <xen/radix-tree.h>
 #include <xen/tasklet.h>
+#include <xsm/xsm.h>
 #ifdef CONFIG_X86
 #include <asm/msi.h>
 #endif
@@ -297,7 +298,7 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn, const struct 
pci_dev_info *info)
     struct pci_dev *pdev;
     unsigned int slot = PCI_SLOT(devfn), func = PCI_FUNC(devfn);
     const char *pdev_type;
-    int ret = -ENOMEM;
+    int ret;
 
     if (!info)
         pdev_type = "device";
@@ -318,6 +319,12 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn, const struct 
pci_dev_info *info)
         pdev_type = "device";
     }
 
+    ret = xsm_resource_plug_pci((seg << 16) | (bus << 8) | devfn);
+    if ( ret )
+        return ret;
+
+    ret = -ENOMEM;
+
     spin_lock(&pcidevs_lock);
     pseg = alloc_pseg(seg);
     if ( !pseg )
@@ -426,7 +433,13 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
 {
     struct pci_seg *pseg = get_pseg(seg);
     struct pci_dev *pdev;
-    int ret = -ENODEV;
+    int ret;
+
+    ret = xsm_resource_unplug_pci((seg << 16) | (bus << 8) | devfn);
+    if ( ret )
+        return ret;
+
+    ret = -ENODEV;
 
     if ( !pseg )
         return -ENODEV;
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 43829c7..0c7f248 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -63,6 +63,7 @@ struct xsm_operations {
     int (*getvcpuinfo) (struct domain *d);
     int (*domain_settime) (struct domain *d);
     int (*set_target) (struct domain *d, struct domain *e);
+    int (*domctl) (struct domain *d, int cmd);
     int (*tbufcontrol) (void);
     int (*readconsole) (uint32_t clear);
     int (*sched_id) (void);
@@ -74,7 +75,9 @@ struct xsm_operations {
     int (*getcpuinfo) (void);
     int (*availheap) (void);
     int (*get_pmstat) (void);
+    int (*setpminfo) (void);
     int (*pm_op) (void);
+    int (*do_mca) (void);
 
     int (*evtchn_unbound) (struct domain *d, struct evtchn *chn, domid_t id2);
     int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1,
@@ -96,6 +99,8 @@ struct xsm_operations {
     int (*alloc_security_evtchn) (struct evtchn *chn);
     void (*free_security_evtchn) (struct evtchn *chn);
 
+    int (*get_pod_target) (struct domain *d);
+    int (*set_pod_target) (struct domain *d);
     int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
     int (*memory_pin_page) (struct domain *d, struct page_info *page);
@@ -109,10 +114,24 @@ struct xsm_operations {
     int (*irq_permission) (struct domain *d, int pirq, uint8_t allow);
     int (*iomem_permission) (struct domain *d, uint64_t s, uint64_t e, uint8_t 
allow);
 
+    int (*get_device_group) (uint32_t machine_bdf);
     int (*test_assign_device) (uint32_t machine_bdf);
     int (*assign_device) (struct domain *d, uint32_t machine_bdf);
     int (*deassign_device) (struct domain *d, uint32_t machine_bdf);
 
+    int (*resource_plug_core) (void);
+    int (*resource_unplug_core) (void);
+    int (*resource_plug_pci) (uint32_t machine_bdf);
+    int (*resource_unplug_pci) (uint32_t machine_bdf);
+    int (*resource_setup_pci) (uint32_t machine_bdf);
+    int (*resource_setup_gsi) (int gsi);
+    int (*resource_setup_misc) (void);
+
+    int (*page_offline)(uint32_t cmd);
+    int (*lockprof)(void);
+    int (*cpupool_op)(void);
+    int (*sched_op)(void);
+
     long (*__do_xsm_op) (XEN_GUEST_HANDLE(xsm_op_t) op);
 
 #ifdef CONFIG_X86
@@ -128,6 +147,8 @@ struct xsm_operations {
     int (*hvm_set_isa_irq_level) (struct domain *d);
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
+    int (*mem_event) (struct domain *d);
+    int (*mem_sharing) (struct domain *d);
     int (*apic) (struct domain *d, int cmd);
     int (*xen_settime) (void);
     int (*memtype) (uint32_t access);
@@ -149,6 +170,7 @@ struct xsm_operations {
     int (*add_to_physmap) (struct domain *d1, struct domain *d2);
     int (*sendtrigger) (struct domain *d);
     int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
+    int (*unbind_pt_irq) (struct domain *d);
     int (*pin_mem_cacheattr) (struct domain *d);
     int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
     int (*vcpuextstate) (struct domain *d, uint32_t cmd);
@@ -236,6 +258,11 @@ static inline int xsm_set_target (struct domain *d, struct 
domain *e)
     return xsm_call(set_target(d, e));
 }
 
+static inline int xsm_domctl (struct domain *d, int cmd)
+{
+    return xsm_call(domctl(d, cmd));
+}
+
 static inline int xsm_tbufcontrol (void)
 {
     return xsm_call(tbufcontrol());
@@ -291,11 +318,21 @@ static inline int xsm_get_pmstat(void)
     return xsm_call(get_pmstat());
 }
 
+static inline int xsm_setpminfo(void)
+{
+       return xsm_call(setpminfo());
+}
+
 static inline int xsm_pm_op(void)
 {
     return xsm_call(pm_op());
 }
 
+static inline int xsm_do_mca(void)
+{
+    return xsm_call(do_mca());
+}
+
 static inline int xsm_evtchn_unbound (struct domain *d1, struct evtchn *chn,
                                                                     domid_t 
id2)
 {
@@ -379,6 +416,16 @@ static inline void xsm_free_security_evtchn (struct evtchn 
*chn)
     (void)xsm_call(free_security_evtchn(chn));
 }
 
+static inline int xsm_get_pod_target (struct domain *d)
+{
+    return xsm_call(get_pod_target(d));
+}
+
+static inline int xsm_set_pod_target (struct domain *d)
+{
+    return xsm_call(set_pod_target(d));
+}
+
 static inline int xsm_memory_adjust_reservation (struct domain *d1, struct
                                                                     domain *d2)
 {
@@ -426,6 +473,11 @@ static inline int xsm_iomem_permission (struct domain *d, 
uint64_t s, uint64_t e
     return xsm_call(iomem_permission(d, s, e, allow));
 }
 
+static inline int xsm_get_device_group(uint32_t machine_bdf)
+{
+    return xsm_call(get_device_group(machine_bdf));
+}
+
 static inline int xsm_test_assign_device(uint32_t machine_bdf)
 {
     return xsm_call(test_assign_device(machine_bdf));
@@ -441,6 +493,61 @@ static inline int xsm_deassign_device(struct domain *d, 
uint32_t machine_bdf)
     return xsm_call(deassign_device(d, machine_bdf));
 }
 
+static inline int xsm_resource_plug_pci (uint32_t machine_bdf)
+{
+    return xsm_call(resource_plug_pci(machine_bdf));
+}
+
+static inline int xsm_resource_unplug_pci (uint32_t machine_bdf)
+{
+    return xsm_call(resource_unplug_pci(machine_bdf));
+}
+
+static inline int xsm_resource_plug_core (void)
+{
+    return xsm_call(resource_plug_core());
+}
+
+static inline int xsm_resource_unplug_core (void)
+{
+    return xsm_call(resource_unplug_core());
+}
+
+static inline int xsm_resource_setup_pci (uint32_t machine_bdf)
+{
+    return xsm_call(resource_setup_pci(machine_bdf));
+}
+
+static inline int xsm_resource_setup_gsi (int gsi)
+{
+    return xsm_call(resource_setup_gsi(gsi));
+}
+
+static inline int xsm_resource_setup_misc (void)
+{
+    return xsm_call(resource_setup_misc());
+}
+
+static inline int xsm_page_offline(uint32_t cmd)
+{
+    return xsm_call(page_offline(cmd));
+}
+
+static inline int xsm_lockprof(void)
+{
+    return xsm_call(lockprof());
+}
+
+static inline int xsm_cpupool_op(void)
+{
+    return xsm_call(cpupool_op());
+}
+
+static inline int xsm_sched_op(void)
+{
+    return xsm_call(sched_op());
+}
+
 static inline long __do_xsm_op (XEN_GUEST_HANDLE(xsm_op_t) op)
 {
 #ifdef XSM_ENABLE
@@ -528,6 +635,16 @@ static inline int xsm_hvm_inject_msi (struct domain *d)
     return xsm_call(hvm_inject_msi(d));
 }
 
+static inline int xsm_mem_event (struct domain *d)
+{
+    return xsm_call(mem_event(d));
+}
+
+static inline int xsm_mem_sharing (struct domain *d)
+{
+    return xsm_call(mem_sharing(d));
+}
+
 static inline int xsm_apic (struct domain *d, int cmd)
 {
     return xsm_call(apic(d, cmd));
@@ -626,6 +743,11 @@ static inline int xsm_bind_pt_irq(struct domain *d,
     return xsm_call(bind_pt_irq(d, bind));
 }
 
+static inline int xsm_unbind_pt_irq(struct domain *d)
+{
+    return xsm_call(unbind_pt_irq(d));
+}
+
 static inline int xsm_pin_mem_cacheattr(struct domain *d)
 {
     return xsm_call(pin_mem_cacheattr(d));
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 04c2f68..efe52bb 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -368,6 +368,16 @@ static int get_mfn_sid(unsigned long mfn, u32 *sid)
     return rc;    
 }
 
+static int flask_get_pod_target(struct domain *d)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, 
DOMAIN__GETPODTARGET);
+}
+
+static int flask_set_pod_target(struct domain *d)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, 
DOMAIN__SETPODTARGET);
+}
+
 static int flask_memory_adjust_reservation(struct domain *d1, struct domain 
*d2)
 {
     return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
@@ -582,6 +592,11 @@ static int flask_set_target(struct domain *d, struct 
domain *e)
     return domain_has_perm(d, e, SECCLASS_DOMAIN, DOMAIN__SET_TARGET);
 }
 
+static int flask_domctl(struct domain *d, int cmd)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, 
DOMAIN__SET_MISC_INFO);
+}
+
 static int flask_tbufcontrol(void)
 {
     return domain_has_xen(current->domain, XEN__TBUFCONTROL);
@@ -635,6 +650,26 @@ static int flask_availheap(void)
     return domain_has_xen(current->domain, XEN__HEAP);
 }
 
+static int flask_get_pmstat(void)
+{
+    return domain_has_xen(current->domain, XEN__PM_OP);
+}
+
+static int flask_setpminfo(void)
+{
+    return domain_has_xen(current->domain, XEN__PM_OP);
+}
+
+static int flask_pm_op(void)
+{
+    return domain_has_xen(current->domain, XEN__PM_OP);
+}
+
+static int flask_do_mca(void)
+{
+    return domain_has_xen(current->domain, XEN__MCA_OP);
+}
+
 static inline u32 resource_to_perm(uint8_t access)
 {
     if ( access )
@@ -727,6 +762,135 @@ static int flask_iomem_permission(struct domain *d, 
uint64_t start, uint64_t end
     return security_iterate_iomem_sids(start, end, _iomem_has_perm, &data);
 }
 
+static int flask_resource_plug_core(void)
+{
+    struct domain_security_struct *ssec;
+
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, SECINITSID_DOMXEN, SECCLASS_RESOURCE, 
RESOURCE__PLUG, NULL);
+}
+
+static int flask_resource_unplug_core(void)
+{
+    struct domain_security_struct *ssec;
+
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, SECINITSID_DOMXEN, SECCLASS_RESOURCE, 
RESOURCE__UNPLUG, NULL);
+}
+
+static int flask_resource_use_core(void)
+{
+    struct domain_security_struct *ssec;
+
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, SECINITSID_DOMXEN, SECCLASS_RESOURCE, 
RESOURCE__USE, NULL);
+}
+
+static int flask_resource_plug_pci(uint32_t machine_bdf)
+{
+    u32 rsid;
+    int rc = -EPERM;
+    struct avc_audit_data ad;
+    struct domain_security_struct *ssec;
+
+    rc = security_device_sid(machine_bdf, &rsid);
+    if ( rc )
+        return rc;
+
+    AVC_AUDIT_DATA_INIT(&ad, DEV);
+    ad.device = (unsigned long) machine_bdf;
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__PLUG, 
&ad);
+}
+
+static int flask_resource_unplug_pci(uint32_t machine_bdf)
+{
+    u32 rsid;
+    int rc = -EPERM;
+    struct avc_audit_data ad;
+    struct domain_security_struct *ssec;
+
+    rc = security_device_sid(machine_bdf, &rsid);
+    if ( rc )
+        return rc;
+
+    AVC_AUDIT_DATA_INIT(&ad, DEV);
+    ad.device = (unsigned long) machine_bdf;
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__UNPLUG, 
&ad);
+}
+
+static int flask_resource_setup_pci(uint32_t machine_bdf)
+{
+    u32 rsid;
+    int rc = -EPERM;
+    struct avc_audit_data ad;
+    struct domain_security_struct *ssec;
+
+    rc = security_device_sid(machine_bdf, &rsid);
+    if ( rc )
+        return rc;
+
+    AVC_AUDIT_DATA_INIT(&ad, DEV);
+    ad.device = (unsigned long) machine_bdf;
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__SETUP, 
&ad);
+}
+
+static int flask_resource_setup_gsi(int gsi)
+{
+    u32 rsid;
+    int rc = -EPERM;
+    struct avc_audit_data ad;
+    struct domain_security_struct *ssec;
+
+    rc = security_irq_sid(gsi, &rsid);
+    if ( rc )
+        return rc;
+
+    AVC_AUDIT_DATA_INIT(&ad, IRQ);
+    ad.irq = gsi;
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__SETUP, 
&ad);
+}
+
+static int flask_resource_setup_misc(void)
+{
+    struct domain_security_struct *ssec;
+
+    ssec = current->domain->ssid;
+    return avc_has_perm(ssec->sid, SECINITSID_XEN, SECCLASS_RESOURCE, 
RESOURCE__SETUP, NULL);
+}
+
+static inline int flask_page_offline(uint32_t cmd)
+{
+    switch (cmd) {
+    case sysctl_page_offline:
+        return flask_resource_unplug_core();
+    case sysctl_page_online:
+        return flask_resource_plug_core();
+    case sysctl_query_page_offline:
+        return flask_resource_use_core();
+    default:
+        return -EPERM;
+    }
+}
+
+static inline int flask_lockprof(void)
+{
+    return domain_has_xen(current->domain, XEN__LOCKPROF);
+}
+
+static inline int flask_cpupool_op(void)
+{
+    return domain_has_xen(current->domain, XEN__CPUPOOL_OP);
+}
+
+static inline int flask_sched_op(void)
+{
+    return domain_has_xen(current->domain, XEN__SCHED_OP);
+}
+
 static int flask_perfcontrol(void)
 {
     return domain_has_xen(current->domain, XEN__PERFCONTROL);
@@ -887,8 +1051,11 @@ static int flask_hvm_param(struct domain *d, unsigned long op)
     case HVMOP_get_param:
         perm = HVM__GETPARAM;
         break;
+    case HVMOP_track_dirty_vram:
+        perm = HVM__TRACKDIRTYVRAM;
+        break;
     default:
-        return -EPERM;
+        perm = HVM__HVMCTL;
     }
 
     return domain_has_perm(current->domain, d, SECCLASS_HVM, perm);
@@ -909,6 +1076,16 @@ static int flask_hvm_set_pci_link_route(struct domain *d)
     return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__PCIROUTE);
 }
 
+static int flask_mem_event(struct domain *d)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+static int flask_mem_sharing(struct domain *d)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__MEM_SHARING);
+}
+
 static int flask_apic(struct domain *d, int cmd)
 {
     u32 perm;
@@ -1088,6 +1265,19 @@ static int flask_sendtrigger(struct domain *d)
     return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__TRIGGER);
 }
 
+static int flask_get_device_group(uint32_t machine_bdf)
+{
+    u32 rsid;
+    int rc = -EPERM;
+    struct domain_security_struct *ssec = current->domain->ssid;
+
+    rc = security_device_sid(machine_bdf, &rsid);
+    if ( rc )
+        return rc;
+
+    return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, NULL);
+}
+
 static int flask_test_assign_device(uint32_t machine_bdf)
 {
     u32 rsid;
@@ -1174,6 +1364,11 @@ static int flask_bind_pt_irq (struct domain *d, struct xen_domctl_bind_pt_irq *b
     return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__USE, &ad);
 }
 
+static int flask_unbind_pt_irq (struct domain *d)
+{
+    return domain_has_perm(current->domain, d, SECCLASS_RESOURCE, RESOURCE__REMOVE);
+}
+
 static int flask_pin_mem_cacheattr (struct domain *d)
 {
     return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__CACHEATTR);
@@ -1236,6 +1431,7 @@ static struct xsm_operations flask_ops = {
     .getvcpuinfo = flask_getvcpuinfo,
     .domain_settime = flask_domain_settime,
     .set_target = flask_set_target,
+    .domctl = flask_domctl,
     .tbufcontrol = flask_tbufcontrol,
     .readconsole = flask_readconsole,
     .sched_id = flask_sched_id,
@@ -1246,6 +1442,10 @@ static struct xsm_operations flask_ops = {
     .debug_keys = flask_debug_keys,
     .getcpuinfo = flask_getcpuinfo,
     .availheap = flask_availheap,
+    .get_pmstat = flask_get_pmstat,
+    .setpminfo = flask_setpminfo,
+    .pm_op = flask_pm_op,
+    .do_mca = flask_do_mca,
 
     .evtchn_unbound = flask_evtchn_unbound,
     .evtchn_interdomain = flask_evtchn_interdomain,
@@ -1266,6 +1466,8 @@ static struct xsm_operations flask_ops = {
     .alloc_security_evtchn = flask_alloc_security_evtchn,
     .free_security_evtchn = flask_free_security_evtchn,
 
+    .get_pod_target = flask_get_pod_target,
+    .set_pod_target = flask_set_pod_target,
     .memory_adjust_reservation = flask_memory_adjust_reservation,
     .memory_stat_reservation = flask_memory_stat_reservation,
     .memory_pin_page = flask_memory_pin_page,
@@ -1280,6 +1482,19 @@ static struct xsm_operations flask_ops = {
     .irq_permission = flask_irq_permission,
     .iomem_permission = flask_iomem_permission,
 
+    .resource_plug_core = flask_resource_plug_core,
+    .resource_unplug_core = flask_resource_unplug_core,
+    .resource_plug_pci = flask_resource_plug_pci,
+    .resource_unplug_pci = flask_resource_unplug_pci,
+    .resource_setup_pci = flask_resource_setup_pci,
+    .resource_setup_gsi = flask_resource_setup_gsi,
+    .resource_setup_misc = flask_resource_setup_misc,
+
+    .page_offline = flask_page_offline,
+    .lockprof = flask_lockprof,
+    .cpupool_op = flask_cpupool_op,
+    .sched_op = flask_sched_op,
+
     .__do_xsm_op = do_flask_op,
 
 #ifdef CONFIG_X86
@@ -1293,6 +1508,8 @@ static struct xsm_operations flask_ops = {
     .hvm_set_pci_intx_level = flask_hvm_set_pci_intx_level,
     .hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
     .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
+    .mem_event = flask_mem_event,
+    .mem_sharing = flask_mem_sharing,
     .apic = flask_apic,
     .xen_settime = flask_xen_settime,
     .memtype = flask_memtype,
@@ -1310,10 +1527,12 @@ static struct xsm_operations flask_ops = {
     .update_va_mapping = flask_update_va_mapping,
     .add_to_physmap = flask_add_to_physmap,
     .sendtrigger = flask_sendtrigger,
+    .get_device_group = flask_get_device_group,
     .test_assign_device = flask_test_assign_device,
     .assign_device = flask_assign_device,
     .deassign_device = flask_deassign_device,
     .bind_pt_irq = flask_bind_pt_irq,
+    .unbind_pt_irq = flask_unbind_pt_irq,
     .pin_mem_cacheattr = flask_pin_mem_cacheattr,
     .ext_vcpucontext = flask_ext_vcpucontext,
     .vcpuextstate = flask_vcpuextstate,
diff --git a/xen/xsm/flask/include/av_perm_to_string.h b/xen/xsm/flask/include/av_perm_to_string.h
index 56572a7..85cbffc 100644
--- a/xen/xsm/flask/include/av_perm_to_string.h
+++ b/xen/xsm/flask/include/av_perm_to_string.h
@@ -24,6 +24,11 @@
    S_(SECCLASS_XEN, XEN__DEBUG, "debug")
    S_(SECCLASS_XEN, XEN__GETCPUINFO, "getcpuinfo")
    S_(SECCLASS_XEN, XEN__HEAP, "heap")
+   S_(SECCLASS_XEN, XEN__PM_OP, "pm_op")
+   S_(SECCLASS_XEN, XEN__MCA_OP, "mca_op")
+   S_(SECCLASS_XEN, XEN__LOCKPROF, "lockprof")
+   S_(SECCLASS_XEN, XEN__CPUPOOL_OP, "cpupool_op")
+   S_(SECCLASS_XEN, XEN__SCHED_OP, "sched_op")
    S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUCONTEXT, "setvcpucontext")
    S_(SECCLASS_DOMAIN, DOMAIN__PAUSE, "pause")
    S_(SECCLASS_DOMAIN, DOMAIN__UNPAUSE, "unpause")
@@ -52,6 +57,9 @@
    S_(SECCLASS_DOMAIN, DOMAIN__SETEXTVCPUCONTEXT, "setextvcpucontext")
    S_(SECCLASS_DOMAIN, DOMAIN__GETVCPUEXTSTATE, "getvcpuextstate")
    S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUEXTSTATE, "setvcpuextstate")
+   S_(SECCLASS_DOMAIN, DOMAIN__GETPODTARGET, "getpodtarget")
+   S_(SECCLASS_DOMAIN, DOMAIN__SETPODTARGET, "setpodtarget")
+   S_(SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO, "set_misc_info")
    S_(SECCLASS_HVM, HVM__SETHVMC, "sethvmc")
    S_(SECCLASS_HVM, HVM__GETHVMC, "gethvmc")
    S_(SECCLASS_HVM, HVM__SETPARAM, "setparam")
@@ -62,6 +70,9 @@
    S_(SECCLASS_HVM, HVM__BIND_IRQ, "bind_irq")
    S_(SECCLASS_HVM, HVM__CACHEATTR, "cacheattr")
    S_(SECCLASS_HVM, HVM__TRACKDIRTYVRAM, "trackdirtyvram")
+   S_(SECCLASS_HVM, HVM__HVMCTL, "hvmctl")
+   S_(SECCLASS_HVM, HVM__MEM_EVENT, "mem_event")
+   S_(SECCLASS_HVM, HVM__MEM_SHARING, "mem_sharing")
    S_(SECCLASS_EVENT, EVENT__BIND, "bind")
    S_(SECCLASS_EVENT, EVENT__SEND, "send")
    S_(SECCLASS_EVENT, EVENT__STATUS, "status")
@@ -103,6 +114,9 @@
    S_(SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, "stat_device")
    S_(SECCLASS_RESOURCE, RESOURCE__ADD_DEVICE, "add_device")
    S_(SECCLASS_RESOURCE, RESOURCE__REMOVE_DEVICE, "remove_device")
+   S_(SECCLASS_RESOURCE, RESOURCE__PLUG, "plug")
+   S_(SECCLASS_RESOURCE, RESOURCE__UNPLUG, "unplug")
+   S_(SECCLASS_RESOURCE, RESOURCE__SETUP, "setup")
    S_(SECCLASS_SECURITY, SECURITY__COMPUTE_AV, "compute_av")
    S_(SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE, "compute_create")
    S_(SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER, "compute_member")
diff --git a/xen/xsm/flask/include/av_permissions.h b/xen/xsm/flask/include/av_permissions.h
index 67511ad..9e55a86 100644
--- a/xen/xsm/flask/include/av_permissions.h
+++ b/xen/xsm/flask/include/av_permissions.h
@@ -24,6 +24,11 @@
 #define XEN__DEBUG                                0x00400000UL
 #define XEN__GETCPUINFO                           0x00800000UL
 #define XEN__HEAP                                 0x01000000UL
+#define XEN__PM_OP                                0x02000000UL
+#define XEN__MCA_OP                               0x04000000UL
+#define XEN__LOCKPROF                             0x08000000UL
+#define XEN__CPUPOOL_OP                           0x10000000UL
+#define XEN__SCHED_OP                             0x20000000UL
 
 #define DOMAIN__SETVCPUCONTEXT                    0x00000001UL
 #define DOMAIN__PAUSE                             0x00000002UL
@@ -53,6 +58,9 @@
 #define DOMAIN__SETEXTVCPUCONTEXT                 0x02000000UL
 #define DOMAIN__GETVCPUEXTSTATE                   0x04000000UL
 #define DOMAIN__SETVCPUEXTSTATE                   0x08000000UL
+#define DOMAIN__GETPODTARGET                      0x10000000UL
+#define DOMAIN__SETPODTARGET                      0x20000000UL
+#define DOMAIN__SET_MISC_INFO                     0x40000000UL
 
 #define HVM__SETHVMC                              0x00000001UL
 #define HVM__GETHVMC                              0x00000002UL
@@ -64,6 +72,9 @@
 #define HVM__BIND_IRQ                             0x00000080UL
 #define HVM__CACHEATTR                            0x00000100UL
 #define HVM__TRACKDIRTYVRAM                       0x00000200UL
+#define HVM__HVMCTL                               0x00000400UL
+#define HVM__MEM_EVENT                            0x00000800UL
+#define HVM__MEM_SHARING                          0x00001000UL
 
 #define EVENT__BIND                               0x00000001UL
 #define EVENT__SEND                               0x00000002UL
@@ -110,6 +121,9 @@
 #define RESOURCE__STAT_DEVICE                     0x00000200UL
 #define RESOURCE__ADD_DEVICE                      0x00000400UL
 #define RESOURCE__REMOVE_DEVICE                   0x00000800UL
+#define RESOURCE__PLUG                            0x00001000UL
+#define RESOURCE__UNPLUG                          0x00002000UL
+#define RESOURCE__SETUP                           0x00004000UL
 
 #define SECURITY__COMPUTE_AV                      0x00000001UL
 #define SECURITY__COMPUTE_CREATE                  0x00000002UL
-- 
1.7.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.