
[Xen-devel] [PATCH 7/8] x86/domctl: Drop XEN_DOMCTL_set_cpuid



With the final users moved over to using XEN_DOMCTL_set_cpu_policy, drop this
domctl and its associated infrastructure.

Rename the preexisting set_cpuid XSM vector to set_cpu_policy, now that it is
back to having a single user.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
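
For reviewers' reference (not intended for the commit message): a minimal
sketch of what a former XEN_DOMCTL_set_cpuid caller looks like once converted
to the policy interface.  It assumes the xc_set_domain_cpu_policy() wrapper
introduced earlier in this series; the exact prototype and error-reporting
out-parameters shown here are an assumption and for illustration only.

    #include <xenctrl.h>

    /* Illustrative only: override a single CPUID leaf via the combined
     * CPUID/MSR policy hypercall, rather than issuing one
     * XEN_DOMCTL_set_cpuid invocation per leaf. */
    static int set_one_cpuid_leaf(xc_interface *xch, uint32_t domid,
                                  uint32_t leaf, uint32_t subleaf,
                                  uint32_t a, uint32_t b,
                                  uint32_t c, uint32_t d)
    {
        xen_cpuid_leaf_t l = {
            .leaf = leaf, .subleaf = subleaf,
            .a = a, .b = b, .c = c, .d = d,
        };
        uint32_t err_leaf = ~0u, err_subleaf = ~0u, err_msr = ~0u;

        /* 1 CPUID leaf, 0 MSRs.  Any rejection detail comes back via the
         * err_* out-parameters (assumed interface). */
        return xc_set_domain_cpu_policy(xch, domid, 1, &l, 0, NULL,
                                        &err_leaf, &err_subleaf, &err_msr);
    }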
---
 tools/flask/policy/modules/dom0.te  |   2 +-
 tools/flask/policy/modules/xen.if   |   2 +-
 xen/arch/x86/domctl.c               | 101 ------------------------------------
 xen/include/public/domctl.h         |  11 +---
 xen/xsm/flask/hooks.c               |   3 +-
 xen/xsm/flask/policy/access_vectors |   3 +-
 6 files changed, 5 insertions(+), 117 deletions(-)

diff --git a/tools/flask/policy/modules/dom0.te b/tools/flask/policy/modules/dom0.te
index 9970f9dc08..272f6a4f75 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -38,7 +38,7 @@ allow dom0_t dom0_t:domain {
        getpodtarget setpodtarget set_misc_info set_virq_handler
 };
 allow dom0_t dom0_t:domain2 {
-       set_cpuid gettsc settsc setscheduler set_vnumainfo
+       set_cpu_policy gettsc settsc setscheduler set_vnumainfo
        get_vnumainfo psr_cmt_op psr_alloc get_cpu_policy
 };
 allow dom0_t dom0_t:resource { add remove };
diff --git a/tools/flask/policy/modules/xen.if b/tools/flask/policy/modules/xen.if
index de5fb331bf..8eb2293a52 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -50,7 +50,7 @@ define(`create_domain_common', `
                        getdomaininfo hypercall setvcpucontext getscheduler
                        getvcpuinfo getaddrsize getaffinity setaffinity
                        settime setdomainhandle getvcpucontext set_misc_info };
-       allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim
+       allow $1 $2:domain2 { set_cpu_policy settsc setscheduler setclaim
                        set_vnumainfo get_vnumainfo cacheflush
                        psr_cmt_op psr_alloc soft_reset
                        resource_map get_cpu_policy };
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 99bc2fb10d..ec50a88156 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -206,94 +206,6 @@ static void domain_cpu_policy_changed(struct domain *d)
     }
 }
 
-static int update_domain_cpuid_info(struct domain *d,
-                                    const struct xen_domctl_cpuid *ctl)
-{
-    struct cpuid_policy *p = d->arch.cpuid;
-    const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
-
-    /*
-     * Skip update for leaves we don't care about, to avoid the overhead of
-     * recalculate_cpuid_policy().
-     */
-    switch ( ctl->input[0] )
-    {
-    case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
-        if ( ctl->input[0] == 4 &&
-             ctl->input[1] >= ARRAY_SIZE(p->cache.raw) )
-            return 0;
-
-        if ( ctl->input[0] == 7 &&
-             ctl->input[1] >= ARRAY_SIZE(p->feat.raw) )
-            return 0;
-
-        if ( ctl->input[0] == 0xb &&
-             ctl->input[1] >= ARRAY_SIZE(p->topo.raw) )
-            return 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) < 2);
-        if ( ctl->input[0] == XSTATE_CPUID &&
-             ctl->input[1] != 1 ) /* Everything else automatically calculated. */
-            return 0;
-        break;
-
-    case 0x40000000: case 0x40000100:
-        /* Only care about the max_leaf limit. */
-
-    case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
-        break;
-
-    default:
-        return 0;
-    }
-
-    /* Insert ctl data into cpuid_policy. */
-    switch ( ctl->input[0] )
-    {
-    case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
-        switch ( ctl->input[0] )
-        {
-        case 4:
-            p->cache.raw[ctl->input[1]] = leaf;
-            break;
-
-        case 7:
-            p->feat.raw[ctl->input[1]] = leaf;
-            break;
-
-        case 0xb:
-            p->topo.raw[ctl->input[1]] = leaf;
-            break;
-
-        case XSTATE_CPUID:
-            p->xstate.raw[ctl->input[1]] = leaf;
-            break;
-
-        default:
-            p->basic.raw[ctl->input[0]] = leaf;
-            break;
-        }
-        break;
-
-    case 0x40000000:
-        p->hv_limit = ctl->eax;
-        break;
-
-    case 0x40000100:
-        p->hv2_limit = ctl->eax;
-        break;
-
-    case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
-        p->extd.raw[ctl->input[0] - 0x80000000] = leaf;
-        break;
-    }
-
-    recalculate_cpuid_policy(d);
-    domain_cpu_policy_changed(d);
-
-    return 0;
-}
-
 static int update_domain_cpu_policy(struct domain *d,
                                     xen_domctl_cpu_policy_t *xdpc)
 {
@@ -951,19 +863,6 @@ long arch_do_domctl(
         break;
     }
 
-    case XEN_DOMCTL_set_cpuid:
-        if ( d == currd ) /* no domain_pause() */
-            ret = -EINVAL;
-        else if ( d->creation_finished )
-            ret = -EEXIST; /* No changing once the domain is running. */
-        else
-        {
-            domain_pause(d);
-            ret = update_domain_cpuid_info(d, &domctl->u.cpuid);
-            domain_unpause(d);
-        }
-        break;
-
     case XEN_DOMCTL_gettscinfo:
         if ( d == currd ) /* no domain_pause() */
             ret = -EINVAL;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 0471d3c680..548b917bdb 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -648,14 +648,6 @@ struct xen_domctl_set_target {
 
 #if defined(__i386__) || defined(__x86_64__)
 # define XEN_CPUID_INPUT_UNUSED  0xFFFFFFFF
-/* XEN_DOMCTL_set_cpuid */
-struct xen_domctl_cpuid {
-  uint32_t input[2];
-  uint32_t eax;
-  uint32_t ebx;
-  uint32_t ecx;
-  uint32_t edx;
-};
 
 /*
  * XEN_DOMCTL_{get,set}_cpu_policy (x86 specific)
@@ -1166,7 +1158,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_set_target                    46
 #define XEN_DOMCTL_deassign_device               47
 #define XEN_DOMCTL_unbind_pt_irq                 48
-#define XEN_DOMCTL_set_cpuid                     49
+/* #define XEN_DOMCTL_set_cpuid                  49 - Obsolete - use set_cpu_policy */
 #define XEN_DOMCTL_get_device_group              50
 /* #define XEN_DOMCTL_set_machine_address_size   51 - Obsolete */
 /* #define XEN_DOMCTL_get_machine_address_size   52 - Obsolete */
@@ -1243,7 +1235,6 @@ struct xen_domctl {
         struct xen_domctl_vm_event_op       vm_event_op;
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
-        struct xen_domctl_cpuid             cpuid;
         struct xen_domctl_cpu_policy        cpu_policy;
         struct xen_domctl_vcpuextstate      vcpuextstate;
         struct xen_domctl_vcpu_msrs         vcpu_msrs;
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index b23772786a..fd8d23c185 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -716,8 +716,7 @@ static int flask_domctl(struct domain *d, int cmd)
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
 
     case XEN_DOMCTL_set_cpu_policy:
-    case XEN_DOMCTL_set_cpuid:
-        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_CPUID);
+        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_CPU_POLICY);
 
     case XEN_DOMCTL_gettscinfo:
         return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__GETTSC);
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 6f3f9493f8..c055c14c26 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -208,8 +208,7 @@ class domain2
 #  target = the new target domain
     set_as_target
 # XEN_DOMCTL_set_cpu_policy
-# XEN_DOMCTL_set_cpuid
-    set_cpuid
+    set_cpu_policy
 # XEN_DOMCTL_gettscinfo
     gettsc
 # XEN_DOMCTL_settscinfo
-- 
2.11.0

