[Xen-devel] [PATCH v2 13/13] x86/domctl: Implement XEN_DOMCTL_get_cpu_policy



From: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>

This finally (after literally years of work!) marks the point where the
toolstack can ask the hypervisor for the current CPUID and MSR policy of a
specific domain.

Also extend xen-cpuid's --policy mode to accept a domid and dump a specific
domain's CPUID and MSR policy.
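
As a rough sketch of how a toolstack component might consume the new libxc
wrapper (illustrative only, not part of this patch: the domid, buffer sizing
and error handling below are placeholders):

    #include <err.h>
    #include <stdio.h>
    #include <stdlib.h>

    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t domid = 1, nr_leaves, nr_msrs;
        xen_cpuid_leaf_t *leaves;
        xen_msr_entry_t *msrs;

        if ( !xch )
            err(1, "xc_interface_open");

        /* Ask Xen for the maximum buffer sizes, then allocate them. */
        if ( xc_get_cpu_policy_size(xch, &nr_leaves, &nr_msrs) )
            err(1, "xc_get_cpu_policy_size");

        leaves = calloc(nr_leaves, sizeof(*leaves));
        msrs = calloc(nr_msrs, sizeof(*msrs));
        if ( !leaves || !msrs )
            err(1, "calloc");

        /* New in this patch: fetch one specific domain's policy. */
        if ( xc_get_domain_cpu_policy(xch, domid, &nr_leaves, leaves,
                                      &nr_msrs, msrs) )
            err(1, "xc_get_domain_cpu_policy");

        printf("Domain %u: %u CPUID leaves, %u MSRs\n",
               domid, nr_leaves, nr_msrs);

        free(leaves);
        free(msrs);
        xc_interface_close(xch);

        return 0;
    }

The same information can be dumped from the command line with
"xen-cpuid --policy <domid>" (or "-p <domid>").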

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
CC: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h       |  3 ++
 tools/libxc/xc_cpuid_x86.c          | 40 +++++++++++++++++++++++
 tools/misc/xen-cpuid.c              | 64 +++++++++++++++++++++++++++++++------
 xen/arch/x86/domctl.c               | 34 ++++++++++++++++++++
 xen/include/public/domctl.h         | 18 +++++++++++
 xen/xsm/flask/hooks.c               |  1 +
 xen/xsm/flask/policy/access_vectors |  1 +
 7 files changed, 152 insertions(+), 9 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index ee3ab09..3f156c1 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2558,6 +2558,9 @@ int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,
 int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
                              uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
                              uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+                             uint32_t *nr_msrs, xen_msr_entry_t *msrs);
 
 uint32_t xc_get_cpu_featureset_size(void);
 
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index 8fd04ef..e4c7a34 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -191,6 +191,46 @@ int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
     return ret;
 }
 
+int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
+                             uint32_t *nr_msrs, xen_msr_entry_t *msrs)
+{
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(leaves,
+                             *nr_leaves * sizeof(*leaves),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(msrs,
+                             *nr_msrs * sizeof(*msrs),
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    int ret;
+
+    if ( xc_hypercall_bounce_pre(xch, leaves) )
+        return -1;
+
+    if ( xc_hypercall_bounce_pre(xch, msrs) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_get_cpu_policy;
+    domctl.domain = domid;
+    domctl.u.cpu_policy.nr_leaves = *nr_leaves;
+    set_xen_guest_handle(domctl.u.cpu_policy.cpuid_policy, leaves);
+    domctl.u.cpu_policy.nr_msrs = *nr_msrs;
+    set_xen_guest_handle(domctl.u.cpu_policy.msr_policy, msrs);
+
+    ret = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, leaves);
+    xc_hypercall_bounce_post(xch, msrs);
+
+    if ( !ret )
+    {
+        *nr_leaves = domctl.u.cpu_policy.nr_leaves;
+        *nr_msrs = domctl.u.cpu_policy.nr_msrs;
+    }
+
+    return ret;
+}
+
 struct cpuid_domain_info
 {
     enum
diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
index 1c14d93..dd39268 100644
--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -3,6 +3,8 @@
 #include <err.h>
 #include <getopt.h>
 #include <string.h>
+#include <errno.h>
+#include <limits.h>
 
 #include <xenctrl.h>
 
@@ -309,11 +311,13 @@ int main(int argc, char **argv)
 {
     enum { MODE_UNKNOWN, MODE_INFO, MODE_DETAIL, MODE_INTERPRET, MODE_POLICY }
     mode = MODE_UNKNOWN;
+    int domid = -1;
 
     nr_features = xc_get_cpu_featureset_size();
 
     for ( ;; )
     {
+        const char *tmp_optarg;
         int option_index = 0, c;
         static struct option long_options[] =
         {
@@ -321,11 +325,11 @@ int main(int argc, char **argv)
             { "info", no_argument, NULL, 'i' },
             { "detail", no_argument, NULL, 'd' },
             { "verbose", no_argument, NULL, 'v' },
-            { "policy", no_argument, NULL, 'p' },
+            { "policy", optional_argument, NULL, 'p' },
             { NULL, 0, NULL, 0 },
         };
 
-        c = getopt_long(argc, argv, "hidvp", long_options, &option_index);
+        c = getopt_long(argc, argv, "hidvp::", long_options, &option_index);
 
         if ( c == -1 )
             break;
@@ -345,6 +349,28 @@ int main(int argc, char **argv)
 
         case 'p':
             mode = MODE_POLICY;
+
+            tmp_optarg = optarg;
+
+            /* Make "--policy $DOMID" and "-p $DOMID" work. */
+            if ( !optarg && optind < argc &&
+                 argv[optind] != NULL && argv[optind][0] != '\0' &&
+                 argv[optind][0] != '-' )
+                tmp_optarg = argv[optind++];
+
+            if ( tmp_optarg )
+            {
+                char *endptr;
+                long val;
+
+                errno = 0;
+                val = strtol(tmp_optarg, &endptr, 0);
+
+                if ( errno || endptr == tmp_optarg || *endptr ||
+                     val < 0 || val > INT_MAX )
+                    err(1, "strtol(%s,,)", tmp_optarg);
+                domid = val;
+            }
             break;
 
         case 'd':
@@ -398,8 +424,9 @@ int main(int argc, char **argv)
 
         if ( xc_get_cpu_policy_size(xch, &max_leaves, &max_msrs) )
             err(1, "xc_get_cpu_policy_size(...)");
-        printf("Xen reports there are maximum %u leaves and %u MSRs\n",
-                max_leaves, max_msrs);
+        if ( domid == -1 )
+            printf("Xen reports there are maximum %u leaves and %u MSRs\n",
+                   max_leaves, max_msrs);
 
         leaves = calloc(max_leaves, sizeof(xen_cpuid_leaf_t));
         if ( !leaves )
@@ -408,17 +435,36 @@ int main(int argc, char **argv)
         if ( !msrs )
             err(1, "calloc(max_msrs)");
 
-        for ( pol = 0; pol < ARRAY_SIZE(sys_policies); ++pol )
+        if ( domid != -1 )
         {
+            char name[20];
             uint32_t nr_leaves = max_leaves;
             uint32_t nr_msrs = max_msrs;
 
-            if ( xc_get_system_cpu_policy(xch, pol, &nr_leaves, leaves,
+            if ( xc_get_domain_cpu_policy(xch, domid, &nr_leaves, leaves,
                                              &nr_msrs, msrs) )
-                err(1, "xc_get_system_cpu_policy(, %s,,)",
-                    sys_policies[pol]);
+                err(1, "xc_get_domain_cpu_policy(, %d, %d,, %d,)",
+                    domid, nr_leaves, nr_msrs);
 
-            print_policy(sys_policies[pol], leaves, nr_leaves, msrs, nr_msrs);
+            snprintf(name, sizeof(name), "Domain %d", domid);
+            print_policy(name, leaves, nr_leaves, msrs, nr_msrs);
+        }
+        else
+        {
+            /* Get system policies */
+            for ( pol = 0; pol < ARRAY_SIZE(sys_policies); ++pol )
+            {
+                uint32_t nr_leaves = max_leaves;
+                uint32_t nr_msrs = max_msrs;
+
+                if ( xc_get_system_cpu_policy(xch, pol, &nr_leaves, leaves,
+                                                 &nr_msrs, msrs) )
+                    err(1, "xc_get_system_cpu_policy(, %s,,)",
+                        sys_policies[pol]);
+
+                print_policy(sys_policies[pol], leaves, nr_leaves,
+                             msrs, nr_msrs);
+            }
         }
 
         free(leaves);
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index b973629..b20aa7a 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1523,6 +1523,40 @@ long arch_do_domctl(
         recalculate_cpuid_policy(d);
         break;
 
+    case XEN_DOMCTL_get_cpu_policy:
+        if ( !guest_handle_is_null(domctl->u.cpu_policy.cpuid_policy) )
+        {
+            if ( (ret = x86_cpuid_copy_to_buffer(
+                      d->arch.cpuid,
+                      domctl->u.cpu_policy.cpuid_policy,
+                      &domctl->u.cpu_policy.nr_leaves)) )
+                break;
+
+            if ( __copy_field_to_guest(u_domctl, domctl,
+                                       u.cpu_policy.nr_leaves) )
+            {
+                ret = -EFAULT;
+                break;
+            }
+        }
+
+        if ( !guest_handle_is_null(domctl->u.cpu_policy.msr_policy) )
+        {
+            if ( (ret = x86_msr_copy_to_buffer(
+                      d->arch.msr,
+                      domctl->u.cpu_policy.msr_policy,
+                      &domctl->u.cpu_policy.nr_msrs)) )
+                break;
+
+            if ( __copy_field_to_guest(u_domctl, domctl,
+                                       u.cpu_policy.nr_msrs) )
+            {
+                ret = -EFAULT;
+                break;
+            }
+        }
+        break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 5c3916c..2114412 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -635,6 +635,22 @@ struct xen_domctl_cpuid {
   uint32_t ecx;
   uint32_t edx;
 };
+
+/*
+ * XEN_DOMCTL_{get,set}_cpu_policy (x86 specific)
+ *
+ * Query or set the CPUID and MSR policies for a specific domain.
+ */
+struct xen_domctl_cpu_policy {
+    uint32_t nr_leaves; /* IN/OUT: Number of leaves in/written to
+                         * 'cpuid_policy'. */
+    uint32_t nr_msrs;   /* IN/OUT: Number of MSRs in/written to
+                         * 'msr_policy'. */
+    XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) cpuid_policy; /* IN/OUT: */
+    XEN_GUEST_HANDLE_64(xen_msr_entry_t) msr_policy;    /* IN/OUT: */
+};
+typedef struct xen_domctl_cpu_policy xen_domctl_cpu_policy_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpu_policy_t);
 #endif
 
 /*
@@ -1174,6 +1190,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_soft_reset                    79
 #define XEN_DOMCTL_set_gnttab_limits             80
 #define XEN_DOMCTL_vuart_op                      81
+#define XEN_DOMCTL_get_cpu_policy                82
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1218,6 +1235,7 @@ struct xen_domctl {
         struct xen_domctl_mem_sharing_op    mem_sharing_op;
 #if defined(__i386__) || defined(__x86_64__)
         struct xen_domctl_cpuid             cpuid;
+        struct xen_domctl_cpu_policy        cpu_policy;
         struct xen_domctl_vcpuextstate      vcpuextstate;
         struct xen_domctl_vcpu_msrs         vcpu_msrs;
 #endif
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index f614272..7d4fa1c 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -717,6 +717,7 @@ static int flask_domctl(struct domain *d, int cmd)
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
 
     case XEN_DOMCTL_set_cpuid:
+    case XEN_DOMCTL_get_cpu_policy:
         return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_CPUID);
 
     case XEN_DOMCTL_gettscinfo:
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 8c5baff..140d3a5 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -213,6 +213,7 @@ class domain2
 #  target = the new target domain
     set_as_target
 # XEN_DOMCTL_set_cpuid
+# XEN_DOMCTL_get_cpu_policy
     set_cpuid
 # XEN_DOMCTL_gettscinfo
     gettsc
-- 
2.1.4

