
[Xen-changelog] [xen staging] x86/cpuid: Split update_domain_cpuid_info() in half



commit 205f3b638070110683a451ee98b57c60e23235d7
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Sep 6 15:55:59 2019 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Sep 26 13:40:18 2019 +0100

    x86/cpuid: Split update_domain_cpuid_info() in half
    
    update_domain_cpuid_info() currently serves two purposes.  First, to merge new
    CPUID data from the toolstack, and second, to perform any necessary updating
    of derived domain/vcpu settings.
    
    The first part of this is going to be superseded by a new and substantially
    more efficient hypercall.
    
    Carve the second part out into a new domain_cpu_policy_changed() helper, and
    call this from the remains of update_domain_cpuid_info().
    
    This does drop the call_policy_changed, but with the new hypercall in place,
    the common case will be a single call per domain.  Dropping the optimisation
    here allows for a cleaner set of following changes.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Release-acked-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/domctl.c | 245 ++++++++++++++++++++------------------------------
 1 file changed, 99 insertions(+), 146 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d1334c7112..fb76140c8d 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -46,101 +46,14 @@ static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
     return iop->remain ? -EFAULT : 0;
 }
 
-static int update_domain_cpuid_info(struct domain *d,
-                                    const struct xen_domctl_cpuid *ctl)
+static void domain_cpu_policy_changed(struct domain *d)
 {
-    struct cpuid_policy *p = d->arch.cpuid;
-    const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
-    int old_vendor = p->x86_vendor;
-    unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b;
-    bool call_policy_changed = false; /* Avoid for_each_vcpu() unnecessarily */
-
-    /*
-     * Skip update for leaves we don't care about, to avoid the overhead of
-     * recalculate_cpuid_policy().
-     */
-    switch ( ctl->input[0] )
-    {
-    case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
-        if ( ctl->input[0] == 4 &&
-             ctl->input[1] >= ARRAY_SIZE(p->cache.raw) )
-            return 0;
-
-        if ( ctl->input[0] == 7 &&
-             ctl->input[1] >= ARRAY_SIZE(p->feat.raw) )
-            return 0;
-
-        if ( ctl->input[0] == 0xb &&
-             ctl->input[1] >= ARRAY_SIZE(p->topo.raw) )
-            return 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) < 2);
-        if ( ctl->input[0] == XSTATE_CPUID &&
-             ctl->input[1] != 1 ) /* Everything else automatically calculated. */
-            return 0;
-        break;
-
-    case 0x40000000: case 0x40000100:
-        /* Only care about the max_leaf limit. */
-
-    case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
-        break;
-
-    default:
-        return 0;
-    }
-
-    /* Insert ctl data into cpuid_policy. */
-    switch ( ctl->input[0] )
-    {
-    case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
-        switch ( ctl->input[0] )
-        {
-        case 4:
-            p->cache.raw[ctl->input[1]] = leaf;
-            break;
-
-        case 7:
-            p->feat.raw[ctl->input[1]] = leaf;
-            break;
-
-        case 0xb:
-            p->topo.raw[ctl->input[1]] = leaf;
-            break;
-
-        case XSTATE_CPUID:
-            p->xstate.raw[ctl->input[1]] = leaf;
-            break;
-
-        default:
-            p->basic.raw[ctl->input[0]] = leaf;
-            break;
-        }
-        break;
-
-    case 0x40000000:
-        p->hv_limit = ctl->eax;
-        break;
+    const struct cpuid_policy *p = d->arch.cpuid;
+    struct vcpu *v;
 
-    case 0x40000100:
-        p->hv2_limit = ctl->eax;
-        break;
-
-    case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
-        p->extd.raw[ctl->input[0] - 0x80000000] = leaf;
-        break;
-    }
-
-    recalculate_cpuid_policy(d);
-
-    switch ( ctl->input[0] )
+    if ( is_pv_domain(d) )
     {
-    case 0:
-        call_policy_changed = (p->x86_vendor != old_vendor);
-        break;
-
-    case 1:
-        if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
+        if ( ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
         {
             uint64_t mask = cpuidmask_defaults._1cd;
             uint32_t ecx = p->basic._1c;
@@ -197,25 +110,18 @@ static int update_domain_cpuid_info(struct domain *d,
 
             d->arch.pv.cpuidmasks->_1cd = mask;
         }
-        break;
 
-    case 6:
-        if ( is_pv_domain(d) && ((levelling_caps & LCAP_6c) == LCAP_6c) )
+        if ( ((levelling_caps & LCAP_6c) == LCAP_6c) )
         {
             uint64_t mask = cpuidmask_defaults._6c;
 
             if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
-                mask &= (~0ULL << 32) | ctl->ecx;
+                mask &= (~0ULL << 32) | p->basic.raw[6].c;
 
             d->arch.pv.cpuidmasks->_6c = mask;
         }
-        break;
-
-    case 7:
-        if ( ctl->input[1] != 0 )
-            break;
 
-        if ( is_pv_domain(d) && ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) )
+        if ( ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) )
         {
             uint64_t mask = cpuidmask_defaults._7ab0;
 
@@ -232,35 +138,7 @@ static int update_domain_cpuid_info(struct domain *d,
             d->arch.pv.cpuidmasks->_7ab0 = mask;
         }
 
-        /*
-         * If the IBRS/IBPB policy has changed, we need to recalculate the MSR
-         * interception bitmaps.
-         */
-        call_policy_changed = (is_hvm_domain(d) &&
-                               ((old_7d0 ^ p->feat.raw[0].d) &
-                                (cpufeat_mask(X86_FEATURE_IBRSB) |
-                                 cpufeat_mask(X86_FEATURE_L1D_FLUSH))));
-        break;
-
-    case 0xa:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
-            break;
-
-        /* If PMU version is zero then the guest doesn't have VPMU */
-        if ( p->basic.pmu_version == 0 )
-        {
-            struct vcpu *v;
-
-            for_each_vcpu ( d, v )
-                vpmu_destroy(v);
-        }
-        break;
-
-    case 0xd:
-        if ( ctl->input[1] != 1 )
-            break;
-
-        if ( is_pv_domain(d) && ((levelling_caps & LCAP_Da1) == LCAP_Da1) )
+        if ( ((levelling_caps & LCAP_Da1) == LCAP_Da1) )
         {
             uint64_t mask = cpuidmask_defaults.Da1;
             uint32_t eax = p->xstate.Da1;
@@ -270,10 +148,8 @@ static int update_domain_cpuid_info(struct domain *d,
 
             d->arch.pv.cpuidmasks->Da1 = mask;
         }
-        break;
 
-    case 0x80000001:
-        if ( is_pv_domain(d) && ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) )
+        if ( ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) )
         {
             uint64_t mask = cpuidmask_defaults.e1cd;
             uint32_t ecx = p->extd.e1c;
@@ -317,27 +193,104 @@ static int update_domain_cpuid_info(struct domain *d,
 
             d->arch.pv.cpuidmasks->e1cd = mask;
         }
+    }
+
+    for_each_vcpu ( d, v )
+    {
+        cpuid_policy_updated(v);
+
+        /* If PMU version is zero then the guest doesn't have VPMU */
+        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+             p->basic.pmu_version == 0 )
+            vpmu_destroy(v);
+    }
+}
+
+static int update_domain_cpuid_info(struct domain *d,
+                                    const struct xen_domctl_cpuid *ctl)
+{
+    struct cpuid_policy *p = d->arch.cpuid;
+    const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
+
+    /*
+     * Skip update for leaves we don't care about, to avoid the overhead of
+     * recalculate_cpuid_policy().
+     */
+    switch ( ctl->input[0] )
+    {
+    case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
+        if ( ctl->input[0] == 4 &&
+             ctl->input[1] >= ARRAY_SIZE(p->cache.raw) )
+            return 0;
+
+        if ( ctl->input[0] == 7 &&
+             ctl->input[1] >= ARRAY_SIZE(p->feat.raw) )
+            return 0;
+
+        if ( ctl->input[0] == 0xb &&
+             ctl->input[1] >= ARRAY_SIZE(p->topo.raw) )
+            return 0;
+
+        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) < 2);
+        if ( ctl->input[0] == XSTATE_CPUID &&
+             ctl->input[1] != 1 ) /* Everything else automatically calculated. */
+            return 0;
         break;
 
-    case 0x80000008:
-        /*
-         * If the IBPB policy has changed, we need to recalculate the MSR
-         * interception bitmaps.
-         */
-        call_policy_changed = (is_hvm_domain(d) &&
-                               ((old_e8b ^ p->extd.raw[8].b) &
-                                cpufeat_mask(X86_FEATURE_IBPB)));
+    case 0x40000000: case 0x40000100:
+        /* Only care about the max_leaf limit. */
+
+    case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
         break;
+
+    default:
+        return 0;
     }
 
-    if ( call_policy_changed )
+    /* Insert ctl data into cpuid_policy. */
+    switch ( ctl->input[0] )
     {
-        struct vcpu *v;
+    case 0x00000000 ... ARRAY_SIZE(p->basic.raw) - 1:
+        switch ( ctl->input[0] )
+        {
+        case 4:
+            p->cache.raw[ctl->input[1]] = leaf;
+            break;
+
+        case 7:
+            p->feat.raw[ctl->input[1]] = leaf;
+            break;
+
+        case 0xb:
+            p->topo.raw[ctl->input[1]] = leaf;
+            break;
 
-        for_each_vcpu( d, v )
-            cpuid_policy_updated(v);
+        case XSTATE_CPUID:
+            p->xstate.raw[ctl->input[1]] = leaf;
+            break;
+
+        default:
+            p->basic.raw[ctl->input[0]] = leaf;
+            break;
+        }
+        break;
+
+    case 0x40000000:
+        p->hv_limit = ctl->eax;
+        break;
+
+    case 0x40000100:
+        p->hv2_limit = ctl->eax;
+        break;
+
+    case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
+        p->extd.raw[ctl->input[0] - 0x80000000] = leaf;
+        break;
     }
 
+    recalculate_cpuid_policy(d);
+    domain_cpu_policy_changed(d);
+
     return 0;
 }
 
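For readers skimming the archive, below is a minimal, self-contained sketch of the pattern this commit sets up: the per-leaf merge step is separated from the recompute-derived-state step, so a future bulk update can install many leaves and rederive state once per domain rather than once per leaf.  This is not Xen code; all names in the sketch are invented for illustration.  In the real change, update_domain_cpuid_info() remains the per-leaf entry point and the rederivation moves into domain_cpu_policy_changed(), which the new hypercall mentioned in the commit message can later call once after installing a complete policy.

    /*
     * Illustrative sketch only -- not Xen code.  The struct and functions
     * below are invented to show the shape of the refactoring: merging raw
     * data is separated from recomputing derived state, so a bulk update
     * recomputes exactly once.
     */
    #include <stdio.h>

    struct policy {
        unsigned int raw[8];      /* data supplied by the "toolstack" */
        unsigned int derived;     /* state recalculated from raw[] */
    };

    /* Analogue of domain_cpu_policy_changed(): rederive settings from raw[]. */
    static void policy_changed(struct policy *p)
    {
        unsigned int i, sum = 0;

        for ( i = 0; i < 8; i++ )
            sum += p->raw[i];

        p->derived = sum;
    }

    /* Old shape: merge a single entry, then rederive (once per entry). */
    static void update_one(struct policy *p, unsigned int idx, unsigned int val)
    {
        if ( idx >= 8 )
            return;

        p->raw[idx] = val;
        policy_changed(p);
    }

    /* New shape: merge everything, then rederive exactly once. */
    static void update_all(struct policy *p, const unsigned int vals[8])
    {
        unsigned int i;

        for ( i = 0; i < 8; i++ )
            p->raw[i] = vals[i];

        policy_changed(p);
    }

    int main(void)
    {
        struct policy p = { { 0 } };
        const unsigned int vals[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        update_one(&p, 3, 42);    /* per-leaf path: one recalculation */
        update_all(&p, vals);     /* bulk path: one recalculation total */

        printf("derived = %u\n", p.derived);

        return 0;
    }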
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

