
[Xen-changelog] [xen master] x86/cpuid: Compile out unused logic/objects



commit 885d2d20425df9c35926c0462d53ab97074aa97d
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Feb 25 17:36:12 2020 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Feb 27 18:54:58 2020 +0000

    x86/cpuid: Compile out unused logic/objects
    
    CPUID Policy objects are large (1860 bytes at the time of writing), so
    compiling them out based on CONFIG_{PV,HVM} makes a lot of sense.
    
    This involves a bit of complexity in init_domain_cpuid_policy() and
    recalculate_cpuid_policy() as is_pv_domain() can't be evaluated at compile
    time.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpuid.c | 47 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 14 deletions(-)
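
The compile-out idiom the patch relies on can be shown in isolation.  The
sketch below uses hypothetical names (struct policy, PV_ENABLED/HVM_ENABLED
as plain constants standing in for Xen's IS_ENABLED(CONFIG_PV) and
hvm_enabled); it is not code from the Xen tree, just the shape of the
change: the ~1860-byte objects only exist in builds that configure them,
while constant guards keep the call sites compiled and type-checked so the
dead branches can be discarded.  The sketch guards the calculate_*() bodies
explicitly so it builds standalone in either configuration; the patch
itself leaves those bodies alone and only guards the calls.

#include <stdio.h>
#include <string.h>

#define CONFIG_PV 1             /* pretend Kconfig output; comment out to drop PV */
/* #define CONFIG_HVM 1 */      /* HVM support compiled out in this build */

/* Simplified stand-ins for Xen's IS_ENABLED(CONFIG_PV) / hvm_enabled. */
#ifdef CONFIG_PV
# define PV_ENABLED  1
#else
# define PV_ENABLED  0
#endif
#ifdef CONFIG_HVM
# define HVM_ENABLED 1
#else
# define HVM_ENABLED 0
#endif

struct policy { unsigned char data[1860]; }; /* size quoted in the commit message */

#ifdef CONFIG_PV
static struct policy pv_max_policy;
#endif
#ifdef CONFIG_HVM
static struct policy hvm_max_policy;
#endif

static void calculate_pv_max_policy(void)
{
#ifdef CONFIG_PV
    memset(&pv_max_policy, 0, sizeof(pv_max_policy));
#endif
}

static void calculate_hvm_max_policy(void)
{
#ifdef CONFIG_HVM
    memset(&hvm_max_policy, 0, sizeof(hvm_max_policy));
#endif
}

int main(void)
{
    /* Mirrors the init_guest_cpuid() hunk: the conditions are constants,
     * so an unused call and the object behind it drop out of the build. */
    if ( PV_ENABLED )
        calculate_pv_max_policy();

    if ( HVM_ENABLED )
        calculate_hvm_max_policy();

    printf("PV: %d, HVM: %d, policy object size: %zu bytes\n",
           PV_ENABLED, HVM_ENABLED, sizeof(struct policy));
    return 0;
}
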

diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 546ae31bb9..cd9a02143c 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -95,10 +95,14 @@ static void zero_leaves(struct cpuid_leaf *l,
     memset(&l[first], 0, sizeof(*l) * (last - first + 1));
 }
 
-struct cpuid_policy __read_mostly raw_cpuid_policy,
-    __read_mostly host_cpuid_policy,
-    __read_mostly pv_max_cpuid_policy,
-    __read_mostly hvm_max_cpuid_policy;
+struct cpuid_policy __read_mostly     raw_cpuid_policy,
+                    __read_mostly    host_cpuid_policy;
+#ifdef CONFIG_PV
+struct cpuid_policy __read_mostly  pv_max_cpuid_policy;
+#endif
+#ifdef CONFIG_HVM
+struct cpuid_policy __read_mostly hvm_max_cpuid_policy;
+#endif
 
 static void sanitise_featureset(uint32_t *fs)
 {
@@ -384,9 +388,6 @@ static void __init calculate_hvm_max_policy(void)
     unsigned int i;
     const uint32_t *hvm_featuremask;
 
-    if ( !hvm_enabled )
-        return;
-
     *p = host_cpuid_policy;
     cpuid_policy_to_featureset(p, hvm_featureset);
 
@@ -443,8 +444,12 @@ void __init init_guest_cpuid(void)
 {
     calculate_raw_policy();
     calculate_host_policy();
-    calculate_pv_max_policy();
-    calculate_hvm_max_policy();
+
+    if ( IS_ENABLED(CONFIG_PV) )
+        calculate_pv_max_policy();
+
+    if ( hvm_enabled )
+        calculate_hvm_max_policy();
 }
 
 bool recheck_cpu_features(unsigned int cpu)
@@ -472,11 +477,18 @@ bool recheck_cpu_features(unsigned int cpu)
 void recalculate_cpuid_policy(struct domain *d)
 {
     struct cpuid_policy *p = d->arch.cpuid;
-    const struct cpuid_policy *max =
-        is_pv_domain(d) ? &pv_max_cpuid_policy : &hvm_max_cpuid_policy;
+    const struct cpuid_policy *max = is_pv_domain(d)
+        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_max_cpuid_policy : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpuid_policy : NULL);
     uint32_t fs[FSCAPINTS], max_fs[FSCAPINTS];
     unsigned int i;
 
+    if ( !max )
+    {
+        ASSERT_UNREACHABLE();
+        return;
+    }
+
     p->x86_vendor = x86_cpuid_lookup_vendor(
         p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
 
@@ -612,10 +624,17 @@ void recalculate_cpuid_policy(struct domain *d)
 
 int init_domain_cpuid_policy(struct domain *d)
 {
-    struct cpuid_policy *p =
-        xmemdup(is_pv_domain(d) ?  &pv_max_cpuid_policy
-                                : &hvm_max_cpuid_policy);
+    struct cpuid_policy *p = is_pv_domain(d)
+        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_max_cpuid_policy : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpuid_policy : NULL);
+
+    if ( !p )
+    {
+        ASSERT_UNREACHABLE();
+        return -EOPNOTSUPP;
+    }
 
+    p = xmemdup(p);
     if ( !p )
         return -ENOMEM;
 
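The hunks in recalculate_cpuid_policy() and init_domain_cpuid_policy() deal
with the complication called out in the commit message: is_pv_domain() is a
run-time property, so the selected "max" template can legitimately come out
as NULL when the matching CONFIG option is compiled out.  Below is a minimal
standalone sketch of that shape, again with hypothetical names rather than
Xen code; the patch itself open-codes the choice with IS_ENABLED()
ternaries and uses xmemdup()/ASSERT_UNREACHABLE() where the sketch uses
malloc()/assert().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CONFIG_PV 1             /* pretend Kconfig output */
/* #define CONFIG_HVM 1 */      /* HVM support compiled out in this build */

struct policy { unsigned int leaves[16]; };

#ifdef CONFIG_PV
static const struct policy pv_max_policy  = { .leaves = { 1, 2, 3 } };
#endif
#ifdef CONFIG_HVM
static const struct policy hvm_max_policy = { .leaves = { 4, 5, 6 } };
#endif

/*
 * Pick the "max" template for a domain type, or NULL if support for that
 * type is compiled out.  A helper just keeps the sketch compact.
 */
static const struct policy *max_policy(bool is_pv)
{
#ifdef CONFIG_PV
    if ( is_pv )
        return &pv_max_policy;
#endif
#ifdef CONFIG_HVM
    if ( !is_pv )
        return &hvm_max_policy;
#endif
    return NULL;
}

/* Loosely mirrors init_domain_cpuid_policy(): duplicate the template, but
 * fail cleanly if a domain type arrives without compiled-in support. */
static struct policy *init_domain_policy(bool is_pv)
{
    const struct policy *max = max_policy(is_pv);
    struct policy *p;

    if ( !max )
    {
        /* Corresponds to the ASSERT_UNREACHABLE() + error return above. */
        assert(!"domain type has no compiled-in policy");
        return NULL;
    }

    p = malloc(sizeof(*p));     /* stands in for Xen's xmemdup() */
    if ( p )
        memcpy(p, max, sizeof(*p));

    return p;
}

int main(void)
{
    struct policy *p = init_domain_policy(true);

    if ( p )
        printf("first leaf of duplicated policy: %u\n", p->leaves[0]);

    free(p);
    return 0;
}

With assertions enabled the impossible combination is caught loudly; in a
release build the sketch simply fails and returns NULL, mirroring the
ASSERT_UNREACHABLE() plus -EOPNOTSUPP error path in the hunk above.
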
--
generated by git-patchbot for /home/xen/git/xen.git#master
