
[xen staging] x86/cpu-policy: Extend the guest max policy max leaf/subleaves



commit fa2d8318033e468a4ded1fc3d721dc3e019e449b
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue Oct 29 17:21:08 2024 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Oct 30 17:34:32 2024 +0000

    x86/cpu-policy: Extend the guest max policy max leaf/subleaves
    
    We already have one migration case opencoded (feat.max_subleaf).  A more
    recent discovery is that we advertise x2APIC to guests without ensuring that
    we provide max_leaf >= 0xb.
    
    In general, any leaf known to Xen can be safely configured by the toolstack if
    it doesn't violate other constraints.
    
    Therefore, introduce guest_common_{max,default}_leaves() to generalise the
    special case we currently have for feat.max_subleaf, in preparation to be able
    to provide x2APIC topology in leaf 0xb even on older hardware.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu-policy.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)
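
Not part of the patch: a minimal, self-contained sketch of the two behaviours the
new helpers encode, using hypothetical array sizes and host limits (in Xen the real
values come from the raw[] arrays in struct cpu_policy).  The max policy derives
its leaf/subleaf limits from the sizes of the arrays Xen knows about, while the
default policy simply inherits the host's advertised limits.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for the relevant struct cpu_policy fields. */
struct policy {
    unsigned int basic_max_leaf;
    unsigned int feat_max_subleaf;
    unsigned int extd_max_leaf;
};

int main(void)
{
    /* Illustrative array sizes only, not the real struct cpu_policy ones. */
    unsigned int basic_raw[0x14], feat_raw[3], extd_raw[0x22];
    struct policy host = { 0xd, 0, 0x8000001e }; /* example host-reported limits */
    struct policy max, def;

    /* Max policy: any leaf/subleaf Xen knows about may be configured. */
    max.basic_max_leaf   = ARRAY_SIZE(basic_raw) - 1;
    max.feat_max_subleaf = ARRAY_SIZE(feat_raw) - 1;
    max.extd_max_leaf    = 0x80000000u + ARRAY_SIZE(extd_raw) - 1;

    /* Default policy: inherit whatever the host reports. */
    def = host;

    printf("max: basic=%#x feat=%u extd=%#x\n",
           max.basic_max_leaf, max.feat_max_subleaf, max.extd_max_leaf);
    printf("def: basic=%#x feat=%u extd=%#x\n",
           def.basic_max_leaf, def.feat_max_subleaf, def.extd_max_leaf);

    return 0;
}

The point of the split is that the max policy bounds what a toolstack may configure
(anything Xen knows how to handle), while the default policy stays conservative and
mirrors the host.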

diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index b6d9fad567..78bc9872b0 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -391,6 +391,27 @@ static void __init calculate_host_policy(void)
     p->platform_info.cpuid_faulting = cpu_has_cpuid_faulting;
 }
 
+/*
+ * Guest max policies can have any max leaf/subleaf within bounds.
+ *
+ * - Some incoming VMs have a larger-than-necessary feat max_subleaf.
+ * - Some VMs we'd like to synthesise leaves not present on the host.
+ */
+static void __init guest_common_max_leaves(struct cpu_policy *p)
+{
+    p->basic.max_leaf       = ARRAY_SIZE(p->basic.raw) - 1;
+    p->feat.max_subleaf     = ARRAY_SIZE(p->feat.raw) - 1;
+    p->extd.max_leaf        = 0x80000000U + ARRAY_SIZE(p->extd.raw) - 1;
+}
+
+/* Guest default policies inherit the host max leaf/subleaf settings. */
+static void __init guest_common_default_leaves(struct cpu_policy *p)
+{
+    p->basic.max_leaf       = host_cpu_policy.basic.max_leaf;
+    p->feat.max_subleaf     = host_cpu_policy.feat.max_subleaf;
+    p->extd.max_leaf        = host_cpu_policy.extd.max_leaf;
+}
+
 static void __init guest_common_max_feature_adjustments(uint32_t *fs)
 {
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
@@ -579,11 +600,7 @@ static void __init calculate_pv_max_policy(void)
 
     *p = host_cpu_policy;
 
-    /*
-     * Some VMs may have a larger-than-necessary feat max_subleaf.  Allow them
-     * to migrate in.
-     */
-    p->feat.max_subleaf = ARRAY_SIZE(p->feat.raw) - 1;
+    guest_common_max_leaves(p);
 
     x86_cpu_policy_to_featureset(p, fs);
 
@@ -626,8 +643,7 @@ static void __init calculate_pv_def_policy(void)
 
     *p = pv_max_cpu_policy;
 
-    /* Default to the same max_subleaf as the host. */
-    p->feat.max_subleaf = host_cpu_policy.feat.max_subleaf;
+    guest_common_default_leaves(p);
 
     x86_cpu_policy_to_featureset(p, fs);
 
@@ -666,11 +682,7 @@ static void __init calculate_hvm_max_policy(void)
 
     *p = host_cpu_policy;
 
-    /*
-     * Some VMs may have a larger-than-necessary feat max_subleaf.  Allow them
-     * to migrate in.
-     */
-    p->feat.max_subleaf = ARRAY_SIZE(p->feat.raw) - 1;
+    guest_common_max_leaves(p);
 
     x86_cpu_policy_to_featureset(p, fs);
 
@@ -790,8 +802,7 @@ static void __init calculate_hvm_def_policy(void)
 
     *p = hvm_max_cpu_policy;
 
-    /* Default to the same max_subleaf as the host. */
-    p->feat.max_subleaf = host_cpu_policy.feat.max_subleaf;
+    guest_common_default_leaves(p);
 
     x86_cpu_policy_to_featureset(p, fs);
 
--
generated by git-patchbot for /home/xen/git/xen.git#staging