[Xen-changelog] [xen stable-4.7] x86/AMD: distinguish compute units from hyper-threads
commit e90e2431a4aee843db49db9a3ac7ee0d9e1bf8f0
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Jul 30 14:10:30 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Jul 30 14:10:30 2018 +0200

    x86/AMD: distinguish compute units from hyper-threads

    Fam17 replaces CUs by HTs, which we should reflect accordingly, even if
    the difference is not very big. The most relevant change (requiring
    some code restructuring) is that the topoext feature no longer means
    there is a valid CU ID.

    Take the opportunity and convert wrongly plain int variables in
    set_cpu_sibling_map() to unsigned int.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Brian Woods <brian.woods@xxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: 9429b07a0af7f92a5f25e4068e11db881e157495
    master date: 2018-07-19 09:42:42 +0200
---
 xen/arch/x86/cpu/amd.c | 16 +++++++++++-----
 xen/arch/x86/smpboot.c | 32 ++++++++++++++++++++------------
 2 files changed, 31 insertions(+), 17 deletions(-)
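[Editorial illustration, not part of the patch: a minimal standalone C sketch
of the decision the amd.c hunk below introduces. On pre-Fam17 parts the low
byte of CPUID leaf 0x8000001e EBX names a compute unit, while on Fam17 it
names a core, so only one of the two IDs ends up valid at a time. The struct
and the INVALID_* sentinels are illustrative stand-ins, not Xen's actual
cpuinfo_x86 definitions.]

/* Sketch only: decode CPUID 0x8000001e EBX the way the patch does. */
#include <stdio.h>

#define INVALID_CUID (~0u)            /* stand-in "no compute unit ID" */
#define INVALID_CORE (~0u)            /* stand-in "no core ID" */

struct topo {
    unsigned int family;              /* c->x86 */
    unsigned int num_siblings;        /* threads per CU resp. core */
    unsigned int compute_unit_id;     /* valid only before Fam17 */
    unsigned int cpu_core_id;         /* valid on Fam17 in this sketch */
};

static void decode_topology(struct topo *t, unsigned int ebx)
{
    t->compute_unit_id = INVALID_CUID;
    t->cpu_core_id = INVALID_CORE;
    t->num_siblings = ((ebx >> 8) & 0x3) + 1;

    if (t->family < 0x17)
        t->compute_unit_id = ebx & 0xFF;   /* compute unit ID */
    else
        t->cpu_core_id = ebx & 0xFF;       /* hyper-thread: core ID */
}

int main(void)
{
    struct topo fam15 = { .family = 0x15 };
    struct topo fam17 = { .family = 0x17 };

    decode_topology(&fam15, 0x0103);       /* 2 siblings, ID 3 */
    decode_topology(&fam17, 0x0105);       /* 2 siblings, ID 5 */

    printf("Fam15h: %s %u, %u siblings\n",
           fam15.compute_unit_id != INVALID_CUID ? "Compute Unit" : "Core",
           fam15.compute_unit_id != INVALID_CUID ? fam15.compute_unit_id
                                                 : fam15.cpu_core_id,
           fam15.num_siblings);
    printf("Fam17h: %s %u, %u siblings\n",
           fam17.compute_unit_id != INVALID_CUID ? "Compute Unit" : "Core",
           fam17.compute_unit_id != INVALID_CUID ? fam17.compute_unit_id
                                                 : fam17.cpu_core_id,
           fam17.num_siblings);
    return 0;
}

[The same "is the CU ID valid?" test is what the reworked
set_cpu_sibling_map() below relies on: match compute unit IDs when both are
valid, otherwise fall back to core IDs, and warn when neither relationship
can be established.]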
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 5fc2b1dcab..bc16f7e632 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -502,17 +502,23 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
                 u32 eax, ebx, ecx, edx;
 
                 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-                c->compute_unit_id = ebx & 0xFF;
                 c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
+
+                if (c->x86 < 0x17)
+                        c->compute_unit_id = ebx & 0xFF;
+                else {
+                        c->cpu_core_id = ebx & 0xFF;
+                        c->x86_max_cores /= c->x86_num_siblings;
+                }
         }
 
         if (opt_cpu_info)
                 printk("CPU %d(%d) -> Processor %d, %s %d\n",
                        cpu, c->x86_max_cores, c->phys_proc_id,
-                       cpu_has(c, X86_FEATURE_TOPOEXT) ? "Compute Unit" :
-                                                         "Core",
-                       cpu_has(c, X86_FEATURE_TOPOEXT) ? c->compute_unit_id :
-                                                         c->cpu_core_id);
+                       c->compute_unit_id != INVALID_CUID ? "Compute Unit"
+                                                          : "Core",
+                       c->compute_unit_id != INVALID_CUID ? c->compute_unit_id
+                                                          : c->cpu_core_id);
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 97804d3b4d..7874e694ed 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -219,33 +219,41 @@ static void link_thread_siblings(int cpu1, int cpu2)
     cpumask_set_cpu(cpu2, per_cpu(cpu_core_mask, cpu1));
 }
 
-static void set_cpu_sibling_map(int cpu)
+static void set_cpu_sibling_map(unsigned int cpu)
 {
-    int i;
+    unsigned int i;
     struct cpuinfo_x86 *c = cpu_data;
 
     cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 
     cpumask_set_cpu(cpu, socket_cpumask[cpu_to_socket(cpu)]);
+    cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
+    cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
 
     if ( c[cpu].x86_num_siblings > 1 )
     {
         for_each_cpu ( i, &cpu_sibling_setup_map )
         {
-            if ( cpu_has(c, X86_FEATURE_TOPOEXT) ) {
-                if ( (c[cpu].phys_proc_id == c[i].phys_proc_id) &&
-                     (c[cpu].compute_unit_id == c[i].compute_unit_id) )
+            if ( cpu == i || c[cpu].phys_proc_id != c[i].phys_proc_id )
+                continue;
+            if ( c[cpu].compute_unit_id != INVALID_CUID &&
+                 c[i].compute_unit_id != INVALID_CUID )
+            {
+                if ( c[cpu].compute_unit_id == c[i].compute_unit_id )
+                    link_thread_siblings(cpu, i);
+            }
+            else if ( c[cpu].cpu_core_id != XEN_INVALID_CORE_ID &&
+                      c[i].cpu_core_id != XEN_INVALID_CORE_ID )
+            {
+                if ( c[cpu].cpu_core_id == c[i].cpu_core_id )
                     link_thread_siblings(cpu, i);
-            } else if ( (c[cpu].phys_proc_id == c[i].phys_proc_id) &&
-                        (c[cpu].cpu_core_id == c[i].cpu_core_id) ) {
-                link_thread_siblings(cpu, i);
             }
+            else
+                printk(XENLOG_WARNING
+                       "CPU%u: unclear relationship with CPU%u\n",
+                       cpu, i);
         }
     }
-    else
-    {
-        cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
-    }
 
     if ( c[cpu].x86_max_cores == 1 )
     {
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.7

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog