[Xen-devel] [PATCH v2 2/2] x86/cpuid: Move x86_vendor from arch_domain to cpuid_policy
No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>

v2: Break out family/model logic
---
 xen/arch/x86/cpuid.c            | 9 +++++----
 xen/arch/x86/domain.c           | 2 --
 xen/arch/x86/domctl.c           | 9 ++++-----
 xen/arch/x86/hvm/emulate.c      | 2 +-
 xen/arch/x86/hvm/hvm.c          | 2 +-
 xen/arch/x86/hvm/ioreq.c        | 2 +-
 xen/arch/x86/hvm/svm/svm.c      | 2 +-
 xen/arch/x86/hvm/vmx/vmx.c      | 2 +-
 xen/arch/x86/mm.c               | 4 ++--
 xen/arch/x86/mm/shadow/common.c | 2 +-
 xen/arch/x86/traps.c            | 2 +-
 xen/include/asm-x86/cpuid.h     | 3 +++
 xen/include/asm-x86/domain.h    | 3 ---
 13 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 95040f9..bcdac03 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -130,6 +130,8 @@ static void __init calculate_raw_policy(void)
     for ( i = 1; i < min(ARRAY_SIZE(p->extd.raw),
                          p->extd.max_leaf + 1 - 0x80000000ul); ++i )
         cpuid_leaf(0x80000000 + i, &p->extd.raw[i]);
+
+    p->x86_vendor = boot_cpu_data.x86_vendor;
 }
 
 static void __init calculate_host_policy(void)
@@ -592,7 +594,7 @@ static void pv_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
         res->d = p->extd.e1d;
 
         /* If not emulating AMD, clear the duplicated features in e1d. */
-        if ( currd->arch.x86_vendor != X86_VENDOR_AMD )
+        if ( p->x86_vendor != X86_VENDOR_AMD )
             res->d &= ~CPUID_COMMON_1D_FEATURES;
 
         /*
@@ -805,7 +807,7 @@ static void hvm_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
         res->d = p->extd.e1d;
 
         /* If not emulating AMD, clear the duplicated features in e1d. */
-        if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+        if ( p->x86_vendor != X86_VENDOR_AMD )
             res->d &= ~CPUID_COMMON_1D_FEATURES;
         /* fast-forward MSR_APIC_BASE.EN if it hasn't already been clobbered. */
         else if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
@@ -829,8 +831,7 @@ static void hvm_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
             res->d &= ~cpufeat_mask(X86_FEATURE_PSE36);
 
         /* SYSCALL is hidden outside of long mode on Intel. */
-        if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
-             !hvm_long_mode_enabled(v))
+        if ( p->x86_vendor == X86_VENDOR_INTEL && !hvm_long_mode_enabled(v) )
             res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
 
         break;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f966da7..de4a3d6 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -606,8 +606,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
         if ( (rc = init_domain_cpuid_policy(d)) )
             goto fail;
 
-        d->arch.x86_vendor = boot_cpu_data.x86_vendor;
-
         d->arch.ioport_caps =
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 0458d8f..969df12e 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -154,12 +154,11 @@ static int update_domain_cpuid_info(struct domain *d,
     switch ( ctl->input[0] )
     {
     case 0: {
-        int old_vendor = d->arch.x86_vendor;
+        int old_vendor = p->x86_vendor;
 
-        d->arch.x86_vendor = get_cpu_vendor(
-            ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
+        p->x86_vendor = get_cpu_vendor(ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
 
-        if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
+        if ( is_hvm_domain(d) && (p->x86_vendor != old_vendor) )
         {
             struct vcpu *v;
 
@@ -290,7 +289,7 @@ static int update_domain_cpuid_info(struct domain *d,
                 ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);
 
             /* If not emulating AMD, clear the duplicated features in e1d. */
-            if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+            if ( p->x86_vendor != X86_VENDOR_AMD )
                 edx &= ~CPUID_COMMON_1D_FEATURES;
 
             switch ( boot_cpu_data.x86_vendor )
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index e22740f..0d21fe1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1910,7 +1910,7 @@ void hvm_emulate_init_once(
 
     hvmemul_ctxt->validate = validate;
     hvmemul_ctxt->ctxt.regs = regs;
-    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;
+    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.cpuid->x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
 
     if ( cpu_has_vmx )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2ec0800..63748dc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3619,7 +3619,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
 {
     struct vcpu *cur = current;
     bool should_emulate =
-        cur->domain->arch.x86_vendor != boot_cpu_data.x86_vendor;
+        cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
     struct hvm_emulate_ctxt ctxt;
 
     hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 8ad8465..26a0cb8 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -1140,7 +1140,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                (p->addr & 3);
         /* AMD extended configuration space access? */
         if ( CF8_ADDR_HI(cf8) &&
-             d->arch.x86_vendor == X86_VENDOR_AMD &&
+             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
              (x86_fam = get_cpu_family(
                  d->arch.cpuid->basic.raw_fms, NULL, NULL)) > 0x10 &&
              x86_fam <= 0x17 )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index ae8e2c4..e8ef88d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -591,7 +591,7 @@ static void svm_update_guest_vendor(struct vcpu *v)
     u32 bitmap = vmcb_get_exception_intercepts(vmcb);
 
     if ( opt_hvm_fep ||
-         (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
         bitmap |= (1U << TRAP_invalid_op);
     else
         bitmap &= ~(1U << TRAP_invalid_op);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 61925cf..a5e5ffd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -544,7 +544,7 @@ void vmx_update_exception_bitmap(struct vcpu *v)
 static void vmx_update_guest_vendor(struct vcpu *v)
 {
     if ( opt_hvm_fep ||
-         (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+         (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
         v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
     else
         v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d707d1c..a5521f1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5358,7 +5358,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
     struct ptwr_emulate_ctxt ptwr_ctxt = {
         .ctxt = {
             .regs = regs,
-            .vendor = d->arch.x86_vendor,
+            .vendor = d->arch.cpuid->x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .swint_emulate = x86_swint_emulate_none,
@@ -5514,7 +5514,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
     struct x86_emulate_ctxt ctxt = {
         .regs = regs,
-        .vendor = v->domain->arch.x86_vendor,
+        .vendor = v->domain->arch.cpuid->x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
         .swint_emulate = x86_swint_emulate_none,
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 4113351..e4ccf92 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -330,7 +330,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
     memset(sh_ctxt, 0, sizeof(*sh_ctxt));
 
     sh_ctxt->ctxt.regs = regs;
-    sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
+    sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
     sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
     /* Segment cache initialisation. Primed with CS. */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4f29c3a..bc95604 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2981,7 +2981,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
     struct domain *currd = curr->domain;
     struct priv_op_ctxt ctxt = {
         .ctxt.regs = regs,
-        .ctxt.vendor = currd->arch.x86_vendor,
+        .ctxt.vendor = currd->arch.cpuid->x86_vendor,
     };
     int rc;
     unsigned int eflags, ar;
diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h
index c56190b..24ad3e0 100644
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -204,6 +204,9 @@ struct cpuid_policy
     /* Toolstack selected Hypervisor max_leaf (if non-zero). */
     uint8_t hv_limit, hv2_limit;
 
+    /* Value calculated from raw data above. */
+    uint8_t x86_vendor;
+
     /* Temporary: Legacy data array. */
 #define MAX_CPUID_INPUT 40
     xen_domctl_cpuid_t legacy[MAX_CPUID_INPUT];
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 82296c8..e6c7e13 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -337,9 +337,6 @@ struct arch_domain
     /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
     bool_t auto_unmask;
 
-    /* Values snooped from updates to cpuids[] (below). */
-    u8 x86_vendor; /* CPU vendor */
-
     /*
      * The width of the FIP/FDP register in the FPU that needs to be
      * saved/restored during a context switch. This is needed because
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
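
The net effect of the patch, for readers skimming the diff: the vendor identifier becomes a field derived inside the per-domain CPUID policy, and every consumer reaches it through the policy pointer rather than through struct arch_domain. Below is a minimal standalone sketch of that access pattern; the structures are heavily simplified, and the vendor constant's value and the helper function are hypothetical illustrations, not Xen code.

#include <stdint.h>

/* Simplified stand-ins for the structures touched by the patch. */
struct cpuid_policy {
    uint8_t x86_vendor;          /* value calculated from the raw leaf data */
    /* ... raw and derived CPUID leaves elided ... */
};

struct arch_domain {
    struct cpuid_policy *cpuid;  /* allocated by init_domain_cpuid_policy() */
    /* the old "u8 x86_vendor" member is gone */
};

struct domain {
    struct arch_domain arch;
};

#define X86_VENDOR_AMD 2         /* illustrative value only */

/* Hypothetical helper mirroring the new access path used throughout the diff. */
static inline int domain_vendor_is_amd(const struct domain *d)
{
    return d->arch.cpuid->x86_vendor == X86_VENDOR_AMD;
}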