[Xen-devel] [PATCH] x86/cpuid: Move vendor/family/model information from arch_domain to cpuid_policy
Rename the x86 field to x86_family so its name actually reflects its meaning.
No functional change.
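
For background (an illustrative sketch only, not part of this patch): the cached
family/model values follow the conventional CPUID leaf 1 EAX decode that helpers
such as get_cpu_family() perform, roughly:

  #include <stdint.h>
  #include <stdio.h>

  /* Display family: bits 11:8, plus extended family (bits 27:20) when 0xf. */
  static unsigned int decode_family(uint32_t eax)
  {
      unsigned int fam = (eax >> 8) & 0xf;

      if ( fam == 0xf )
          fam += (eax >> 20) & 0xff;

      return fam;
  }

  /* Display model: bits 7:4, with extended model (bits 19:16) folded in for
   * family >= 6 (simplified; vendor manuals differ slightly on the condition). */
  static unsigned int decode_model(uint32_t eax)
  {
      unsigned int model = (eax >> 4) & 0xf;

      if ( decode_family(eax) >= 0x6 )
          model |= (eax >> 12) & 0xf0;

      return model;
  }

  int main(void)
  {
      uint32_t eax = 0x000806ea; /* Example signature: family 6, model 0x8e. */

      printf("family %#x model %#x stepping %#x\n",
             decode_family(eax), decode_model(eax), eax & 0xf);
      return 0;
  }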
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
---
xen/arch/x86/cpuid.c | 11 +++++++----
xen/arch/x86/domain.c | 4 ----
xen/arch/x86/domctl.c | 11 +++++------
xen/arch/x86/hvm/emulate.c | 2 +-
xen/arch/x86/hvm/hvm.c | 2 +-
xen/arch/x86/hvm/ioreq.c | 5 +++--
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/mm.c | 4 ++--
xen/arch/x86/mm/shadow/common.c | 2 +-
xen/arch/x86/traps.c | 2 +-
xen/include/asm-x86/cpuid.h | 3 +++
xen/include/asm-x86/domain.h | 5 -----
13 files changed, 26 insertions(+), 29 deletions(-)
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index b685874..1699630 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -130,6 +130,10 @@ static void __init calculate_raw_policy(void)
for ( i = 1; i < min(ARRAY_SIZE(p->extd.raw),
p->extd.max_leaf + 1 - 0x80000000ul); ++i )
cpuid_leaf(0x80000000 + i, &p->extd.raw[i]);
+
+ p->x86_vendor = boot_cpu_data.x86_vendor;
+ p->x86_family = boot_cpu_data.x86;
+ p->x86_model = boot_cpu_data.x86_model;
}
static void __init calculate_host_policy(void)
@@ -572,7 +576,7 @@ static void pv_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
res->d = p->extd.e1d;
/* If not emulating AMD, clear the duplicated features in e1d. */
- if ( currd->arch.x86_vendor != X86_VENDOR_AMD )
+ if ( p->x86_vendor != X86_VENDOR_AMD )
res->d &= ~CPUID_COMMON_1D_FEATURES;
/*
@@ -785,7 +789,7 @@ static void hvm_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
res->d = p->extd.e1d;
/* If not emulating AMD, clear the duplicated features in e1d. */
- if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+ if ( p->x86_vendor != X86_VENDOR_AMD )
res->d &= ~CPUID_COMMON_1D_FEATURES;
/* fast-forward MSR_APIC_BASE.EN if it hasn't already been clobbered. */
else if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
@@ -809,8 +813,7 @@ static void hvm_cpuid(uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res)
res->d &= ~cpufeat_mask(X86_FEATURE_PSE36);
/* SYSCALL is hidden outside of long mode on Intel. */
- if ( d->arch.x86_vendor == X86_VENDOR_INTEL &&
- !hvm_long_mode_enabled(v))
+ if ( p->x86_vendor == X86_VENDOR_INTEL && !hvm_long_mode_enabled(v) )
res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
break;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 319cc8a..42ad7d0 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -616,10 +616,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
}
- d->arch.x86_vendor = boot_cpu_data.x86_vendor;
- d->arch.x86 = boot_cpu_data.x86;
- d->arch.x86_model = boot_cpu_data.x86_model;
-
d->arch.ioport_caps =
rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
rc = -ENOMEM;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index b01a1f9..bb4ca5a 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -78,12 +78,11 @@ static void update_domain_cpuid_info(struct domain *d,
switch ( ctl->input[0] )
{
case 0: {
- int old_vendor = d->arch.x86_vendor;
+ int old_vendor = p->x86_vendor;
- d->arch.x86_vendor = get_cpu_vendor(
- ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
+ p->x86_vendor = get_cpu_vendor(ctl->ebx, ctl->ecx, ctl->edx, gcv_guest);
- if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
+ if ( is_hvm_domain(d) && (p->x86_vendor != old_vendor) )
{
struct vcpu *v;
@@ -95,7 +94,7 @@ static void update_domain_cpuid_info(struct domain *d,
}
case 1:
- d->arch.x86 = get_cpu_family(ctl->eax, &d->arch.x86_model, NULL);
+ p->x86_family = get_cpu_family(ctl->eax, &p->x86_model, NULL);
if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
{
@@ -216,7 +215,7 @@ static void update_domain_cpuid_info(struct domain *d,
ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);
/* If not emulating AMD, clear the duplicated features in e1d. */
- if ( d->arch.x86_vendor != X86_VENDOR_AMD )
+ if ( p->x86_vendor != X86_VENDOR_AMD )
edx &= ~CPUID_COMMON_1D_FEATURES;
switch ( boot_cpu_data.x86_vendor )
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index e22740f..0d21fe1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1910,7 +1910,7 @@ void hvm_emulate_init_once(
hvmemul_ctxt->validate = validate;
hvmemul_ctxt->ctxt.regs = regs;
- hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;
+ hvmemul_ctxt->ctxt.vendor = curr->domain->arch.cpuid->x86_vendor;
hvmemul_ctxt->ctxt.force_writeback = true;
if ( cpu_has_vmx )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2ec0800..63748dc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3619,7 +3619,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
{
struct vcpu *cur = current;
bool should_emulate =
- cur->domain->arch.x86_vendor != boot_cpu_data.x86_vendor;
+ cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
struct hvm_emulate_ctxt ctxt;
hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 2830f6c..36a1c74 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -1140,8 +1140,9 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
(p->addr & 3);
/* AMD extended configuration space access? */
if ( CF8_ADDR_HI(cf8) &&
- d->arch.x86_vendor == X86_VENDOR_AMD &&
- d->arch.x86 >= 0x10 && d->arch.x86 <= 0x17 )
+ d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
+ d->arch.cpuid->x86_family >= 0x10 &&
+ d->arch.cpuid->x86_family <= 0x17 )
{
uint64_t msr_val;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index ae8e2c4..e8ef88d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -591,7 +591,7 @@ static void svm_update_guest_vendor(struct vcpu *v)
u32 bitmap = vmcb_get_exception_intercepts(vmcb);
if ( opt_hvm_fep ||
- (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+ (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
bitmap |= (1U << TRAP_invalid_op);
else
bitmap &= ~(1U << TRAP_invalid_op);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 61925cf..a5e5ffd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -544,7 +544,7 @@ void vmx_update_exception_bitmap(struct vcpu *v)
static void vmx_update_guest_vendor(struct vcpu *v)
{
if ( opt_hvm_fep ||
- (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+ (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
else
v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d707d1c..a5521f1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5358,7 +5358,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
struct ptwr_emulate_ctxt ptwr_ctxt = {
.ctxt = {
.regs = regs,
- .vendor = d->arch.x86_vendor,
+ .vendor = d->arch.cpuid->x86_vendor,
.addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
.sp_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
.swint_emulate = x86_swint_emulate_none,
@@ -5514,7 +5514,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
struct x86_emulate_ctxt ctxt = {
.regs = regs,
- .vendor = v->domain->arch.x86_vendor,
+ .vendor = v->domain->arch.cpuid->x86_vendor,
.addr_size = addr_size,
.sp_size = addr_size,
.swint_emulate = x86_swint_emulate_none,
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 4113351..e4ccf92 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -330,7 +330,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
memset(sh_ctxt, 0, sizeof(*sh_ctxt));
sh_ctxt->ctxt.regs = regs;
- sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
+ sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
/* Segment cache initialisation. Primed with CS. */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 7bb42ac..37814b3 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2990,7 +2990,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
struct domain *currd = curr->domain;
struct priv_op_ctxt ctxt = {
.ctxt.regs = regs,
- .ctxt.vendor = currd->arch.x86_vendor,
+ .ctxt.vendor = currd->arch.cpuid->x86_vendor,
};
int rc;
unsigned int eflags, ar;
diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h
index 5b1448a..01791e4 100644
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -200,6 +200,9 @@ struct cpuid_policy
#undef __DECL_BITFIELD
#undef _DECL_BITFIELD
#undef DECL_BITFIELD
+
+ /* Values calculated from raw data above. */
+ uint8_t x86_vendor, x86_family, x86_model;
};
/* Fill in a featureset bitmap from a CPUID policy. */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 896e78d..90c422e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -340,11 +340,6 @@ struct arch_domain
/* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
bool_t auto_unmask;
- /* Values snooped from updates to cpuids[] (below). */
- u8 x86; /* CPU family */
- u8 x86_vendor; /* CPU vendor */
- u8 x86_model; /* CPU model */
-
/*
* The width of the FIP/FDP register in the FPU that needs to be
* saved/restored during a context switch. This is needed because
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel