[Xen-devel] [PATCH 09/27] x86/cpuid: Dispatch cpuid_hypervisor_leaves() from guest_cpuid()
... rather than from the legacy path. Update the API to match guest_cpuid(),
and remove its dependence on current.

Make use of guest_cpuid() unconditionally zeroing res to avoid repeated
re-zeroing. To use a const struct domain, domain_cpuid() needs to be
const-corrected.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
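[Editor's note, not part of the patch: a minimal standalone sketch of how the
reworked cpuid_hypervisor_leaves() carves a requested leaf into a 0x100-aligned
base and an index within that block. The leaf values are hypothetical examples,
and XEN_CPUID_MAX_NUM_LEAVES is assumed to be 4 purely for illustration.]

#include <stdio.h>

/* Assumed value, for illustration only. */
#define XEN_CPUID_MAX_NUM_LEAVES 4

int main(void)
{
    unsigned int leaves[] = { 0x40000002,   /* plain guest, Xen leaf 2 */
                              0x40000102,   /* Viridian guest, Xen leaf 2 */
                              0x40000200 }; /* beyond the Xen leaf blocks */

    for ( unsigned int i = 0; i < sizeof(leaves) / sizeof(leaves[0]); i++ )
    {
        unsigned int leaf = leaves[i];
        unsigned int base = leaf & ~0xff;   /* 0x100-aligned block base */
        unsigned int idx  = leaf - base;    /* leaf within that block */

        if ( base > 0x40000100 || idx > XEN_CPUID_MAX_NUM_LEAVES )
            printf("leaf %#x: reserved, result stays zeroed\n", leaf);
        else
            printf("leaf %#x: base %#x, idx %u\n", leaf, base, idx);
    }

    return 0;
}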
xen/arch/x86/cpuid.c | 3 ++
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/hvm/hvm.c | 3 --
xen/arch/x86/traps.c | 102 ++++++++++++++++------------------------
xen/include/asm-x86/domain.h | 2 +-
xen/include/asm-x86/processor.h | 4 +-
6 files changed, 48 insertions(+), 68 deletions(-)
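[Editor's note, not part of the patch: as background for the Viridian-induced
shift of the Xen leaves to 0x40000100, a guest-side sketch of how the Xen base
is conventionally discovered. It assumes the __cpuid() macro from GCC/clang's
<cpuid.h>; names and scan bounds follow common guest practice rather than
anything in this patch.]

#include <stdint.h>
#include <string.h>
#include <cpuid.h>          /* GCC/clang __cpuid() macro */

/*
 * Sketch only: guests locate the Xen leaves by scanning hypervisor bases in
 * 0x100 steps and checking for the "XenVMMXenVMM" signature.  On a
 * Viridian-enabled guest the signature is found at 0x40000100 rather than
 * 0x40000000, which is the shift the dispatch below has to cope with.
 */
static uint32_t xen_cpuid_base(void)
{
    uint32_t base, eax, signature[3];

    for ( base = 0x40000000; base < 0x40010000; base += 0x100 )
    {
        __cpuid(base, eax, signature[0], signature[1], signature[2]);

        /* eax of the block's first leaf reports its largest leaf. */
        if ( !memcmp("XenVMMXenVMM", signature, 12) && (eax - base) >= 2 )
            return base;
    }

    return 0;    /* Not running on Xen, or the leaves are hidden. */
}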
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index c38e477..86f598f 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -332,6 +332,9 @@ void guest_cpuid(const struct vcpu *v, unsigned int leaf,
case 0x40000000 ... 0x400000ff:
if ( is_viridian_domain(d) )
return cpuid_viridian_leaves(v, leaf, subleaf, res);
+ /* Fallthrough. */
+ case 0x40000100 ... 0x4fffffff:
+ return cpuid_hypervisor_leaves(v, leaf, subleaf, res);
}
/* {pv,hvm}_cpuid() have this expectation. */
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7d33c41..b554a9c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2623,7 +2623,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
}
void domain_cpuid(
- struct domain *d,
+ const struct domain *d,
unsigned int input,
unsigned int sub_input,
unsigned int *eax,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ce2785f..bdf8ca8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3353,9 +3353,6 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( !edx )
edx = &dummy;
- if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
- return;
-
if ( input & 0x7fffffff )
{
/*
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 02f2d5c..8b395f8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -902,20 +902,18 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
return 0;
}
-int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
- uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+void cpuid_hypervisor_leaves(const struct vcpu *v, unsigned int leaf,
+ unsigned int subleaf, struct cpuid_leaf *res)
{
- struct vcpu *curr = current;
- struct domain *currd = curr->domain;
- /* Optionally shift out of the way of Viridian architectural leaves. */
- uint32_t base = is_viridian_domain(currd) ? 0x40000100 : 0x40000000;
+ const struct domain *d = v->domain;
+ unsigned int base = leaf & ~0xff;
+ unsigned int idx = leaf - base;
uint32_t limit, dummy;
- idx -= base;
- if ( idx > XEN_CPUID_MAX_NUM_LEAVES )
- return 0; /* Avoid unnecessary pass through domain_cpuid() */
+ if ( base > 0x40000100 || idx > XEN_CPUID_MAX_NUM_LEAVES )
+ return; /* Avoid unnecessary pass through domain_cpuid() */
- domain_cpuid(currd, base, 0, &limit, &dummy, &dummy, &dummy);
+ domain_cpuid(d, base, 0, &limit, &dummy, &dummy, &dummy);
if ( limit == 0 )
/* Default number of leaves */
limit = XEN_CPUID_MAX_NUM_LEAVES;
@@ -929,83 +927,71 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
limit = XEN_CPUID_MAX_NUM_LEAVES;
}
- if ( idx > limit )
- return 0;
+ if ( idx > limit )
+ return;
switch ( idx )
{
case 0:
- *eax = base + limit; /* Largest leaf */
- *ebx = XEN_CPUID_SIGNATURE_EBX;
- *ecx = XEN_CPUID_SIGNATURE_ECX;
- *edx = XEN_CPUID_SIGNATURE_EDX;
+ res->a = base + limit; /* Largest leaf */
+ res->b = XEN_CPUID_SIGNATURE_EBX;
+ res->c = XEN_CPUID_SIGNATURE_ECX;
+ res->d = XEN_CPUID_SIGNATURE_EDX;
break;
case 1:
- *eax = (xen_major_version() << 16) | xen_minor_version();
- *ebx = 0; /* Reserved */
- *ecx = 0; /* Reserved */
- *edx = 0; /* Reserved */
+ res->a = (xen_major_version() << 16) | xen_minor_version();
break;
case 2:
- *eax = 1; /* Number of hypercall-transfer pages */
- *ebx = 0x40000000; /* MSR base address */
- if ( is_viridian_domain(currd) )
- *ebx = 0x40000200;
- *ecx = 0; /* Features 1 */
- *edx = 0; /* Features 2 */
- if ( is_pv_domain(currd) )
- *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
+ res->a = 1; /* Number of hypercall-transfer pages */
+ res->b = 0x40000000; /* MSR base address */
+ if ( is_viridian_domain(d) )
+ res->b = 0x40000200;
+ if ( is_pv_domain(d) )
+ res->c |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
break;
case 3: /* Time leaf. */
- switch ( sub_idx )
+ switch ( subleaf )
{
case 0: /* features */
- *eax = ((!!currd->arch.vtsc << 0) |
- (!!host_tsc_is_safe() << 1) |
- (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
- *ebx = currd->arch.tsc_mode;
- *ecx = currd->arch.tsc_khz;
- *edx = currd->arch.incarnation;
+ res->a = ((!!d->arch.vtsc << 0) |
+ (!!host_tsc_is_safe() << 1) |
+ (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
+ res->b = d->arch.tsc_mode;
+ res->c = d->arch.tsc_khz;
+ res->d = d->arch.incarnation;
break;
case 1: /* scale and offset */
{
uint64_t offset;
- if ( !currd->arch.vtsc )
- offset = currd->arch.vtsc_offset;
+ if ( !d->arch.vtsc )
+ offset = d->arch.vtsc_offset;
else
/* offset already applied to value returned by virtual rdtscp
*/
offset = 0;
- *eax = (uint32_t)offset;
- *ebx = (uint32_t)(offset >> 32);
- *ecx = currd->arch.vtsc_to_ns.mul_frac;
- *edx = (s8)currd->arch.vtsc_to_ns.shift;
+ res->a = (uint32_t)offset;
+ res->b = (uint32_t)(offset >> 32);
+ res->c = d->arch.vtsc_to_ns.mul_frac;
+ res->d = (s8)d->arch.vtsc_to_ns.shift;
break;
}
case 2: /* physical cpu_khz */
- *eax = cpu_khz;
- *ebx = *ecx = *edx = 0;
- break;
-
- default:
- *eax = *ebx = *ecx = *edx = 0;
+ res->a = cpu_khz;
break;
}
break;
case 4: /* HVM hypervisor leaf. */
- *eax = *ebx = *ecx = *edx = 0;
-
- if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
+ if ( !has_hvm_container_domain(d) || subleaf != 0 )
break;
if ( cpu_has_vmx_apic_reg_virt )
- *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
+ res->a |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
/*
* We want to claim that x2APIC is virtualized if APIC MSR accesses
@@ -1016,24 +1002,22 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
if ( cpu_has_vmx_virtualize_x2apic_mode &&
cpu_has_vmx_apic_reg_virt &&
cpu_has_vmx_virtual_intr_delivery )
- *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
+ res->a |= XEN_HVM_CPUID_X2APIC_VIRT;
/*
* Indicate that memory mapped from other domains (either grants or
* foreign pages) has valid IOMMU entries.
*/
- *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
+ res->a |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
/* Indicate presence of vcpu id and set it in ebx */
- *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
- *ebx = curr->vcpu_id;
+ res->a |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
+ res->b = v->vcpu_id;
break;
default:
BUG();
}
-
- return 1;
}
void pv_cpuid(struct cpu_user_regs *regs)
@@ -1047,9 +1031,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
subleaf = c = regs->_ecx;
d = regs->_edx;
- if ( cpuid_hypervisor_leaves(leaf, subleaf, &a, &b, &c, &d) )
- goto out;
-
if ( leaf & 0x7fffffff )
{
/*
@@ -1381,7 +1362,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
break;
}
- out:
regs->rax = a;
regs->rbx = b;
regs->rcx = c;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index d2e087f..1ae0cd3 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -617,7 +617,7 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *, unsigned long guest_cr4);
X86_CR4_OSXSAVE | X86_CR4_SMEP | \
X86_CR4_FSGSBASE | X86_CR4_SMAP))
-void domain_cpuid(struct domain *d,
+void domain_cpuid(const struct domain *d,
unsigned int input,
unsigned int sub_input,
unsigned int *eax,
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index aff115b..e43b956 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -610,8 +610,8 @@ struct stubs {
DECLARE_PER_CPU(struct stubs, stubs);
unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn);
-int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
- uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+void cpuid_hypervisor_leaves(const struct vcpu *v, unsigned int leaf,
+ unsigned int subleaf, struct cpuid_leaf *res);
int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val);
int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel