[Xen-devel] [PATCH v6 3/5] x86/cpuid: update signature of hvm_cr4_guest_valid_bits()
With the new cpuid infrastructure there is a domain-wide struct cpuid
policy and there is no need to pass a separate struct vcpu * into
hvm_cr4_guest_valid_bits() anymore. Make the function accept
struct domain * instead and update callers.

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v5 --> v6:
- Added brackets to expression in vmx.c and replaced 0 with false
- Added Reviewed-by
---
 xen/arch/x86/hvm/domain.c       | 3 ++-
 xen/arch/x86/hvm/hvm.c          | 7 +++----
 xen/arch/x86/hvm/svm/svmdebug.c | 4 ++--
 xen/arch/x86/hvm/vmx/vmx.c      | 4 ++--
 xen/arch/x86/hvm/vmx/vvmx.c     | 2 +-
 xen/include/asm-x86/hvm/hvm.h   | 2 +-
 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 60474649de..ce15ce0470 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -111,6 +111,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
 /* Called by VCPUOP_initialise for HVM guests. */
 int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 {
+    const struct domain *d = v->domain;
     struct cpu_user_regs *uregs = &v->arch.user_regs;
     struct segment_register cs, ds, ss, es, tr;
     const char *errstr;
@@ -272,7 +273,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
     if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
         v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
 
-    if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
+    if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
     {
         gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
                 v->arch.hvm_vcpu.guest_cr[4]);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5759c73dd4..fe253034f2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -931,9 +931,8 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
         X86_CR0_CD | X86_CR0_PG)))
 
 /* These bits in CR4 can be set by the guest. */
-unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore)
+unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore)
 {
-    const struct domain *d = v->domain;
     const struct cpuid_policy *p;
     bool mce, vmxe;
 
@@ -1000,7 +999,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
-    if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(v, 1) )
+    if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(d, true) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
                d->domain_id, ctxt.cr4);
@@ -2350,7 +2349,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
     struct vcpu *v = current;
     unsigned long old_cr;
 
-    if ( value & ~hvm_cr4_guest_valid_bits(v, 0) )
+    if ( value & ~hvm_cr4_guest_valid_bits(v->domain, false) )
     {
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Guest attempts to set reserved bit in CR4: %lx",
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 091c58fa1b..6c215d19fe 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -121,9 +121,9 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
           (cr3 >> v->domain->arch.cpuid->extd.maxphysaddr))) )
         PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", cr3);
 
-    if ( cr4 & ~hvm_cr4_guest_valid_bits(v, false) )
+    if ( cr4 & ~hvm_cr4_guest_valid_bits(v->domain, false) )
         PRINTF("CR4: invalid bits are set (%#"PRIx64", valid: %#"PRIx64")\n",
-               cr4, hvm_cr4_guest_valid_bits(v, false));
+               cr4, hvm_cr4_guest_valid_bits(v->domain, false));
 
     if ( vmcb_get_dr6(vmcb) >> 32 )
         PRINTF("DR6: bits [63:32] are not zero (%#"PRIx64")\n",
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c5cc96339e..847c314a08 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1598,8 +1598,8 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
          * Update CR4 host mask to only trap when the guest tries to set
          * bits that are controlled by the hypervisor.
          */
-        v->arch.hvm_vmx.cr4_host_mask = HVM_CR4_HOST_MASK | X86_CR4_PKE |
-                                        ~hvm_cr4_guest_valid_bits(v, 0);
+        v->arch.hvm_vmx.cr4_host_mask = (HVM_CR4_HOST_MASK | X86_CR4_PKE |
+                                         ~hvm_cr4_guest_valid_bits(v->domain, false));
         v->arch.hvm_vmx.cr4_host_mask |= v->arch.hvm_vmx.vmx_realmode ?
                                          X86_CR4_VME : 0;
         v->arch.hvm_vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index dcd3b28f86..43f7297c04 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2140,7 +2140,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         data = X86_CR4_VMXE;
         break;
     case MSR_IA32_VMX_CR4_FIXED1:
-        data = hvm_cr4_guest_valid_bits(v, 0);
+        data = hvm_cr4_guest_valid_bits(d, false);
         break;
     case MSR_IA32_VMX_MISC:
         /* Do not support CR3-target feature now */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 2376ed6912..bfd42b065d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -621,7 +621,7 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                            signed int cr0_pg);
-unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);
+unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 
 /*
  * This must be defined as a macro instead of an inline function,
-- 
2.14.1
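
For any out-of-tree callers, the conversion is mechanical: pass the vcpu's
domain plus an explicit bool instead of the vcpu plus a 0/1 flag. A minimal
sketch under that assumption (the wrapper function below is illustrative
only and is not part of this patch):

    /* Hypothetical helper, shown only to illustrate the new calling
     * convention of hvm_cr4_guest_valid_bits(); not from this series. */
    static bool hvm_cr4_value_ok(const struct vcpu *v, unsigned long cr4)
    {
        /* Old convention: hvm_cr4_guest_valid_bits(v, 0) */
        return !(cr4 & ~hvm_cr4_guest_valid_bits(v->domain, false));
    }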