[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v5 1/5] x86/pvh: Set 32b PVH guest mode in XEN_DOMCTL_set_address_size
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> --- xen/arch/x86/domain.c | 27 ++++++++++++++++----------- xen/arch/x86/hvm/hvm.c | 24 +++++++++++++++++++++++- xen/arch/x86/hvm/vmx/vmcs.c | 2 +- xen/arch/x86/hvm/vmx/vmx.c | 19 +++++++++++++++++++ xen/include/asm-x86/hvm/hvm.h | 2 ++ 5 files changed, 61 insertions(+), 13 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 045f6ff..7fa8b9c 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -366,7 +366,11 @@ int switch_native(struct domain *d) for_each_vcpu( d, v ) { free_compat_arg_xlat(v); - release_compat_l4(v); + + if ( !is_pvh_domain(d) ) + release_compat_l4(v); + else + hvm_set_mode(v, 8); } return 0; @@ -377,25 +381,26 @@ int switch_compat(struct domain *d) struct vcpu *v; int rc; - if ( is_pvh_domain(d) ) - { - printk(XENLOG_G_INFO - "Xen currently does not support 32bit PVH guests\n"); - return -EINVAL; - } - if ( !may_switch_mode(d) ) return -EACCES; if ( is_pv_32bit_domain(d) ) return 0; - d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1; + d->arch.has_32bit_shinfo = 1; + if ( is_pv_domain(d) ) + d->arch.is_32bit_pv = 1; for_each_vcpu( d, v ) { rc = setup_compat_arg_xlat(v); if ( !rc ) - rc = setup_compat_l4(v); + { + if ( !is_pvh_domain(d) ) + rc = setup_compat_l4(v); + else + rc = hvm_set_mode(v, 4); + } + if ( rc ) goto undo_and_fail; } @@ -410,7 +415,7 @@ int switch_compat(struct domain *d) { free_compat_arg_xlat(v); - if ( !pagetable_is_null(v->arch.guest_table) ) + if ( !is_pvh_domain(d) && !pagetable_is_null(v->arch.guest_table) ) release_compat_l4(v); } diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 615fa89..90ba676 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2424,7 +2424,6 @@ int hvm_vcpu_initialise(struct vcpu *v) if ( is_pvh_domain(d) ) { - v->arch.hvm_vcpu.hcall_64bit = 1; /* PVH 32bitfixme. */ /* This is for hvm_long_mode_enabled(v). 
*/ v->arch.hvm_vcpu.guest_efer = EFER_LMA | EFER_LME; return 0; @@ -6825,6 +6824,29 @@ bool_t altp2m_vcpu_emulate_ve(struct vcpu *v) return 0; } +int hvm_set_mode(struct vcpu *v, int mode) +{ + + switch ( mode ) + { + case 4: + v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME); + break; + case 8: + v->arch.hvm_vcpu.guest_efer |= (EFER_LMA | EFER_LME); + break; + default: + return -EOPNOTSUPP; + } + + hvm_update_guest_efer(v); + + if ( hvm_funcs.set_mode ) + return hvm_funcs.set_mode(v, mode); + + return 0; +} + /* * Local variables: * mode: C diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index a0a97e7..08f2078 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -1160,7 +1160,7 @@ static int construct_vmcs(struct vcpu *v) __vmwrite(GUEST_FS_AR_BYTES, 0xc093); __vmwrite(GUEST_GS_AR_BYTES, 0xc093); if ( is_pvh_domain(d) ) - /* CS.L == 1, exec, read/write, accessed. PVH 32bitfixme. */ + /* CS.L == 1, exec, read/write, accessed. */ __vmwrite(GUEST_CS_AR_BYTES, 0xa09b); else __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */ diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 2582cdd..bbec0e8 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -1886,6 +1886,24 @@ static bool_t vmx_vcpu_emulate_ve(struct vcpu *v) return rc; } +static int vmx_set_mode(struct vcpu *v, int mode) +{ + unsigned long attr; + + if ( !is_pvh_vcpu(v) ) + return 0; + + ASSERT((mode == 4) || (mode == 8)); + + attr = (mode == 4) ? 
0xc09b : 0xa09b; + + vmx_vmcs_enter(v); + __vmwrite(GUEST_CS_AR_BYTES, attr); + vmx_vmcs_exit(v); + + return 0; +} + static struct hvm_function_table __initdata vmx_function_table = { .name = "VMX", .cpu_up_prepare = vmx_cpu_up_prepare, @@ -1945,6 +1963,7 @@ static struct hvm_function_table __initdata vmx_function_table = { .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf, .enable_msr_exit_interception = vmx_enable_msr_exit_interception, .is_singlestep_supported = vmx_is_singlestep_supported, + .set_mode = vmx_set_mode, .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp, .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve, .altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve, diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 68b216c..c21a768 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -206,6 +206,7 @@ struct hvm_function_table { void (*enable_msr_exit_interception)(struct domain *d); bool_t (*is_singlestep_supported)(void); + int (*set_mode)(struct vcpu *v, int mode); /* Alternate p2m */ void (*altp2m_vcpu_update_p2m)(struct vcpu *v); @@ -246,6 +247,7 @@ void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc); u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc); #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0) +int hvm_set_mode(struct vcpu *v, int mode); void hvm_init_guest_time(struct domain *d); void hvm_set_guest_time(struct vcpu *v, u64 guest_time); u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc); -- 1.8.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |
servers 24x7x365 and backed by RackSpace's Fanatical Support®. |