Re: [Xen-devel] [RFC PATCH 5/8]: PVH: smp changes
On Thu, 2012-08-16 at 02:04 +0100, Mukesh Rathor wrote:
> ---
> arch/x86/xen/smp.c | 33 +++++++++++++++++++++++++--------
> 1 files changed, 25 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
> index cdf269d..017d7fa 100644
> --- a/arch/x86/xen/smp.c
> +++ b/arch/x86/xen/smp.c
> @@ -68,9 +68,11 @@ static void __cpuinit cpu_bringup(void)
> touch_softlockup_watchdog();
> preempt_disable();
>
> - xen_enable_sysenter();
> - xen_enable_syscall();
> -
> + /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
> + if (!xen_pvh_domain()) {
Can this be gated on X86_FEATURE_BLAH, with appropriate masking for the
PV vs PVH cases?
Looks like enable_sysenter actually does that already, maybe
enable_syscall could too?
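Something along these lines, perhaps (untested sketch; the early masking
hook and the feature bit for the native 64-bit SYSCALL case are
placeholders, since there is no real X86_FEATURE_* bit for that):

/* Untested sketch: if the PVH boot path masks the compat bits early,
 * e.g.
 *
 *	if (xen_pvh_domain()) {
 *		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
 *		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 *	}
 *
 * then xen_enable_sysenter() already falls through harmlessly, and
 * xen_enable_syscall() could gain a similar early return instead of
 * wrapping the call sites in cpu_bringup(). */
static void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	/* Placeholder check: native 64-bit SYSCALL has no feature bit of
	 * its own, so this would be the hypothetical X86_FEATURE_BLAH
	 * (or an explicit PVH test). */
	if (!boot_cpu_has(X86_FEATURE_BLAH))
		return;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0)
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif
}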
> + xen_enable_sysenter();
> + xen_enable_syscall();
> + }
> cpu = smp_processor_id();
> smp_store_cpu_info(cpu);
> cpu_data(cpu).x86_max_cores = 1;
> @@ -230,10 +232,11 @@ static void __init xen_smp_prepare_boot_cpu(void)
> BUG_ON(smp_processor_id() != 0);
> native_smp_prepare_boot_cpu();
>
> - /* We've switched to the "real" per-cpu gdt, so make sure the
> - old memory can be recycled */
> - make_lowmem_page_readwrite(xen_initial_gdt);
> -
> + if (!xen_pvh_domain()) {
XENFEAT_writable_page_tables, I think? And possibly pushed down into
make_lowmem_page_readwrite?
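i.e. roughly the body of the existing mmu.c helper with an early return
(untested sketch from memory; whether writable_page_tables is really the
right bit for PVH, as opposed to e.g. auto_translated_physmap, is exactly
the question above):

/* Untested sketch: push the feature check into the helper itself so
 * callers like xen_smp_prepare_boot_cpu() need no PVH special case. */
void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	/* No hypercall needed if the guest can already write its own
	 * page tables directly. */
	if (xen_feature(XENFEAT_writable_page_tables))
		return;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}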
> + /* We've switched to the "real" per-cpu gdt, so make sure the
> + * old memory can be recycled */
> + make_lowmem_page_readwrite(xen_initial_gdt);
> + }
> xen_filter_cpu_maps();
> xen_setup_vcpu_info_placement();
> }
> @@ -312,6 +315,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
>
> memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
>
> + if (!xen_pvh_domain()) {
> ctxt->user_regs.ds = __USER_DS;
> ctxt->user_regs.es = __USER_DS;
>
> @@ -339,7 +343,20 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
> (unsigned long)xen_hypervisor_callback;
> ctxt->failsafe_callback_eip =
> (unsigned long)xen_failsafe_callback;
> -
> + } else {
> + ctxt->user_regs.ds = __KERNEL_DS;
> + ctxt->user_regs.es = 0;
> + ctxt->user_regs.gs = 0;
Not __KERNEL_DS for es too?
Not sure about gs -- shouldn't that point to some per-cpu segment or
something? Maybe that happens somewhere else? (in which case a comment?)
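For comparison, native 64-bit doesn't use a GDT segment for per-cpu data
at all; load_percpu_segment() does roughly this (from memory, so treat as
illustrative):

#ifdef CONFIG_X86_64
	/* %gs selector is null; the per-cpu base lives in MSR_GS_BASE. */
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE,
	       (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif

So gs = 0 plus the gs_base setup below may well be the PVH equivalent of
that, but a comment saying so (and confirming that seeding the *user*
base is what's intended here) would help.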
> +
> + ctxt->gdt_frames[0] = (unsigned long)gdt;
> + ctxt->gdt_ents = (unsigned long)(GDT_SIZE - 1);
> +
> + /* Note: PVH is not supported on x86_32. */
> +#ifdef __x86_64__
ITYM CONFIG_X86_64?
> + ctxt->gs_base_user = (unsigned long)
> + per_cpu(irq_stack_union.gs_base, cpu);
> +#endif
> + }
> ctxt->user_regs.cs = __KERNEL_CS;
> ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel