[Xen-devel] [PATCH v3 11/17] x86: modify interrupt handlers to support stack switching
Modify the interrupt handlers to switch stacks on interrupt entry in
case they are running on a per-vcpu stack. The same applies to
returning to the guest: in case the context to be loaded is located on
a per-vcpu stack, switch to that stack before returning to the guest.

The NMI and MCE interrupt handlers share most of their code today. Use
the common part only after switching stacks, as this enables
calculating the correct stack address mostly at build time instead of
doing it all at runtime.

guest_cpu_user_regs() is modified to always return the correct user
registers address: either, as today, the one on the per physical cpu
stack, or the one on the per-vcpu stack. Depending on the usage, some
callers of guest_cpu_user_regs() need to be adapted to use
get_cpu_info() instead.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V3:
- rework SWITCH_FROM_VCPU_STACK_IST to take the ist as parameter
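As an aside for reviewers, a rough C model of what SWITCH_FROM_VCPU_STACK
does on entry (illustrative only, not part of the patch; the struct
layout, field types and STACK_SIZE are simplified stand-ins):

    /* Illustrative C model of SWITCH_FROM_VCPU_STACK; not part of the
     * patch. Assumes a cpu_info-like area sits at the top of the current
     * stack area, as GET_STACK_END assumes in asm_defns.h. */
    #include <stdint.h>

    #define STACK_SIZE    (8 * 4096)     /* simplified stand-in */
    #define ON_VCPUSTACK  0x00000001

    struct cpu_info {                    /* fields trimmed down */
        unsigned int flags;
        uintptr_t stack_bottom_cpu;      /* saved per physical cpu stack */
    };

    static struct cpu_info *get_stack_end(uintptr_t rsp)
    {
        /* GET_STACK_END: or in STACK_SIZE - 1 to reach the last byte of
         * the stack area; struct cpu_info ends right there. */
        return (struct cpu_info *)((rsp | (STACK_SIZE - 1)) + 1) - 1;
    }

    static uintptr_t switch_from_vcpu_stack(uintptr_t rsp)
    {
        struct cpu_info *info = get_stack_end(rsp);

        if ( info->flags & ON_VCPUSTACK )   /* entered on per-vcpu stack? */
            rsp = info->stack_bottom_cpu;   /* continue on physical stack */
        return rsp;
    }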
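The address computation in SWITCH_FROM_VCPU_STACK_IST(ist) can be checked
with a small standalone C model (again illustrative only; the base
address below is a made-up example value):

    /* Illustrative only, not part of the patch: GET_STACK_END leaves
     * stack_base + STACK_SIZE - 1 in %rax, and the macro subtracts
     * (STACK_SIZE - 1 - ist * PAGE_SIZE), which lands on
     * stack_base + ist * PAGE_SIZE, the top of that IST's stack page. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define STACK_SIZE  (8 * PAGE_SIZE)   /* simplified stand-in */

    static uintptr_t ist_stack_top(uintptr_t stack_end, unsigned long ist)
    {
        return stack_end - (STACK_SIZE - 1 - ist * PAGE_SIZE);
        /* == stack_base + ist * PAGE_SIZE */
    }

    int main(void)
    {
        uintptr_t base = 0x100000;                /* hypothetical base */
        uintptr_t end  = base + STACK_SIZE - 1;

        /* IST_DF == 1: prints base + PAGE_SIZE (0x101000), the top of
         * the double fault IST page in the layout from current.h. */
        printf("%#lx\n", (unsigned long)ist_stack_top(end, 1));
        return 0;
    }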
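For callers, the rule of thumb is sketched below (hypothetical example
assuming the headers from this series; example_caller is not a real
function):

    /* Hypothetical example, not part of the patch: with per-vcpu stacks
     * the guest register frame and the per physical cpu data can live on
     * different stacks, so use the accessor matching what is needed. */
    static void example_caller(void)
    {
        /* Guest register frame: guest_cpu_user_regs() now resolves to
         * the per-vcpu stack copy when VCPUSTACK_ACTIVE is set. */
        struct cpu_user_regs *regs = guest_cpu_user_regs();

        /* Per physical cpu data (processor_id, flags, ...): always via
         * get_cpu_info(). */
        struct cpu_info *ci = get_cpu_info();

        /* Avoid &ci->guest_cpu_user_regs: it can name the inactive frame,
         * which is why wait.c below is switched to guest_cpu_user_regs(). */
        (void)regs;
        (void)ci;
    }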
---
 xen/arch/x86/pv/xpti-stub.S       |  4 ++--
 xen/arch/x86/x86_64/asm-offsets.c |  2 ++
 xen/arch/x86/x86_64/entry.S       | 14 ++++++++++++--
 xen/common/wait.c                 |  8 ++++----
 xen/include/asm-x86/asm_defns.h   | 19 +++++++++++++++++++
 xen/include/asm-x86/current.h     | 15 ++++++++++++++-
 xen/include/asm-x86/processor.h   | 12 ++++++------
 7 files changed, 59 insertions(+), 15 deletions(-)

diff --git a/xen/arch/x86/pv/xpti-stub.S b/xen/arch/x86/pv/xpti-stub.S
index efa1e3f661..92f2ef6dac 100644
--- a/xen/arch/x86/pv/xpti-stub.S
+++ b/xen/arch/x86/pv/xpti-stub.S
@@ -26,7 +26,7 @@ ENTRY(xpti_lstar)
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
         mov   %rsp, %r12
-
+        SWITCH_FROM_VCPU_STACK
         sti
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
@@ -46,7 +46,7 @@ ENTRY(xpti_cstar)
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
         movq  %rsp, %r12
-
+        SWITCH_FROM_VCPU_STACK
         sti
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index cc7753c0a9..b0060be261 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -141,6 +141,8 @@ void __dummy__(void)
     OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
     OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
     OFFSET(CPUINFO_bti_ist_info, struct cpu_info, bti_ist_info);
+    OFFSET(CPUINFO_stack_bottom_cpu, struct cpu_info, stack_bottom_cpu);
+    OFFSET(CPUINFO_flags, struct cpu_info, flags);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 69590d0b17..909f6eea66 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -45,6 +45,7 @@ restore_all_guest:
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
         SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=cpuinfo, Clob: cd */
 
+        SWITCH_TO_VCPU_STACK
         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)
         jz    iret_exit_to_guest
@@ -202,7 +203,6 @@ process_trap:
         jmp   test_all_events
 
 ENTRY(sysenter_entry)
-        sti
         pushq $FLAT_USER_SS
         pushq $0
         pushfq
@@ -214,6 +214,8 @@ GLOBAL(sysenter_eflags_saved)
         movl  $TRAP_syscall, 4(%rsp)
         SAVE_ALL
         mov   %rsp, %r12
+        SWITCH_FROM_VCPU_STACK
+        sti
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
@@ -254,6 +256,7 @@ ENTRY(int80_direct_trap)
         movl  $0x80, 4(%rsp)
         SAVE_ALL
         mov   %rsp, %r12
+        SWITCH_FROM_VCPU_STACK
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %r12=regs, %rsp=cpuinfo, Clob: acd */
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
@@ -422,6 +425,7 @@ ENTRY(dom_crash_sync_extable)
 ENTRY(common_interrupt)
         SAVE_ALL CLAC
         mov   %rsp, %r12
+        SWITCH_FROM_VCPU_STACK
 
         GET_STACK_END(14)
@@ -449,6 +453,7 @@ ENTRY(page_fault)
 GLOBAL(handle_exception)
         SAVE_ALL CLAC
         mov   %rsp, %r12
+        SWITCH_FROM_VCPU_STACK
 
         GET_STACK_END(14)
@@ -631,6 +636,7 @@ ENTRY(double_fault)
         /* Set AC to reduce chance of further SMAP faults */
         SAVE_ALL STAC
         movq  %rsp, %r12
+        SWITCH_FROM_VCPU_STACK_IST(IST_DF)
 
         GET_STACK_END(14)
@@ -653,10 +659,11 @@ ENTRY(early_page_fault)
 ENTRY(nmi)
         pushq $0
         movl  $TRAP_nmi,4(%rsp)
-handle_ist_exception:
         SAVE_ALL CLAC
         mov   %rsp, %r12
+        SWITCH_FROM_VCPU_STACK_IST(IST_NMI)
+handle_ist_exception:
         GET_STACK_END(14)
 
         SPEC_CTRL_ENTRY_FROM_INTR_IST /* Req: %r12=regs, %r14=end, Clob: acd */
@@ -703,6 +710,9 @@ handle_ist_exception:
 ENTRY(machine_check)
         pushq $0
         movl  $TRAP_machine_check,4(%rsp)
+        SAVE_ALL CLAC
+        mov   %rsp, %r12
+        SWITCH_FROM_VCPU_STACK_IST(IST_MCE)
         jmp   handle_ist_exception
 
 /* Enable NMIs.  No special register assumptions. Only %rax is not preserved. */
diff --git a/xen/common/wait.c b/xen/common/wait.c
index a57bc10d61..fbb5d996e5 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -122,10 +122,10 @@ void wake_up_all(struct waitqueue_head *wq)
 
 static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
 {
-    struct cpu_info *cpu_info = get_cpu_info();
+    struct cpu_user_regs *user_regs = guest_cpu_user_regs();
     struct vcpu *curr = current;
     unsigned long dummy;
-    u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
+    u32 entry_vector = user_regs->entry_vector;
 
     ASSERT(wqv->esp == 0);
 
@@ -160,7 +160,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
         "pop %%r11; pop %%r10; pop %%r9;  pop %%r8;"
         "pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax"
         : "=&S" (wqv->esp), "=&c" (dummy), "=&D" (dummy)
-        : "i" (PAGE_SIZE), "0" (0), "1" (cpu_info), "2" (wqv->stack)
+        : "i" (PAGE_SIZE), "0" (0), "1" (user_regs), "2" (wqv->stack)
         : "memory" );
 
     if ( unlikely(wqv->esp == 0) )
@@ -169,7 +169,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
         domain_crash_synchronous();
     }
 
-    cpu_info->guest_cpu_user_regs.entry_vector = entry_vector;
+    user_regs->entry_vector = entry_vector;
 }
 
 static void __finish_wait(struct waitqueue_vcpu *wqv)
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 7d26391be8..f626cc6134 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -7,6 +7,7 @@
 #include <asm/asm-offsets.h>
 #endif
 #include <asm/bug.h>
+#include <asm/current.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/percpu.h>
@@ -136,6 +137,24 @@ void ret_from_intr(void);
     GET_STACK_END(reg);                           \
     movq STACK_CPUINFO_FIELD(current_vcpu)(%r##reg), %r##reg
 
+#define SWITCH_FROM_VCPU_STACK                                   \
+    GET_STACK_END(ax);                                           \
+    testb $ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax);       \
+    jz    1f;                                                    \
+    movq  STACK_CPUINFO_FIELD(stack_bottom_cpu)(%rax), %rsp;     \
+1:
+
+#define SWITCH_FROM_VCPU_STACK_IST(ist)                          \
+    GET_STACK_END(ax);                                           \
+    testb $ON_VCPUSTACK, STACK_CPUINFO_FIELD(flags)(%rax);       \
+    jz    1f;                                                    \
+    sub   $(STACK_SIZE - 1 - ist * PAGE_SIZE), %rax;             \
+    mov   %rax, %rsp;                                            \
+1:
+
+#define SWITCH_TO_VCPU_STACK                                     \
+    mov   %r12, %rsp
+
 #ifndef NDEBUG
 #define ASSERT_NOT_IN_ATOMIC                                          \
     sti;   /* sometimes called with interrupts disabled: safe to enable */ \
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 5963114e08..e128c13a1e 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -9,8 +9,10 @@
 
 #include <xen/percpu.h>
 #include <public/xen.h>
+#include <asm/config.h>
 #include <asm/page.h>
 
+#ifndef __ASSEMBLY__
 /*
  * Xen's physical cpu stacks are 8 pages (8-page aligned), arranged as:
  *
@@ -71,8 +73,10 @@ struct cpu_info {
     };
     unsigned int processor_id; /* per physical cpu mapping only */
     unsigned int flags;
+#endif /* !__ASSEMBLY__ */
 #define ON_VCPUSTACK      0x00000001
 #define VCPUSTACK_ACTIVE  0x00000002
+#ifndef __ASSEMBLY__
     /* get_stack_bottom() must be 16-byte aligned */
 };
 
@@ -97,9 +101,16 @@ static inline struct cpu_info *get_cpu_info(void)
 #define set_processor_id(id) do {                                       \
     struct cpu_info *ci__ = get_cpu_info();                             \
     ci__->per_cpu_offset = __per_cpu_offset[ci__->processor_id = (id)]; \
+    ci__->flags = 0;                                                    \
 } while (0)
 
-#define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
+#define guest_cpu_user_regs() ({                                        \
+    struct cpu_info *info = get_cpu_info();                             \
+    if ( info->flags & VCPUSTACK_ACTIVE )                               \
+        info = (struct cpu_info *)(XPTI_START(info->current_vcpu) +     \
+                                   STACK_SIZE) - 1;                     \
+    &info->guest_cpu_user_regs;                                         \
+})
 
 /*
  * Get the bottom-of-stack, as stored in the per-CPU TSS. This actually points
@@ -142,4 +153,6 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
  */
 DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
 
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __X86_CURRENT_H__ */
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 625f6e9f69..58e47bf6e1 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -97,6 +97,12 @@
           X86_EFLAGS_NT|X86_EFLAGS_DF|X86_EFLAGS_IF|    \
           X86_EFLAGS_TF)
 
+#define IST_NONE _AC(0,UL)
+#define IST_DF   _AC(1,UL)
+#define IST_NMI  _AC(2,UL)
+#define IST_MCE  _AC(3,UL)
+#define IST_MAX  _AC(3,UL)
+
 #ifndef __ASSEMBLY__
 
 struct domain;
@@ -400,12 +406,6 @@ struct __packed __cacheline_aligned tss_struct {
     uint8_t __cacheline_filler[24];
 };
 
-#define IST_NONE 0UL
-#define IST_DF   1UL
-#define IST_NMI  2UL
-#define IST_MCE  3UL
-#define IST_MAX  3UL
-
 /* Set the interrupt stack table used by a particular interrupt
  * descriptor table entry. */
 static always_inline void set_ist(idt_entry_t *idt, unsigned long ist)
--
2.13.6