[Xen-devel] [PATCH v2 5/6] xen/x86: use flag byte for decision whether xen_cr3 is valid
Today cpu_info->xen_cr3 is either 0 to indicate %cr3 doesn't need to
be switched on entry to Xen, or negative for keeping the value while
indicating not to restore %cr3, or positive in case %cr3 is to be
restored.

Switch to using a flag byte instead of a negative xen_cr3 value in
order to allow %cr3 values with the high bit set, in case we want to
keep TLB entries when using the PCID feature.

This reduces the number of branches in interrupt handling and results
in better performance (e.g. a parallel make of the Xen hypervisor on
my system was using about 3% less system time).

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/arch/x86/domain.c              |  1 +
 xen/arch/x86/mm.c                  |  1 +
 xen/arch/x86/smpboot.c             |  2 ++
 xen/arch/x86/x86_64/asm-offsets.c  |  1 +
 xen/arch/x86/x86_64/compat/entry.S |  5 ++--
 xen/arch/x86/x86_64/entry.S        | 59 ++++++++++++++++----------------------
 xen/include/asm-x86/current.h      | 11 ++++---
 7 files changed, 39 insertions(+), 41 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 1f8b08ef02..c01ae60296 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1698,6 +1698,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
     ASSERT(local_irq_is_enabled());
 
     get_cpu_info()->xen_cr3 = 0;
+    get_cpu_info()->use_xen_cr3 = false;
 
     if ( unlikely(dirty_cpu != cpu) && dirty_cpu != VCPU_CPU_CLEAN )
     {
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0d0badea86..2d8366a01c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -520,6 +520,7 @@ void write_ptbase(struct vcpu *v)
     {
         get_cpu_info()->root_pgt_changed = false;
         /* Make sure to clear xen_cr3 before pv_cr3; write_cr3() serializes. */
+        get_cpu_info()->use_xen_cr3 = false;
         get_cpu_info()->xen_cr3 = 0;
         write_cr3(v->arch.cr3);
         get_cpu_info()->pv_cr3 = 0;
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 60604f4535..1b665991bd 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -330,6 +330,7 @@ void start_secondary(void *unused)
      */
     spin_debug_disable();
 
+    get_cpu_info()->use_xen_cr3 = false;
     get_cpu_info()->xen_cr3 = 0;
     get_cpu_info()->pv_cr3 = 0;
 
@@ -1129,6 +1130,7 @@ void __init smp_prepare_boot_cpu(void)
     per_cpu(scratch_cpumask, cpu) = &scratch_cpu0mask;
 #endif
 
+    get_cpu_info()->use_xen_cr3 = false;
     get_cpu_info()->xen_cr3 = 0;
     get_cpu_info()->pv_cr3 = 0;
 }
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index c9225b06c1..d276f70a75 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -145,6 +145,7 @@ void __dummy__(void)
     OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
     OFFSET(CPUINFO_bti_ist_info, struct cpu_info, bti_ist_info);
     OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
+    OFFSET(CPUINFO_use_xen_cr3, struct cpu_info, use_xen_cr3);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
 
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 27cdf244e6..74055eade8 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -218,10 +218,9 @@ ENTRY(cstar_enter)
         GET_STACK_END(bx)
 .Lcstar_cr3_start:
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lcstar_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Lcstar_cr3_okay:
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index a8d38e7eb2..2c9ce8d821 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -74,6 +74,7 @@ restore_all_guest:
         rep movsq
 .Lrag_copy_done:
         mov   %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
+        movb  $1, STACK_CPUINFO_FIELD(use_xen_cr3)(%rdx)
         mov   %rax, %cr3
 .Lrag_cr3_end:
         ALTERNATIVE_NOP .Lrag_cr3_start, .Lrag_cr3_end, X86_FEATURE_NO_XPTI
@@ -123,14 +124,9 @@ restore_all_xen:
          * case we return to late PV exit code (from an NMI or #MC).
          */
         GET_STACK_END(bx)
-        mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rdx
+        cmpb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%rbx)
+UNLIKELY_START(ne, exit_cr3)
         mov   STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
-        test  %rdx, %rdx
-        /*
-         * Ideally the condition would be "nsz", but such doesn't exist,
-         * so "g" will have to do.
-         */
-UNLIKELY_START(g, exit_cr3)
         mov   %rax, %cr3
 UNLIKELY_END(exit_cr3)
 
@@ -173,10 +169,9 @@ ENTRY(lstar_enter)
         GET_STACK_END(bx)
 .Llstar_cr3_start:
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Llstar_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Llstar_cr3_okay:
@@ -283,10 +278,9 @@ GLOBAL(sysenter_eflags_saved)
         GET_STACK_END(bx)
 .Lsyse_cr3_start:
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lsyse_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Lsyse_cr3_okay:
@@ -336,10 +330,9 @@ ENTRY(int80_direct_trap)
         GET_STACK_END(bx)
 .Lint80_cr3_start:
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lint80_cr3_okay
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-        neg   %rcx
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%rbx)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
 .Lint80_cr3_okay:
@@ -523,18 +516,17 @@ ENTRY(common_interrupt)
 
 .Lintr_cr3_start:
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   STACK_CPUINFO_FIELD(use_xen_cr3)(%r14), %bl
         mov   %rcx, %r15
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lintr_cr3_okay
-        jns   .Lintr_cr3_load
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rcx
-.Lintr_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
         mov   %rcx, %cr3
         xor   %ecx, %ecx
         mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         testb $3, UREGS_cs(%rsp)
         cmovnz %rcx, %r15
+        cmovnz %cx, %bx
 .Lintr_cr3_okay:
 
         CR4_PV32_RESTORE
@@ -542,6 +534,7 @@ ENTRY(common_interrupt)
         callq do_IRQ
 .Lintr_cr3_restore:
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %bl, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
 .Lintr_cr3_end:
         jmp ret_from_intr
 
@@ -571,18 +564,17 @@ GLOBAL(handle_exception)
 
 .Lxcpt_cr3_start:
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   STACK_CPUINFO_FIELD(use_xen_cr3)(%r14), %r13b
         mov   %rcx, %r15
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .Lxcpt_cr3_okay
-        jns   .Lxcpt_cr3_load
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rcx
-.Lxcpt_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
         mov   %rcx, %cr3
         xor   %ecx, %ecx
         mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         testb $3, UREGS_cs(%rsp)
         cmovnz %rcx, %r15
+        cmovnz %rcx, %r13
 .Lxcpt_cr3_okay:
 
 handle_exception_saved:
@@ -652,6 +644,7 @@ handle_exception_saved:
         INDIRECT_CALL %rdx
 .Lxcpt_cr3_restore1:
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %r13b, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
 .Lxcpt_cr3_end1:
         testb $3,UREGS_cs(%rsp)
         jz    restore_all_xen
@@ -687,6 +680,7 @@ exception_with_ints_disabled:
         movq  %rax,UREGS_kernel_sizeof(%rsp)
 .Lxcpt_cr3_restore2:
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %r13b, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
 .Lxcpt_cr3_end2:
         jmp   restore_all_xen           # return to fixup code
 
@@ -781,9 +775,6 @@ ENTRY(double_fault)
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rbx
         test  %rbx, %rbx
         jz    .Ldblf_cr3_okay
-        jns   .Ldblf_cr3_load
-        neg   %rbx
-.Ldblf_cr3_load:
         mov   %rbx, %cr3
 .Ldblf_cr3_okay:
 
@@ -812,13 +803,11 @@ handle_ist_exception:
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         mov   STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+        mov   STACK_CPUINFO_FIELD(use_xen_cr3)(%r14), %bl
         mov   %rcx, %r15
-        neg   %rcx
+        test  %rcx, %rcx
         jz    .List_cr3_okay
-        jns   .List_cr3_load
-        mov   %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-        neg   %rcx
-.List_cr3_load:
+        movb  $0, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
         mov   %rcx, %cr3
         movq  $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
 .List_cr3_okay:
@@ -831,6 +820,7 @@ handle_ist_exception:
          * and copy the context to stack bottom.
          */
         xor   %r15, %r15
+        xor   %bl, %bl
         GET_CPUINFO_FIELD(guest_cpu_user_regs,di)
         movq  %rsp,%rsi
         movl  $UREGS_kernel_sizeof/8,%ecx
@@ -842,6 +832,7 @@ handle_ist_exception:
         mov   (%rdx, %rax, 8), %rdx
         INDIRECT_CALL %rdx
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+        mov   %bl, STACK_CPUINFO_FIELD(use_xen_cr3)(%r14)
         cmpb  $TRAP_nmi,UREGS_entry_vector(%rsp)
         jne   ret_from_intr
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index d5236c82de..34a9512d93 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -47,10 +47,7 @@ struct cpu_info {
      * context is being entered. A value of zero indicates no setting of CR3
      * is to be performed.
      * The former is the value to restore when re-entering Xen, if any. IOW
-     * its value being zero means there's nothing to restore. However, its
-     * value can also be negative, indicating to the exit-to-Xen code that
-     * restoring is not necessary, but allowing any nested entry code paths
-     * to still know the value to put back into CR3.
+     * its value being zero means there's nothing to restore.
      */
     unsigned long xen_cr3;
     unsigned long pv_cr3;
@@ -68,6 +65,12 @@ struct cpu_info {
      */
     bool root_pgt_changed;
 
+    /*
+     * use_xen_cr3 is set in case the value of xen_cr3 is to be written into
+     * CR3 when entering the hypervisor.
+     */
+    bool use_xen_cr3;
+
     unsigned long __pad; /* get_stack_bottom() must be 16-byte aligned */
 };

-- 
2.13.6
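
For reference, the before/after semantics of the entry and exit paths can
be sketched in C roughly as below. This is only an illustration, not code
from the patch: the real logic runs in the assembly stubs above before any
C is reachable, the sketch is simplified to the system call entry paths
(the interrupt paths additionally keep the old value in %r15 / the old
flag in %bl or %r13b to restore on exit), and write_cr3_reg() is a
hypothetical stand-in for a raw "mov %reg, %cr3".

    #include <stdbool.h>

    void write_cr3_reg(unsigned long cr3);   /* hypothetical: "mov %reg, %cr3" */

    struct cpu_info {                        /* relevant fields from current.h */
        unsigned long xen_cr3;
        unsigned long pv_cr3;
        bool use_xen_cr3;
    };

    /* Old scheme: a tri-state encoded in the sign of xen_cr3. */
    static void enter_xen_old(struct cpu_info *ci)
    {
        long cr3 = (long)ci->xen_cr3;

        if ( cr3 == 0 )
            return;                /* %cr3 needs no switch */
        if ( cr3 < 0 )
            cr3 = -cr3;            /* negative: already marked, just load */
        else
            ci->xen_cr3 = -cr3;    /* mark as loaded, but keep the value
                                      visible to nested entries (NMI, #MC) */
        write_cr3_reg(cr3);        /* value must not have the high bit set */
        ci->xen_cr3 = 0;
    }

    /* New scheme: the mark moves into the separate use_xen_cr3 flag byte. */
    static void enter_xen_new(struct cpu_info *ci)
    {
        unsigned long cr3 = ci->xen_cr3;

        if ( cr3 == 0 )
            return;                /* %cr3 needs no switch */
        ci->use_xen_cr3 = false;   /* replaces the sign/negation games */
        write_cr3_reg(cr3);        /* bit 63 now free, e.g. PCID no-flush */
        ci->xen_cr3 = 0;
    }

    /* Exit back to an interrupted Xen context (restore_all_xen). */
    static void exit_to_xen(struct cpu_info *ci)
    {
        if ( ci->use_xen_cr3 )     /* one flag test, no "g" condition dance */
            write_cr3_reg(ci->pv_cr3);
    }

Dropping the jns/neg sequence leaves a single conditional branch per entry
path, which is where the roughly 3% reduction in system time quoted in the
commit message comes from.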