x86: use only a single branch for upcall-pending exit path checks

This utilizes the fact that the two bytes of interest are adjacent to one
another and that the resulting 16-bit values of interest are within a
contiguous range of numbers.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -219,10 +219,10 @@ test_all_events:
         jnz  process_nmi
 test_guest_events:
         movl VCPU_vcpu_info(%ebx),%eax
-        testb $0xFF,VCPUINFO_upcall_mask(%eax)
-        jnz  restore_all_guest
-        testb $0xFF,VCPUINFO_upcall_pending(%eax)
-        jz   restore_all_guest
+        movzwl VCPUINFO_upcall_pending(%eax),%eax
+        decl %eax
+        cmpl $0xfe,%eax
+        ja   restore_all_guest
 /*process_guest_events:*/
         sti
         leal VCPU_trap_bounce(%ebx),%edx
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -108,10 +108,10 @@ ENTRY(compat_test_all_events)
         jnz  compat_process_nmi
 compat_test_guest_events:
         movq VCPU_vcpu_info(%rbx),%rax
-        testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
-        jnz  compat_restore_all_guest
-        testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
-        jz   compat_restore_all_guest
+        movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
+        decl %eax
+        cmpl $0xfe,%eax
+        ja   compat_restore_all_guest
 /*compat_process_guest_events:*/
         sti
         leaq VCPU_trap_bounce(%rbx),%rdx
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -199,8 +199,8 @@ test_all_events:
         movl VCPU_processor(%rbx),%eax
         shl  $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rcx
-        testl $~0,(%rcx,%rax,1)
-        jnz  process_softirqs
+        cmpl $0,(%rcx,%rax,1)
+        jne  process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz  process_mce
 .Ltest_guest_nmi:
@@ -208,10 +208,10 @@ test_all_events:
         jnz  process_nmi
 test_guest_events:
         movq VCPU_vcpu_info(%rbx),%rax
-        testb $0xFF,VCPUINFO_upcall_mask(%rax)
-        jnz  restore_all_guest
-        testb $0xFF,VCPUINFO_upcall_pending(%rax)
-        jz   restore_all_guest
+        movzwl VCPUINFO_upcall_pending(%rax),%eax
+        decl %eax
+        cmpl $0xfe,%eax
+        ja   restore_all_guest
 /*process_guest_events:*/
         sti
         leaq VCPU_trap_bounce(%rbx),%rdx
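
For reference, a minimal C sketch (not part of the patch; the struct and
function names below are made up for illustration) of why the single
16-bit test is equivalent to the two byte tests: with upcall_pending at
the lower address and upcall_mask in the following byte, the zero-extended
16-bit load yields pending | (mask << 8), and the values for which the
upcall must actually be delivered are exactly 0x0001..0x00ff, a contiguous
range that a decrement plus one unsigned comparison covers with a single
conditional branch.

#include <assert.h>
#include <stdint.h>

/*
 * Hypothetical layout mirroring the two adjacent vcpu_info bytes the
 * patch relies on (names invented for this sketch only).
 */
struct upcall_bytes {
    uint8_t upcall_pending;   /* low byte of the 16-bit load */
    uint8_t upcall_mask;      /* high byte of the 16-bit load */
};

/* Old form: two tests, two conditional branches. */
static int deliver_old(const struct upcall_bytes *u)
{
    if ( u->upcall_mask )
        return 0;             /* restore_all_guest */
    if ( !u->upcall_pending )
        return 0;             /* restore_all_guest */
    return 1;                 /* process_guest_events */
}

/*
 * New form: one 16-bit load, one conditional branch.  The "deliver"
 * values of pending | (mask << 8) are 0x0001..0x00ff; after the
 * decrement they become 0x0000..0x00fe, so a single unsigned
 * comparison decides (v == 0 wraps to 0xffffffff and falls outside
 * the range, just as with decl/cmpl/ja in the patch).
 */
static int deliver_new(const struct upcall_bytes *u)
{
    uint32_t v = (uint32_t)u->upcall_pending |
                 ((uint32_t)u->upcall_mask << 8);

    return (v - 1) <= 0xfe;   /* "ja restore_all_guest" when this is false */
}

int main(void)
{
    /* Exhaustively confirm both forms agree for all byte combinations. */
    for ( unsigned int p = 0; p <= 0xff; ++p )
        for ( unsigned int m = 0; m <= 0xff; ++m )
        {
            struct upcall_bytes u = { (uint8_t)p, (uint8_t)m };

            assert(deliver_old(&u) == deliver_new(&u));
        }
    return 0;
}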