[Xen-changelog] [xen-unstable] x86, hvm: Assembly stub cleanups.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1208776755 -3600
# Node ID dac7703e8d932866aa29cc8a53d954df26134f6e
# Parent  658f031557f60230071dc5de06e91707677d380d
x86, hvm: Assembly stub cleanups.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/x86_32/exits.S |   19 ++++++------------
 xen/arch/x86/hvm/svm/x86_64/exits.S |   19 ++++++------------
 xen/arch/x86/hvm/vmx/x86_32/exits.S |   37 +++++++++++++++++------------------
 xen/arch/x86/hvm/vmx/x86_64/exits.S |   38 ++++++++++++++++++------------------
 4 files changed, 51 insertions(+), 62 deletions(-)

diff -r 658f031557f6 -r dac7703e8d93 xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Mon Apr 21 10:33:40 2008 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Mon Apr 21 12:19:15 2008 +0100
@@ -42,7 +42,7 @@ ENTRY(svm_asm_do_resume)
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         testl $~0,irq_stat(%eax,1)
-        jnz  svm_process_softirqs
+        jnz  .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
         call svm_intr_assist
@@ -52,8 +52,8 @@ ENTRY(svm_asm_do_resume)
          * instead of having a mostly taken branch over the unlikely code.
          */
         cmpb $0,tb_init_done
-        jnz  svm_trace
-svm_trace_done:
+        jnz  .Lsvm_trace
+.Lsvm_trace_done:
 
         movl VCPU_svm_vmcb(%ebx),%ecx
         movl UREGS_eax(%esp),%eax
@@ -108,7 +108,7 @@ svm_trace_done:
 #endif
 
         STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
 svm_stgi_label:
         movl %esp,%eax
         push %eax
@@ -116,16 +116,11 @@ svm_stgi_label:
         addl $4,%esp
         jmp  svm_asm_do_resume
 
-        ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
         STGI
         call do_softirq
         jmp  svm_asm_do_resume
 
-svm_trace:
-        /* Call out to C, as this is not speed critical path
-         * Note: svm_trace_vmentry will recheck the tb_init_done,
-         * but this is on the slow path, so who cares
-         */
+.Lsvm_trace:
         call svm_trace_vmentry
-        jmp  svm_trace_done
+        jmp  .Lsvm_trace_done

diff -r 658f031557f6 -r dac7703e8d93 xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Mon Apr 21 10:33:40 2008 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Mon Apr 21 12:19:15 2008 +0100
@@ -43,7 +43,7 @@ ENTRY(svm_asm_do_resume)
         shl  $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         testl $~0,(%rdx,%rax,1)
-        jnz  svm_process_softirqs
+        jnz  .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
         call svm_intr_assist
@@ -53,8 +53,8 @@ ENTRY(svm_asm_do_resume)
          * instead of having a mostly taken branch over the unlikely code.
          */
         cmpb $0,tb_init_done(%rip)
-        jnz  svm_trace
-svm_trace_done:
+        jnz  .Lsvm_trace
+.Lsvm_trace_done:
 
         movq VCPU_svm_vmcb(%rbx),%rcx
         movq UREGS_rax(%rsp),%rax
@@ -127,22 +127,17 @@ svm_trace_done:
 #endif
 
         STGI
-.globl svm_stgi_label;
+.globl svm_stgi_label
 svm_stgi_label:
         movq %rsp,%rdi
         call svm_vmexit_handler
         jmp  svm_asm_do_resume
 
-        ALIGN
-svm_process_softirqs:
+.Lsvm_process_softirqs:
         STGI
         call do_softirq
         jmp  svm_asm_do_resume
 
-svm_trace:
-        /* Call out to C, as this is not speed critical path
-         * Note: svm_trace_vmentry will recheck the tb_init_done,
-         * but this is on the slow path, so who cares
-         */
+.Lsvm_trace:
         call svm_trace_vmentry
-        jmp  svm_trace_done
+        jmp  .Lsvm_trace_done

diff -r 658f031557f6 -r dac7703e8d93 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Mon Apr 21 10:33:40 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Mon Apr 21 12:19:15 2008 +0100
@@ -58,9 +58,12 @@
         popl %eax
 
         ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
         HVM_SAVE_ALL_NOSEGREGS
         GET_CURRENT(%ebx)
+
+        movb $1,VCPU_vmx_launched(%ebx)
 
         movl $GUEST_RIP,%eax
         VMREAD(UREGS_eip)
@@ -89,28 +92,21 @@ ENTRY(vmx_asm_vmexit_handler)
         push %eax
         call vmx_vmexit_handler
         addl $4,%esp
-        jmp vmx_asm_do_vmentry
 
-        ALIGN
-vmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
-
-        ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
         GET_CURRENT(%ebx)
         cli                             # tests must not race interrupts
 
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         cmpl $0,irq_stat(%eax,1)
-        jnz  vmx_process_softirqs
+        jnz  .Lvmx_process_softirqs
 
         call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(%ebx)
-        jnz  vmx_goto_realmode
+        jnz  .Lvmx_goto_realmode
 
         movl VCPU_hvm_guest_cr2(%ebx),%eax
         movl %eax,%cr2
@@ -124,25 +120,28 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(%ebx)
-        je   vmx_launch
+        HVM_RESTORE_ALL_NOSEGREGS
+        je   .Lvmx_launch
 
-/*vmx_resume:*/
-        HVM_RESTORE_ALL_NOSEGREGS
+/*.Lvmx_resume:*/
         VMRESUME
         call vm_resume_fail
         ud2
 
-vmx_launch:
-        movb $1,VCPU_vmx_launched(%ebx)
-        HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
         VMLAUNCH
         call vm_launch_fail
         ud2
 
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
         sti
         movl %esp,%eax
         push %eax
         call vmx_realmode
         addl $4,%esp
         jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry

diff -r 658f031557f6 -r dac7703e8d93 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Mon Apr 21 10:33:40 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Mon Apr 21 12:19:15 2008 +0100
@@ -74,9 +74,12 @@
         popq %rdi
 
         ALIGN
-ENTRY(vmx_asm_vmexit_handler)
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
         HVM_SAVE_ALL_NOSEGREGS
         GET_CURRENT(%rbx)
+
+        movb $1,VCPU_vmx_launched(%rbx)
 
         leaq UREGS_rip(%rsp),%rdi
         movl $GUEST_RIP,%eax
@@ -105,16 +108,9 @@ ENTRY(vmx_asm_vmexit_handler)
 
         movq %rsp,%rdi
         call vmx_vmexit_handler
-        jmp vmx_asm_do_vmentry
 
-        ALIGN
-vmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
-
-        ALIGN
-ENTRY(vmx_asm_do_vmentry)
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
         GET_CURRENT(%rbx)
         cli                             # tests must not race interrupts
 
@@ -122,12 +118,12 @@ ENTRY(vmx_asm_do_vmentry)
         shl  $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         cmpl $0,(%rdx,%rax,1)
-        jnz  vmx_process_softirqs
+        jnz  .Lvmx_process_softirqs
 
         call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(%rbx)
-        jnz  vmx_goto_realmode
+        jnz  .Lvmx_goto_realmode
 
         movq VCPU_hvm_guest_cr2(%rbx),%rax
         movq %rax,%cr2
@@ -143,23 +139,27 @@ ENTRY(vmx_asm_do_vmentry)
         VMWRITE(UREGS_eflags)
 
         cmpb $0,VCPU_vmx_launched(%rbx)
-        je   vmx_launch
+        HVM_RESTORE_ALL_NOSEGREGS
+        je   .Lvmx_launch
 
-/*vmx_resume:*/
-        HVM_RESTORE_ALL_NOSEGREGS
+/*.Lvmx_resume:*/
         VMRESUME
         call vm_resume_fail
         ud2
 
-vmx_launch:
-        movb $1,VCPU_vmx_launched(%rbx)
-        HVM_RESTORE_ALL_NOSEGREGS
+.Lvmx_launch:
         VMLAUNCH
         call vm_launch_fail
         ud2
 
-vmx_goto_realmode:
+.Lvmx_goto_realmode:
         sti
         movq %rsp,%rdi
         call vmx_realmode
         jmp vmx_asm_do_vmentry
+        jmp vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry
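
A note on the .L prefix adopted throughout this cleanup: on ELF targets the GNU assembler treats labels beginning with .L as file-local and does not emit them into the object file's symbol table, so branch targets that are only used inside one stub (softirq and trace slow paths, realmode exits) no longer appear as global symbols. The fragment below is a minimal illustration of that convention only; it is not part of the changeset, and the label and symbol names (exported_entry, .Lslow_path, some_helper) are made up for the example.

        .text
        .globl exported_entry           # kept in the symbol table, reachable from other files
exported_entry:
        testl   %eax,%eax
        jnz     .Lslow_path             # .L-prefixed target: assembler-local, not emitted as a symbol
        ret
.Lslow_path:                            # only reachable from within this file
        call    some_helper             # placeholder for an external helper routine
        jmp     exported_entry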