[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86/hvm: Remove 32-/64-bit abstraction macros from asm files.


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Wed, 16 Jan 2013 08:22:26 +0000
  • Delivery-date: Wed, 16 Jan 2013 08:22:33 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1358172365 0
# Node ID f6a81b41ad710464f255fb6e75f569c71f58045e
# Parent  e6f74afc78d26e0201c64d08b8a3ed089c2120ff
x86/hvm: Remove 32-/64-bit abstraction macros from asm files.

Not needed now we target 64-bit only.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r e6f74afc78d2 -r f6a81b41ad71 xen/arch/x86/hvm/svm/entry.S
--- a/xen/arch/x86/hvm/svm/entry.S      Mon Jan 14 13:55:45 2013 +0000
+++ b/xen/arch/x86/hvm/svm/entry.S      Mon Jan 14 14:06:05 2013 +0000
@@ -31,55 +31,48 @@
 #define STGI   .byte 0x0F,0x01,0xDC
 #define CLGI   .byte 0x0F,0x01,0xDD
 
-#define get_current(reg) GET_CURRENT(r(reg))
-        
-#define r(reg) %r##reg
-#define addr_of(lbl) lbl(%rip)
-#define call_with_regs(fn)                      \
-        mov  %rsp,%rdi;                         \
-        call fn;
-
 ENTRY(svm_asm_do_resume)
         call svm_intr_assist
-        call_with_regs(nsvm_vcpu_switch)
+        mov  %rsp,%rdi
+        call nsvm_vcpu_switch
         ASSERT_NOT_IN_ATOMIC
 
-        get_current(bx)
+        GET_CURRENT(%rbx)
         CLGI
 
-        mov  VCPU_processor(r(bx)),%eax
-        shl  $IRQSTAT_shift,r(ax)
-        lea  addr_of(irq_stat),r(dx)
-        testl $~0,(r(dx),r(ax),1)
+        mov  VCPU_processor(%rbx),%eax
+        shl  $IRQSTAT_shift,%rax
+        lea  irq_stat(%rip),%rdx
+        testl $~0,(%rdx,%rax,1)
         jnz  .Lsvm_process_softirqs
 
-        testb $0, VCPU_nsvm_hap_enabled(r(bx))
+        testb $0, VCPU_nsvm_hap_enabled(%rbx)
 UNLIKELY_START(nz, nsvm_hap)
-        mov  VCPU_nhvm_p2m(r(bx)),r(ax)
-        test r(ax),r(ax)
+        mov  VCPU_nhvm_p2m(%rbx),%rax
+        test %rax,%rax
         sete %al
-        andb VCPU_nhvm_guestmode(r(bx)),%al
+        andb VCPU_nhvm_guestmode(%rbx),%al
         jnz  .Lsvm_nsvm_no_p2m
 UNLIKELY_END(nsvm_hap)
 
         call svm_asid_handle_vmrun
 
-        cmpb $0,addr_of(tb_init_done)
+        cmpb $0,tb_init_done(%rip)
 UNLIKELY_START(nz, svm_trace)
         call svm_trace_vmentry
 UNLIKELY_END(svm_trace)
 
-        mov  VCPU_svm_vmcb(r(bx)),r(cx)
-        mov  UREGS_rax(r(sp)),r(ax)
-        mov  r(ax),VMCB_rax(r(cx))
-        mov  UREGS_rip(r(sp)),r(ax)
-        mov  r(ax),VMCB_rip(r(cx))
-        mov  UREGS_rsp(r(sp)),r(ax)
-        mov  r(ax),VMCB_rsp(r(cx))
-        mov  UREGS_eflags(r(sp)),r(ax)
-        mov  r(ax),VMCB_rflags(r(cx))
+        mov  VCPU_svm_vmcb(%rbx),%rcx
+        mov  UREGS_rax(%rsp),%rax
+        mov  %rax,VMCB_rax(%rcx)
+        mov  UREGS_rip(%rsp),%rax
+        mov  %rax,VMCB_rip(%rcx)
+        mov  UREGS_rsp(%rsp),%rax
+        mov  %rax,VMCB_rsp(%rcx)
+        mov  UREGS_eflags(%rsp),%rax
+        mov  %rax,VMCB_rflags(%rcx)
 
-        mov  VCPU_svm_vmcb_pa(r(bx)),r(ax)
+        mov  VCPU_svm_vmcb_pa(%rbx),%rax
 
         pop  %r15
         pop  %r14
@@ -115,35 +108,36 @@ UNLIKELY_END(svm_trace)
         push %r14
         push %r15
 
-        get_current(bx)
-        movb $0,VCPU_svm_vmcb_in_sync(r(bx))
-        mov  VCPU_svm_vmcb(r(bx)),r(cx)
-        mov  VMCB_rax(r(cx)),r(ax)
-        mov  r(ax),UREGS_rax(r(sp))
-        mov  VMCB_rip(r(cx)),r(ax)
-        mov  r(ax),UREGS_rip(r(sp))
-        mov  VMCB_rsp(r(cx)),r(ax)
-        mov  r(ax),UREGS_rsp(r(sp))
-        mov  VMCB_rflags(r(cx)),r(ax)
-        mov  r(ax),UREGS_eflags(r(sp))
+        GET_CURRENT(%rbx)
+        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
+        mov  VCPU_svm_vmcb(%rbx),%rcx
+        mov  VMCB_rax(%rcx),%rax
+        mov  %rax,UREGS_rax(%rsp)
+        mov  VMCB_rip(%rcx),%rax
+        mov  %rax,UREGS_rip(%rsp)
+        mov  VMCB_rsp(%rcx),%rax
+        mov  %rax,UREGS_rsp(%rsp)
+        mov  VMCB_rflags(%rcx),%rax
+        mov  %rax,UREGS_eflags(%rsp)
 
 #ifndef NDEBUG
         mov  $0xbeef,%ax
-        mov  %ax,UREGS_error_code(r(sp))
-        mov  %ax,UREGS_entry_vector(r(sp))
-        mov  %ax,UREGS_saved_upcall_mask(r(sp))
-        mov  %ax,UREGS_cs(r(sp))
-        mov  %ax,UREGS_ds(r(sp))
-        mov  %ax,UREGS_es(r(sp))
-        mov  %ax,UREGS_fs(r(sp))
-        mov  %ax,UREGS_gs(r(sp))
-        mov  %ax,UREGS_ss(r(sp))
+        mov  %ax,UREGS_error_code(%rsp)
+        mov  %ax,UREGS_entry_vector(%rsp)
+        mov  %ax,UREGS_saved_upcall_mask(%rsp)
+        mov  %ax,UREGS_cs(%rsp)
+        mov  %ax,UREGS_ds(%rsp)
+        mov  %ax,UREGS_es(%rsp)
+        mov  %ax,UREGS_fs(%rsp)
+        mov  %ax,UREGS_gs(%rsp)
+        mov  %ax,UREGS_ss(%rsp)
 #endif
 
         STGI
 .globl svm_stgi_label
 svm_stgi_label:
-        call_with_regs(svm_vmexit_handler)
+        mov  %rsp,%rdi
+        call svm_vmexit_handler
         jmp  svm_asm_do_resume
 
 .Lsvm_process_softirqs:
diff -r e6f74afc78d2 -r f6a81b41ad71 xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S      Mon Jan 14 13:55:45 2013 +0000
+++ b/xen/arch/x86/hvm/vmx/entry.S      Mon Jan 14 14:06:05 2013 +0000
@@ -36,14 +36,6 @@
 #define GUEST_RIP    0x681e
 #define GUEST_RFLAGS 0x6820
 
-#define get_current(reg) GET_CURRENT(r(reg))
-
-#define r(reg) %r##reg
-#define addr_of(lbl) lbl(%rip)
-#define call_with_regs(fn)                      \
-        mov  %rsp,%rdi;                         \
-        call fn;
-
         ALIGN
 .globl vmx_asm_vmexit_handler
 vmx_asm_vmexit_handler:
@@ -63,36 +55,37 @@ vmx_asm_vmexit_handler:
         push %r14
         push %r15
 
-        get_current(bx)
+        GET_CURRENT(%rbx)
 
-        movb $1,VCPU_vmx_launched(r(bx))
+        movb $1,VCPU_vmx_launched(%rbx)
 
-        lea  UREGS_rip(r(sp)),r(di)
+        lea  UREGS_rip(%rsp),%rdi
         mov  $GUEST_RIP,%eax
         /*VMREAD(UREGS_rip)*/
-        .byte 0x0f,0x78,0x07  /* vmread r(ax),(r(di)) */
+        .byte 0x0f,0x78,0x07  /* vmread %rax,(%rdi) */
         mov  $GUEST_RSP,%eax
         VMREAD(UREGS_rsp)
         mov  $GUEST_RFLAGS,%eax
         VMREAD(UREGS_eflags)
 
-        mov  %cr2,r(ax)
-        mov  r(ax),VCPU_hvm_guest_cr2(r(bx))
+        mov  %cr2,%rax
+        mov  %rax,VCPU_hvm_guest_cr2(%rbx)
 
 #ifndef NDEBUG
         mov  $0xbeef,%ax
-        mov  %ax,UREGS_error_code(r(sp))
-        mov  %ax,UREGS_entry_vector(r(sp))
-        mov  %ax,UREGS_saved_upcall_mask(r(sp))
-        mov  %ax,UREGS_cs(r(sp))
-        mov  %ax,UREGS_ds(r(sp))
-        mov  %ax,UREGS_es(r(sp))
-        mov  %ax,UREGS_fs(r(sp))
-        mov  %ax,UREGS_gs(r(sp))
-        mov  %ax,UREGS_ss(r(sp))
+        mov  %ax,UREGS_error_code(%rsp)
+        mov  %ax,UREGS_entry_vector(%rsp)
+        mov  %ax,UREGS_saved_upcall_mask(%rsp)
+        mov  %ax,UREGS_cs(%rsp)
+        mov  %ax,UREGS_ds(%rsp)
+        mov  %ax,UREGS_es(%rsp)
+        mov  %ax,UREGS_fs(%rsp)
+        mov  %ax,UREGS_gs(%rsp)
+        mov  %ax,UREGS_ss(%rsp)
 #endif
 
-        call_with_regs(vmx_vmexit_handler)
+        mov  %rsp,%rdi
+        call vmx_vmexit_handler
 
 .globl vmx_asm_do_vmentry
 vmx_asm_do_vmentry:
@@ -100,38 +93,39 @@ vmx_asm_do_vmentry:
         call nvmx_switch_guest
         ASSERT_NOT_IN_ATOMIC
 
-        get_current(bx)
+        GET_CURRENT(%rbx)
         cli
 
-        mov  VCPU_processor(r(bx)),%eax
-        shl  $IRQSTAT_shift,r(ax)
-        lea  addr_of(irq_stat),r(dx)
-        cmpl $0,(r(dx),r(ax),1)
+        mov  VCPU_processor(%rbx),%eax
+        shl  $IRQSTAT_shift,%rax
+        lea  irq_stat(%rip),%rdx
+        cmpl $0,(%rdx,%rax,1)
         jnz  .Lvmx_process_softirqs
 
-        testb $0xff,VCPU_vmx_emulate(r(bx))
+        testb $0xff,VCPU_vmx_emulate(%rbx)
         jnz .Lvmx_goto_emulator
-        testb $0xff,VCPU_vmx_realmode(r(bx))
+        testb $0xff,VCPU_vmx_realmode(%rbx)
 UNLIKELY_START(nz, realmode)
-        cmpw $0,VCPU_vm86_seg_mask(r(bx))
+        cmpw $0,VCPU_vm86_seg_mask(%rbx)
         jnz .Lvmx_goto_emulator
-        call_with_regs(vmx_enter_realmode) 
+        mov  %rsp,%rdi
+        call vmx_enter_realmode
 UNLIKELY_END(realmode)
 
         call vmx_vmenter_helper
-        mov  VCPU_hvm_guest_cr2(r(bx)),r(ax)
-        mov  r(ax),%cr2
+        mov  VCPU_hvm_guest_cr2(%rbx),%rax
+        mov  %rax,%cr2
 
-        lea  UREGS_rip(r(sp)),r(di)
+        lea  UREGS_rip(%rsp),%rdi
         mov  $GUEST_RIP,%eax
         /*VMWRITE(UREGS_rip)*/
-        .byte 0x0f,0x79,0x07  /* vmwrite (r(di)),r(ax) */
+        .byte 0x0f,0x79,0x07  /* vmwrite (%rdi),%rax */
         mov  $GUEST_RSP,%eax
         VMWRITE(UREGS_rsp)
         mov  $GUEST_RFLAGS,%eax
         VMWRITE(UREGS_eflags)
 
-        cmpb $0,VCPU_vmx_launched(r(bx))
+        cmpb $0,VCPU_vmx_launched(%rbx)
         pop  %r15
         pop  %r14
         pop  %r13
@@ -163,7 +157,8 @@ UNLIKELY_END(realmode)
 
 .Lvmx_goto_emulator:
         sti
-        call_with_regs(vmx_realmode)
+        mov  %rsp,%rdi
+        call vmx_realmode
         jmp  vmx_asm_do_vmentry
 
 .Lvmx_process_softirqs:

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted by RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.