
[Xen-devel] [PATCH V2 3/6] mini-os/x86-64 entry: code refactoring; no functional changes



Re-arrange the assembly code blocks so that they appear in the order
in which they are called, instead of jumping around, to improve
readability. Related macros are grouped together as well.

Signed-off-by: Xu Zhang <xzhang@xxxxxxxxxx>
---
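Notes for reviewers (not part of the commit message): for reference,
the event macros being moved compose as follows. This is just the
literal expansion of the #defines in the patch below, given that `;'
is the GAS statement separator and that XEN_PUT_VCPU_INFO expands to
nothing on this architecture:

    # XEN_BLOCK_EVENTS(%rsi) expands to:
    movq HYPERVISOR_shared_info,%rsi    # XEN_GET_VCPU_INFO(%rsi)
    movb $1,evtchn_upcall_mask(%rsi)    # XEN_LOCKED_BLOCK_EVENTS(%rsi)

    # XEN_UNBLOCK_EVENTS(%rsi) expands to:
    movq HYPERVISOR_shared_info,%rsi    # XEN_GET_VCPU_INFO(%rsi)
    movb $0,evtchn_upcall_mask(%rsi)    # XEN_LOCKED_UNBLOCK_EVENTS(%rsi)
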
 extras/mini-os/arch/x86/x86_64.S |  113 +++++++++++++++++++-------------------
 1 files changed, 57 insertions(+), 56 deletions(-)
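
The zeroentry/errorentry macros are moved verbatim. As their existing
comments indicate, they run on a frame where Xen has saved %rcx and
%r11 above the hardware exception frame. An annotated copy of
zeroentry (the annotations are mine; the instructions are unchanged
from the patch):

    .macro zeroentry sym
        movq (%rsp),%rcx        # reload %rcx saved by Xen
        movq 8(%rsp),%r11       # reload %r11 saved by Xen
        addq $0x10,%rsp         # drop the two saved slots
        pushq $0                # fake error code: this trap type has none
        pushq %rax              # save old %rax where error_entry expects it
        leaq  \sym(%rip),%rax   # %rax = handler for error_entry to call
        jmp error_entry
    .endm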

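Finally, the relocated retint_kernel block decides whether event
delivery must be re-enabled before returning. A sketch with
XEN_GET_VCPU_INFO expanded (same instructions as in the patch; the
annotations are mine):

    movl EFLAGS-6*8(%rsp), %eax         # saved EFLAGS of interrupted context
    shr  $9, %eax                       # EAX[0] = EFLAGS.IF
    movq HYPERVISOR_shared_info, %rsi   # XEN_GET_VCPU_INFO(%rsi)
    andb evtchn_upcall_mask(%rsi), %al  # AL[0] = IF & event mask
    andb $1, %al                        # keep only bit 0
    jnz  restore_all_enable_events      # IF set while events masked =>
                                        # unmask before returning

i.e. only when the interrupted context had interrupts enabled but
events are currently masked do we take the slow path that unmasks
events again before the final iret.
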
diff --git a/extras/mini-os/arch/x86/x86_64.S b/extras/mini-os/arch/x86/x86_64.S
index 24f35cd..d9b34a7 100644
--- a/extras/mini-os/arch/x86/x86_64.S
+++ b/extras/mini-os/arch/x86/x86_64.S
@@ -36,6 +36,22 @@ hypercall_page:
         .org 0x3000
 
 
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg)  XEN_GET_VCPU_INFO(reg)                  ; \
+                                       XEN_LOCKED_BLOCK_EVENTS(reg)    ; \
+                                           XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg)        XEN_GET_VCPU_INFO(reg)                  ; \
+                                               XEN_LOCKED_UNBLOCK_EVENTS(reg)  ; \
+                                       XEN_PUT_VCPU_INFO(reg)
+
+
 /* Offsets into shared_info_t. */                
 #define evtchn_upcall_pending          /* 0 */
 #define evtchn_upcall_mask             1
@@ -46,6 +62,27 @@ NMI_MASK = 0x80000000
 #define ORIG_RAX 120       /* + error_code */ 
 #define EFLAGS 144
 
+
+/* Macros */
+.macro zeroentry sym
+       movq (%rsp),%rcx
+       movq 8(%rsp),%r11
+       addq $0x10,%rsp /* skip rcx and r11 */
+       pushq $0        /* push error code/oldrax */
+       pushq %rax      /* push real oldrax to the rdi slot */
+       leaq  \sym(%rip),%rax
+       jmp error_entry
+.endm
+
+.macro errorentry sym
+       movq (%rsp),%rcx
+       movq 8(%rsp),%r11
+       addq $0x10,%rsp /* rsp points to the error code */
+       pushq %rax
+       leaq  \sym(%rip),%rax
+       jmp error_entry
+.endm
+
 .macro RESTORE_ALL
        movq (%rsp),%r11
        movq 1*8(%rsp),%r10
@@ -130,42 +167,10 @@ error_call_handler:
        call *%rax
        jmp error_exit
 
-.macro zeroentry sym
-       movq (%rsp),%rcx
-       movq 8(%rsp),%r11
-       addq $0x10,%rsp /* skip rcx and r11 */
-       pushq $0        /* push error code/oldrax */ 
-       pushq %rax      /* push real oldrax to the rdi slot */ 
-       leaq  \sym(%rip),%rax
-       jmp error_entry
-.endm  
-
-.macro errorentry sym
-       movq (%rsp),%rcx
-       movq 8(%rsp),%r11
-       addq $0x10,%rsp /* rsp points to the error code */
-       pushq %rax
-       leaq  \sym(%rip),%rax
-       jmp error_entry
-.endm
-
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-#define XEN_PUT_VCPU_INFO(reg)
-#define XEN_PUT_VCPU_INFO_fixup
-#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
-#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
-
-#define XEN_BLOCK_EVENTS(reg)  XEN_GET_VCPU_INFO(reg)                  ; \
-                                       XEN_LOCKED_BLOCK_EVENTS(reg)    ; \
-                                           XEN_PUT_VCPU_INFO(reg)
-
-#define XEN_UNBLOCK_EVENTS(reg)        XEN_GET_VCPU_INFO(reg)                  ; \
-                                               XEN_LOCKED_UNBLOCK_EVENTS(reg)  ; \
-                                       XEN_PUT_VCPU_INFO(reg)
-
-
 
+/*
+ * Xen event (virtual interrupt) entry point.
+ */
 ENTRY(hypervisor_callback)
        zeroentry hypervisor_callback2
 
@@ -178,7 +183,23 @@ ENTRY(hypervisor_callback2)
        call do_hypervisor_callback
        popq %rsp
        decl %gs:0
-       jmp error_exit
+
+error_exit:
+       RESTORE_REST
+       XEN_BLOCK_EVENTS(%rsi)          
+
+retint_kernel:
+retint_restore_args:
+       movl EFLAGS-6*8(%rsp), %eax
+       shr $9, %eax                    # EAX[0] == IRET_EFLAGS.IF
+       XEN_GET_VCPU_INFO(%rsi)
+       andb evtchn_upcall_mask(%rsi),%al
+       andb $1,%al                     # EAX[0] == IRET_EFLAGS.IF & event_mask
+       jnz restore_all_enable_events   #        != 0 => enable event delivery
+       XEN_PUT_VCPU_INFO(%rsi)
+
+       RESTORE_ALL
+       HYPERVISOR_IRET 0
 
 restore_all_enable_events:
        XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
@@ -198,26 +219,6 @@ scrit:     /**** START OF CRITICAL REGION ****/
 ecrit:  /**** END OF CRITICAL REGION ****/
 
 
-retint_kernel:
-retint_restore_args:
-       movl EFLAGS-6*8(%rsp), %eax
-       shr $9, %eax                    # EAX[0] == IRET_EFLAGS.IF
-       XEN_GET_VCPU_INFO(%rsi)
-       andb evtchn_upcall_mask(%rsi),%al
-       andb $1,%al                     # EAX[0] == IRET_EFLAGS.IF & event_mask
-       jnz restore_all_enable_events   #        != 0 => enable event delivery
-       XEN_PUT_VCPU_INFO(%rsi)
-
-       RESTORE_ALL
-       HYPERVISOR_IRET 0
-
-
-error_exit:
-       RESTORE_REST
-       XEN_BLOCK_EVENTS(%rsi)          
-       jmp retint_kernel
-
-
 
 ENTRY(failsafe_callback)
         popq  %rcx
-- 
1.7.7.6

