[Xen-changelog] Xen saves the upcall mask onto the stack when making an upcall to the guest



ChangeSet 1.1427, 2005/05/17 10:14:58+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Xen saves the upcall mask onto the stack when making an upcall to the
        guest. This can be used by the guest to determine whether it must
        re-enable event delivery on return from the upcall activation.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
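
        As a rough guest-side illustration of what the saved mask makes
        possible, the return-path test in the entry.S diff below (the
        notb/andb sequence on EVENT_MASK(%esp)) corresponds to this hedged
        C sketch -- the struct and function names here are hypothetical,
        and the real field layout lives in xen/include/public/arch-x86_32.h:

            #include <stdint.h>

            /* Hypothetical mirror of the vcpu_info fields used below. */
            struct vcpu_info_sketch {
                uint8_t evtchn_upcall_pending;
                uint8_t evtchn_upcall_mask;
            };

            /* Nonzero iff events are masked now but were not masked when
             * the upcall was made: this activation masked them, so it must
             * re-enable event delivery on its way out.  saved_mask is the
             * byte Xen pushed onto the stack (EVENT_MASK(%esp) below). */
            static int must_unmask_events(const struct vcpu_info_sketch *v,
                                          uint8_t saved_mask)
            {
                return (v->evtchn_upcall_mask & (uint8_t)~saved_mask) & 1;
            }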



 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S |   95 ++++++-------------
 xen/arch/x86/traps.c                                 |    8 -
 xen/arch/x86/x86_32/asm-offsets.c                    |    1 
 xen/arch/x86/x86_32/entry.S                          |   12 +-
 xen/arch/x86/x86_32/seg_fixup.c                      |    4 
 xen/arch/x86/x86_64/asm-offsets.c                    |    1 
 xen/arch/x86/x86_64/entry.S                          |   16 ++-
 xen/arch/x86/x86_64/traps.c                          |    2 
 xen/include/public/arch-x86_32.h                     |   14 +-
 xen/include/public/arch-x86_64.h                     |   34 +++---
 10 files changed, 85 insertions(+), 102 deletions(-)


diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S      2005-05-17 07:03:51 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S      2005-05-17 07:03:51 -04:00
@@ -83,42 +83,28 @@
 #define sizeof_vcpu_shift              3
 
 #ifdef CONFIG_SMP
-#define XEN_GET_VCPU_INFO(reg)
 #define preempt_disable(reg)   incl TI_preempt_count(reg)
 #define preempt_enable(reg)    decl TI_preempt_count(reg)
-#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp)              ; \
+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%ebp)                   ; \
                                movl TI_cpu(%ebp),reg                   ; \
                                shl  $sizeof_vcpu_shift,reg             ; \
                                addl HYPERVISOR_shared_info,reg
-#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
-#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
-#define Ux00 0xff
-#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
-#define XEN_BLOCK_EVENTS(reg)  XEN_LOCK_VCPU_INFO_SMP(reg)             ; \
-                               XEN_LOCKED_BLOCK_EVENTS(reg)            ; \
-                               XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNBLOCK_EVENTS(reg)        XEN_LOCK_VCPU_INFO_SMP(reg)     ; \
-                               movb $0,evtchn_upcall_mask(reg)         ; \
-                               XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp)        ; \
-                               XEN_LOCK_VCPU_INFO_SMP(reg)             ; \
-                               movb evtchn_upcall_mask(reg), tmp       ; \
-                               movb tmp, off(%esp)                     ; \
-                               XEN_UNLOCK_VCPU_INFO_SMP(reg)
+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
 #else
-#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-#define XEN_LOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
-#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
-#define Ux00 0x00
-#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
-#define XEN_BLOCK_EVENTS(reg)  XEN_LOCKED_BLOCK_EVENTS(reg)
-#define XEN_UNBLOCK_EVENTS(reg)        movb $0,evtchn_upcall_mask(reg)
-#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
-       movb evtchn_upcall_mask(reg), tmp; \
-       movb tmp, off(%esp)
+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
 #endif
 
+#define XEN_LOCKED_BLOCK_EVENTS(reg)   movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_BLOCK_EVENTS(reg)  XEN_GET_VCPU_INFO(reg)                  ; \
+                               XEN_LOCKED_BLOCK_EVENTS(reg)            ; \
+                               XEN_PUT_VCPU_INFO(reg)
+#define XEN_UNBLOCK_EVENTS(reg)        XEN_GET_VCPU_INFO(reg)          ; \
+                               XEN_LOCKED_UNBLOCK_EVENTS(reg)          ; \
+                               XEN_PUT_VCPU_INFO(reg)
 #define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
 
 #ifdef CONFIG_PREEMPT
@@ -128,7 +114,7 @@
 #define resume_kernel          restore_all
 #endif
 
-#define SAVE_ALL_NO_EVENTMASK \
+#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
@@ -141,12 +127,7 @@
        pushl %ebx; \
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
-       movl %edx, %es;
-
-#define SAVE_ALL \
-       SAVE_ALL_NO_EVENTMASK; \
-       XEN_GET_VCPU_INFO(%esi); \
-       XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
+       movl %edx, %es
 
 #define RESTORE_INT_REGS \
        popl %ebx;      \
@@ -196,7 +177,6 @@
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
-       XEN_GET_VCPU_INFO(%esi)
        jmp syscall_exit
 
 /*
@@ -217,7 +197,6 @@
        testl $(VM_MASK | 2), %eax
        jz resume_kernel                # returning to kernel or vm86-space
 ENTRY(resume_userspace)
-       XEN_GET_VCPU_INFO(%esi)
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
@@ -229,7 +208,6 @@
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       XEN_GET_VCPU_INFO(%esi)
        XEN_BLOCK_EVENTS(%esi)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_all
@@ -316,11 +294,11 @@
        jnz resume_vm86
        movb EVENT_MASK(%esp), %al
        notb %al                        # %al == ~saved_mask
-       XEN_LOCK_VCPU_INFO_SMP(%esi)
+       XEN_GET_VCPU_INFO(%esi)
        andb evtchn_upcall_mask(%esi),%al
        andb $1,%al                     # %al == mask & ~saved_mask
        jnz restore_all_enable_events   #     != 0 => reenable event delivery
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+       XEN_PUT_VCPU_INFO(%esi)
        RESTORE_ALL
 
 resume_vm86:
@@ -470,8 +448,6 @@
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
-       XEN_GET_VCPU_INFO(%esi)
-       XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
        call *%edi
        jmp ret_from_exception
 
@@ -488,29 +464,27 @@
 # activation and restart the handler using the previous one.
 ENTRY(hypervisor_callback)
        pushl %eax
-       SAVE_ALL_NO_EVENTMASK
+       SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb   11f
        cmpl $ecrit,%eax
        jb   critical_region_fixup
-11:    XEN_GET_VCPU_INFO(%esi)
-       movb $0, EVENT_MASK(%esp)
-       push %esp
+11:    push %esp
        call evtchn_do_upcall
        add  $4,%esp
        jmp  ret_from_intr
 
         ALIGN
 restore_all_enable_events:  
-       XEN_UNBLOCK_EVENTS(%esi)
+       XEN_LOCKED_UNBLOCK_EVENTS(%esi)
 scrit: /**** START OF CRITICAL REGION ****/
        XEN_TEST_PENDING(%esi)
        jnz  14f                        # process more events if necessary...
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+       XEN_PUT_VCPU_INFO(%esi)
        RESTORE_ALL
 14:    XEN_LOCKED_BLOCK_EVENTS(%esi)
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+       XEN_PUT_VCPU_INFO(%esi)
        jmp  11b
 ecrit:  /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -523,15 +497,12 @@
 critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax              # %eax contains num bytes popped
-#ifdef CONFIG_SMP
-       cmpb $0xff,%al
+       cmpb $0xff,%al                  # 0xff => vcpu_info critical region
        jne  15f
-       add  $1,%al
        GET_THREAD_INFO(%ebp)
-       XEN_UNLOCK_VCPU_INFO_SMP(%esi)
-15:
-#endif
-       mov  %esp,%esi
+       XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
+        xorl %eax,%eax
+15:    mov  %esp,%esi
        add  %eax,%esi                  # %esi points at end of src region
        mov  %esp,%edi
        add  $0x34,%edi                 # %edi points at end of dst region
@@ -547,9 +518,9 @@
        jmp  11b
 
 critical_fixup_table:
-       .byte Ux00,Ux00,Ux00            # testb $0xff,(%esi) = XEN_TEST_PENDING
-       .byte Ux00,Ux00                 # jnz  14f
-       XEN_UNLOCK_VCPU_INFO_SMP_fixup
+       .byte 0xff,0xff,0xff            # testb $0xff,(%esi) = XEN_TEST_PENDING
+       .byte 0xff,0xff                 # jnz  14f
+       XEN_PUT_VCPU_INFO_fixup
        .byte 0x00                      # pop  %ebx
        .byte 0x04                      # pop  %ecx
        .byte 0x08                      # pop  %edx
@@ -561,8 +532,8 @@
        .byte 0x20                      # pop  %es
        .byte 0x24,0x24,0x24            # add  $4,%esp
        .byte 0x28                      # iret
-       .byte Ux00,Ux00,Ux00,Ux00       # movb $1,1(%esi)
-       XEN_UNLOCK_VCPU_INFO_SMP_fixup
+       .byte 0xff,0xff,0xff,0xff       # movb $1,1(%esi)
+       XEN_PUT_VCPU_INFO_fixup
        .byte 0x00,0x00                 # jmp  11b
 
 # Hypervisor uses this for application faults while it executes.
@@ -766,8 +737,6 @@
        movl %eax, %ds
        movl %eax, %es
        movl %esp,%eax                  /* pt_regs pointer */
-       XEN_GET_VCPU_INFO(%esi)
-       XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
        call do_page_fault
        jmp ret_from_exception
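
        (On the critical-region fixup above: the table lookup yields how
        many bytes of the interrupted frame RESTORE_ALL had already popped,
        and the copy loop -- elided from the hunks shown -- slides the
        freshly saved bytes for those registers up against the unrestored
        remainder of the old frame before restarting at 11:.  A hedged C
        rendering of the merge, with FRAME_SIZE and merge_frames as
        illustrative names:

            #include <stdint.h>
            #include <string.h>

            #define FRAME_SIZE 0x34  /* bytes per saved frame, as in entry.S */

            /* popped = byte count from critical_fixup_table.  The assembly
             * copies word by word from the high addresses downward, which
             * memmove reproduces for these overlapping regions. */
            static uint8_t *merge_frames(uint8_t *esp, unsigned int popped)
            {
                memmove(esp + FRAME_SIZE - popped, esp, popped);
                return esp + FRAME_SIZE - popped;   /* the new %esp */
            }

        Restarting the restore on the merged frame then reloads the
        already-popped registers with their current values and the rest
        with the old frame's values.)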
 
diff -Nru a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      2005-05-17 07:03:50 -04:00
+++ b/xen/arch/x86/traps.c      2005-05-17 07:03:51 -04:00
@@ -257,7 +257,7 @@
         tb->error_code = regs->error_code;
     }
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
     return 0;
 
  xen_fault:
@@ -322,7 +322,7 @@
     tb->cs    = ti->cs;
     tb->eip   = ti->address;
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
 
     return 0;
 }
@@ -345,7 +345,7 @@
     tb->cs         = ti->cs;
     tb->eip        = ti->address;
     if ( TI_GET_IF(ti) )
-        ed->vcpu_info->evtchn_upcall_mask = 1;
+        tb->flags |= TBF_INTERRUPT;
 
     ed->arch.guest_cr2 = addr;
 }
@@ -911,7 +911,7 @@
     tb->cs         = ti->cs;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

