
[Minios-devel] [PATCH 09/22] mini-os: support HVMlite traps



Trap handling in an HVMlite domain differs from the PV case: exceptions and the
event channel upcall arrive via the native IDT, interrupts are masked via
EFLAGS.IF instead of the per-vcpu evtchn_upcall_mask, and the page fault
address has to be read directly from %cr2 rather than from the shared info
page. Adjust the low level entry code and the related macros accordingly.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/traps.c  |  2 --
 arch/x86/x86_32.S | 19 ++++++++++++++--
 arch/x86/x86_64.S | 22 +++++++++++++++++-
 include/x86/os.h  | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 4 files changed, 101 insertions(+), 9 deletions(-)
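
Note on the non-PARAVIRT side of the os.h changes: an HVMlite guest owns the
native EFLAGS and control registers, so interrupt masking and the page fault
address no longer come from the shared info page. A rough self-contained
sketch of the idea for the 64-bit case (illustrative names, not the exact
Mini-OS macros; cli/sti and %cr2 accesses of course need ring 0):

    /*
     * Sketch only: HVMlite-style IRQ and cr2 primitives on bare x86_64.
     * Function names are illustrative, not the Mini-OS macro names.
     */
    static inline void irq_disable(void)
    {
        asm volatile ( "cli" : : : "memory" );
    }

    static inline void irq_enable(void)
    {
        asm volatile ( "sti" : : : "memory" );
    }

    static inline int irqs_on(void)
    {
        unsigned long flags;

        /* EFLAGS bit 9 (IF) says whether interrupts are currently enabled. */
        asm volatile ( "pushfq ; popq %0" : "=g" (flags) );
        return (flags & 0x200) != 0;
    }

    static inline unsigned long fault_address(void)
    {
        unsigned long cr2;

        /* Read the page fault address straight from %cr2; a PV guest has
         * to use vcpu_info[].arch.cr2 from the shared info page instead. */
        asm volatile ( "mov %%cr2, %0" : "=r" (cr2) );
        return cr2;
    }

The local_irq_save()/local_irq_restore() wrappers are kept common to both
builds, so callers are unaffected by which flavour of primitives is selected.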

diff --git a/arch/x86/traps.c b/arch/x86/traps.c
index 3b1fffb..0b3d85b 100644
--- a/arch/x86/traps.c
+++ b/arch/x86/traps.c
@@ -191,8 +191,6 @@ static void dump_mem(unsigned long addr)
     }
     printk("\n");
 }
-#define read_cr2() \
-        (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
 
 static int handling_pg_fault = 0;
 
diff --git a/arch/x86/x86_32.S b/arch/x86/x86_32.S
index 6f38708..9241418 100644
--- a/arch/x86/x86_32.S
+++ b/arch/x86/x86_32.S
@@ -8,6 +8,9 @@
 #include <xen/arch-x86_32.h>
 
 #ifdef CONFIG_PARAVIRT
+
+#define KERNEL_DS __KERNEL_DS
+
 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "Mini-OS")
 ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
 ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _WORD hypercall_page)
@@ -21,6 +24,8 @@ _start:
         lss stack_start,%esp
 #else
 
+#define KERNEL_DS __KERN_DS
+
 #include "x86_hvm.S"
         movl stack_start,%esp
 
@@ -61,7 +66,7 @@ CS            = 0x2C
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \
-       movl $(__KERNEL_DS),%edx; \
+       movl $(KERNEL_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es;
 
@@ -98,7 +103,7 @@ do_exception:
        movl ORIG_EAX(%esp), %edx       # get the error code
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, ES(%esp)
-       movl $(__KERNEL_DS), %ecx
+       movl $(KERNEL_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
@@ -112,6 +117,7 @@ ret_from_exception:
     addl $8,%esp
     RESTORE_ALL
 
+#ifdef CONFIG_PARAVIRT
 # A note on the "critical region" in our callback handler.
 # We want to avoid stacking callback handlers due to events occurring
 # during handling of the last event. To do this, we keep events disabled
@@ -189,6 +195,15 @@ critical_fixup_table:
         .byte 0x28                            # iret
         .byte 0x00,0x00,0x00,0x00             # movb $1,1(%esi)
         .byte 0x00,0x00                       # jmp  11b
+
+#else
+
+ENTRY(hypervisor_callback)
+       pushl $0
+       pushl $do_hypervisor_callback
+       jmp do_exception
+
+#endif
        
 # Hypervisor uses this for application faults while it executes.
 ENTRY(failsafe_callback)
diff --git a/arch/x86/x86_64.S b/arch/x86/x86_64.S
index e725c63..17a9ead 100644
--- a/arch/x86/x86_64.S
+++ b/arch/x86/x86_64.S
@@ -78,9 +78,11 @@ KERNEL_CS_MASK = 0xfc
 
 /* Macros */
 .macro zeroentry sym
+#ifdef CONFIG_PARAVIRT
        movq (%rsp),%rcx
        movq 8(%rsp),%r11
        addq $0x10,%rsp /* skip rcx and r11 */
+#endif
        pushq $0        /* push error code/oldrax */
        pushq %rax      /* push real oldrax to the rdi slot */
        leaq  \sym(%rip),%rax
@@ -88,9 +90,11 @@ KERNEL_CS_MASK = 0xfc
 .endm
 
 .macro errorentry sym
+#ifdef CONFIG_PARAVIRT
        movq (%rsp),%rcx
        movq 8(%rsp),%r11
        addq $0x10,%rsp /* rsp points to the error code */
+#endif
        pushq %rax
        leaq  \sym(%rip),%rax
        jmp error_entry
@@ -133,11 +137,11 @@ KERNEL_CS_MASK = 0xfc
 #ifdef CONFIG_PARAVIRT
        testl $NMI_MASK,2*8(%rsp)
        jnz   2f
-#endif
 
        /* Direct iret to kernel space. Correct CS and SS. */
        orb   $3,1*8(%rsp)
        orb   $3,4*8(%rsp)
+#endif
        iretq
 
 #ifdef CONFIG_PARAVIRT
@@ -182,6 +186,7 @@ error_call_handler:
        jmp error_exit
 
 
+#ifdef CONFIG_PARAVIRT
 /*
  * Xen event (virtual interrupt) entry point.
  */
@@ -285,11 +290,26 @@ critical_region_fixup:
        andb $KERNEL_CS_MASK,CS(%rsp)      # CS might have changed
        jmp  11b
 
+#else
+error_exit:
+       RESTORE_REST
+       RESTORE_ALL
+       HYPERVISOR_IRET 0
 
+/*
+ * Xen event (virtual interrupt) entry point.
+ */
+ENTRY(hypervisor_callback)
+       zeroentry do_hypervisor_callback
+
+
+#endif
 
 ENTRY(failsafe_callback)
+#ifdef CONFIG_PARAVIRT
         popq  %rcx
         popq  %r11
+#endif
         iretq
 
 
diff --git a/include/x86/os.h b/include/x86/os.h
index 6826b9f..1083328 100644
--- a/include/x86/os.h
+++ b/include/x86/os.h
@@ -31,6 +31,8 @@
 #define X86_CR4_PAE       0x00000020    /* enable physical address extensions */
 #define X86_CR4_OSFXSR    0x00000200    /* enable fast FPU save and restore */
 
+#define X86_EFLAGS_IF     0x00000200
+
 #define __KERNEL_CS  FLAT_KERNEL_CS
 #define __KERNEL_DS  FLAT_KERNEL_DS
 #define __KERNEL_SS  FLAT_KERNEL_SS
@@ -70,7 +72,7 @@ void arch_fini(void);
 
 
 
-
+#ifdef CONFIG_PARAVIRT
 
 /* 
  * The use of 'barrier' in the following reflects their use as local-lock
@@ -129,15 +131,57 @@ do {                                                                    \
        barrier();                                                      \
 } while (0)
 
+#define irqs_disabled()                        \
+    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
+
+#else
+
+#if defined(__i386__)
+#define __SZ "l"
+#define __REG "e"
+#else
+#define __SZ "q"
+#define __REG "r"
+#endif
+
+#define __cli() asm volatile ( "cli" : : : "memory" )
+#define __sti() asm volatile ( "sti" : : : "memory" )
+
+#define __save_flags(x)                                                 \
+do {                                                                    \
+    unsigned long __f;                                                  \
+    asm volatile ( "pushf" __SZ " ; pop" __SZ " %0" : "=g" (__f));      \
+    x = (__f & X86_EFLAGS_IF) ? 1 : 0;                                  \
+} while (0)
+
+#define __restore_flags(x)                                              \
+do {                                                                    \
+    if (x) __sti();                                                     \
+    else __cli();                                                       \
+} while (0)
+
+#define __save_and_cli(x)                                               \
+do {                                                                    \
+    __save_flags(x);                                                    \
+    __cli();                                                            \
+} while (0)
+
+static inline int irqs_disabled(void)
+{
+    int flag;
+
+    __save_flags(flag);
+    return !flag;
+}
+
+#endif
+
 #define local_irq_save(x)      __save_and_cli(x)
 #define local_irq_restore(x)   __restore_flags(x)
 #define local_save_flags(x)    __save_flags(x)
 #define local_irq_disable()    __cli()
 #define local_irq_enable()     __sti()
 
-#define irqs_disabled()                        \
-    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
-
 /* This is a barrier for the compiler only, NOT the processor! */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
@@ -586,5 +630,20 @@ static inline void cpuid(uint32_t leaf,
 
 #undef ADDR
 
+#ifdef CONFIG_PARAVIRT
+static inline unsigned long read_cr2(void)
+{
+    return HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2;
+}
+#else
+static inline unsigned long read_cr2(void)
+{
+    unsigned long cr2;
+
+    asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) );
+    return cr2;
+}
+#endif
+
 #endif /* not assembly */
 #endif /* _OS_H_ */
-- 
2.6.6

