[Xen-changelog] [xen-unstable] x86: Avoid assumptions about C struct layouts from asm code.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1279041135 -3600
# Node ID a3a55a6e47610192b44f88282aacefe481b3471a
# Parent  9f08c4e82037415f0677da080470aac97d11d010
x86: Avoid assumptions about C struct layouts from asm code.

Largely this involves avoiding assumptions about 'struct cpu_info'.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/acpi/suspend.c            |   41 ++++++++++++++-----
 xen/arch/x86/acpi/wakeup_prot.S        |   71 +++------------------------------
 xen/arch/x86/boot/head.S               |    2 
 xen/arch/x86/hvm/svm/entry.S           |    8 ---
 xen/arch/x86/hvm/vmx/entry.S           |    6 --
 xen/arch/x86/x86_32/asm-offsets.c      |    5 +-
 xen/arch/x86/x86_32/entry.S            |   13 ------
 xen/arch/x86/x86_64/asm-offsets.c      |    4 +
 xen/arch/x86/x86_64/compat/entry.S     |   11 -----
 xen/arch/x86/x86_64/entry.S            |   17 +------
 xen/include/asm-x86/x86_32/asm_defns.h |    8 +++
 xen/include/asm-x86/x86_64/asm_defns.h |    8 +++
 12 files changed, 71 insertions(+), 123 deletions(-)

diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/acpi/suspend.c
--- a/xen/arch/x86/acpi/suspend.c	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/acpi/suspend.c	Tue Jul 13 18:12:15 2010 +0100
@@ -13,10 +13,13 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/i387.h>
+#include <xen/hypercall.h>
 
 #if defined(CONFIG_X86_64)
 static unsigned long saved_lstar, saved_cstar;
 static unsigned long saved_sysenter_esp, saved_sysenter_eip;
+static unsigned long saved_fs_base, saved_gs_base, saved_kernel_gs_base;
+static uint16_t saved_segs[4];
 #endif
 
 void save_rest_processor_state(void)
@@ -25,6 +28,12 @@ void save_rest_processor_state(void)
     unlazy_fpu(current);
 
 #if defined(CONFIG_X86_64)
+    asm volatile (
+        "mov %%ds,(%0); mov %%es,2(%0); mov %%fs,4(%0); mov %%gs,6(%0)"
+        : : "r" (saved_segs) : "memory" );
+    rdmsrl(MSR_FS_BASE, saved_fs_base);
+    rdmsrl(MSR_GS_BASE, saved_gs_base);
+    rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
     rdmsrl(MSR_CSTAR, saved_cstar);
     rdmsrl(MSR_LSTAR, saved_lstar);
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
@@ -35,9 +44,10 @@ void save_rest_processor_state(void)
 #endif
 }
 
+
 void restore_rest_processor_state(void)
 {
-    struct vcpu *v = current;
+    struct vcpu *curr = current;
 
     load_TR();
 
@@ -51,6 +61,10 @@ void restore_rest_processor_state(void)
                  X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF,
                  0U);
 
+    wrmsrl(MSR_FS_BASE, saved_fs_base);
+    wrmsrl(MSR_GS_BASE, saved_gs_base);
+    wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
+
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
     {
         /* Recover sysenter MSRs */
@@ -58,21 +72,30 @@ void restore_rest_processor_state(void)
         wrmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
         wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
     }
+
+    if ( !is_idle_vcpu(curr) )
+    {
+        asm volatile (
+            "mov (%0),%%ds; mov 2(%0),%%es; mov 4(%0),%%fs"
+            : : "r" (saved_segs) : "memory" );
+        do_set_segment_base(SEGBASE_GS_USER_SEL, saved_segs[3]);
+    }
+
 #else /* !defined(CONFIG_X86_64) */
     if ( supervisor_mode_kernel && cpu_has_sep )
         wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
 #endif
 
     /* Maybe load the debug registers. */
-    BUG_ON(is_hvm_vcpu(v));
-    if ( !is_idle_vcpu(v) && unlikely(v->arch.guest_context.debugreg[7]) )
+    BUG_ON(is_hvm_vcpu(curr));
+    if ( !is_idle_vcpu(curr) && curr->arch.guest_context.debugreg[7] )
     {
-        write_debugreg(0, v->arch.guest_context.debugreg[0]);
-        write_debugreg(1, v->arch.guest_context.debugreg[1]);
-        write_debugreg(2, v->arch.guest_context.debugreg[2]);
-        write_debugreg(3, v->arch.guest_context.debugreg[3]);
-        write_debugreg(6, v->arch.guest_context.debugreg[6]);
-        write_debugreg(7, v->arch.guest_context.debugreg[7]);
+        write_debugreg(0, curr->arch.guest_context.debugreg[0]);
+        write_debugreg(1, curr->arch.guest_context.debugreg[1]);
+        write_debugreg(2, curr->arch.guest_context.debugreg[2]);
+        write_debugreg(3, curr->arch.guest_context.debugreg[3]);
+        write_debugreg(6, curr->arch.guest_context.debugreg[6]);
+        write_debugreg(7, curr->arch.guest_context.debugreg[7]);
     }
 
     /* Reload FPU state on next FPU use. */
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/acpi/wakeup_prot.S
--- a/xen/arch/x86/acpi/wakeup_prot.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/acpi/wakeup_prot.S	Tue Jul 13 18:12:15 2010 +0100
@@ -19,21 +19,6 @@
 #define LOAD_GREG(x)    movq SAVED_GREG(x), GREG(x)
 
 #define REF(x)          x(%rip)
-
-#define RDMSR(ind, m)               \
-        xorq    %rdx, %rdx;         \
-        mov     $ind, %ecx;         \
-        rdmsr;                      \
-        shlq    $0x20, %rdx;        \
-        orq     %rax, %rdx;         \
-        movq    %rdx, m(%rip);
-
-#define WRMSR(ind, m)               \
-        mov     $ind, %ecx;         \
-        movq    m(%rip), %rdx;      \
-        mov     %edx, %eax;         \
-        shrq    $0x20, %rdx;        \
-        wrmsr;
 
 #else /* !defined(__x86_64__) */
 
@@ -76,21 +61,16 @@ ENTRY(do_suspend_lowlevel)
         mov     %cr8, GREG(ax)
         mov     GREG(ax), REF(saved_cr8)
 
-        RDMSR(MSR_FS_BASE, saved_fs_base)
-        RDMSR(MSR_GS_BASE, saved_gs_base)
-        RDMSR(MSR_SHADOW_GS_BASE, saved_kernel_gs_base)
-
 #else /* !defined(__x86_64__) */
 
         pushfl;
         popl    SAVED_GREG(flags)
 
-#endif
-
         mov     %ds, REF(saved_ds)
         mov     %es, REF(saved_es)
-        mov     %fs, REF(saved_fs)
-        mov     %gs, REF(saved_gs)
+
+#endif
+
         mov     %ss, REF(saved_ss)
 
         sgdt    REF(saved_gdt)
@@ -148,40 +128,11 @@ __ret_point:
         pushq   %rax
         lretq
 1:
-
         mov     REF(saved_cr8), %rax
         mov     %rax, %cr8
 
         pushq   SAVED_GREG(flags)
         popfq
-
-        /* Idle vcpu doesn't need segment selectors reload, since
-         * those may contain stale value from other domains and
-         * reload may result page fault due to no matched gdt entry
-         */
-        mov     $(STACK_SIZE - 8), %rax
-        or      %rsp, %rax
-        and     $~7, %rax
-        mov     (%rax), %rax
-        mov     0x10(%rax), %rax
-        cmpw    $0x7fff, (%rax)
-        je      1f
-
-        /* These selectors are from guest, and thus need reload */
-        mov     REF(saved_ds), %ds
-        mov     REF(saved_es), %es
-        mov     REF(saved_fs), %fs
-
-        /* gs load is special */
-        mov     REF(saved_gs), %rsi
-        mov     $3, %rdi        # SEGBASE_GS_USER_SEL
-        call    do_set_segment_base
-
-1:
-        # MSR restore
-        WRMSR(MSR_FS_BASE, saved_fs_base)
-        WRMSR(MSR_GS_BASE, saved_gs_base)
-        WRMSR(MSR_SHADOW_GS_BASE, saved_kernel_gs_base)
 
 #else /* !defined(__x86_64__) */
 
@@ -217,15 +168,10 @@ 1:
 .data
         .align 16
-saved_ds:       .word   0
-saved_es:       .word   0
-saved_ss:       .word   0
-saved_gs:       .word   0
-saved_fs:       .word   0
-
-        .align 4
 .globl saved_magic
 saved_magic:    .long   0x9abcdef0
+
+saved_ss:       .word   0
 
         .align 8
 DECLARE_GREG(sp)
@@ -257,10 +203,6 @@ saved_cr3:      .quad   0
 saved_cr3:      .quad   0
 saved_cr8:      .quad   0
 
-saved_gs_base:  .quad   0
-saved_fs_base:  .quad   0
-saved_kernel_gs_base:   .quad   0
-
 #else /* !defined(__x86_64__) */
 
 saved_gdt:      .long   0,0
@@ -270,4 +212,7 @@ saved_cr0:      .long   0
 saved_cr0:      .long   0
 saved_cr3:      .long   0
 
+saved_ds:       .word   0
+saved_es:       .word   0
+
 #endif
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/boot/head.S
--- a/xen/arch/x86/boot/head.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/boot/head.S	Tue Jul 13 18:12:15 2010 +0100
@@ -101,7 +101,7 @@ __start:
         mov     $0x80000001,%eax
         cpuid
 1:      mov     %edx,sym_phys(cpuid_ext_features)
-        mov     %edx,sym_phys(boot_cpu_data)+CPUINFO_ext_features
+        mov     %edx,sym_phys(boot_cpu_data)+CPUINFO86_ext_features
 
 #if defined(__x86_64__)
         /* Check for availability of long mode. */
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/hvm/svm/entry.S
--- a/xen/arch/x86/hvm/svm/entry.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/hvm/svm/entry.S	Tue Jul 13 18:12:15 2010 +0100
@@ -31,12 +31,8 @@
 #define STGI   .byte 0x0F,0x01,0xDC
 #define CLGI   .byte 0x0F,0x01,0xDD
 
-#define get_current(reg)                        \
-        mov $STACK_SIZE-BYTES_PER_LONG, r(reg); \
-        or  r(sp), r(reg);                      \
-        and $~(BYTES_PER_LONG-1),r(reg);        \
-        mov (r(reg)),r(reg);
-
+#define get_current(reg) GET_CURRENT(r(reg))
+
 #if defined(__x86_64__)
 #define r(reg) %r##reg
 #define addr_of(lbl) lbl(%rip)
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/entry.S	Tue Jul 13 18:12:15 2010 +0100
@@ -36,11 +36,7 @@
 #define GUEST_RIP    0x681e
 #define GUEST_RFLAGS 0x6820
 
-#define get_current(reg)                        \
-        mov $STACK_SIZE-BYTES_PER_LONG, r(reg); \
-        or  r(sp), r(reg);                      \
-        and $~(BYTES_PER_LONG-1),r(reg);        \
-        mov (r(reg)),r(reg);
+#define get_current(reg) GET_CURRENT(r(reg))
 
 #if defined(__x86_64__)
 #define r(reg) %r##reg
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c	Tue Jul 13 18:12:15 2010 +0100
@@ -106,6 +106,9 @@ void __dummy__(void)
     OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
     BLANK();
 
+    OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
+    OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
+    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
 
@@ -127,7 +130,7 @@ void __dummy__(void)
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
     BLANK();
 
-    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
+    OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
     BLANK();
 
     OFFSET(MB_flags, multiboot_info_t, flags);
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/x86_32/entry.S	Tue Jul 13 18:12:15 2010 +0100
@@ -60,17 +60,6 @@
 #include <asm/apicdef.h>
 #include <asm/page.h>
 #include <public/xen.h>
-
-#define GET_GUEST_REGS(reg)                     \
-        movl $~(STACK_SIZE-1),reg;              \
-        andl %esp,reg;                          \
-        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;
-
-#define GET_CURRENT(reg)         \
-        movl $STACK_SIZE-4, reg; \
-        orl  %esp, reg;          \
-        andl $~3,reg;            \
-        movl (reg),reg;
 
         ALIGN
 restore_all_guest:
@@ -590,7 +579,7 @@ handle_nmi_mce:
         jmp   ret_from_intr
 .Lnmi_mce_xen:
         /* Check the outer (guest) context for %ds/%es state validity. */
-        GET_GUEST_REGS(%ebx)
+        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%ebx)
         testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
         mov   %ds,%eax
         mov   %es,%edx
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c	Tue Jul 13 18:12:15 2010 +0100
@@ -132,6 +132,8 @@ void __dummy__(void)
     OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
     BLANK();
 
+    OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
+    OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
@@ -151,7 +153,7 @@ void __dummy__(void)
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
     BLANK();
 
-    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
+    OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
     BLANK();
 
     OFFSET(MB_flags, multiboot_info_t, flags);
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/x86_64/compat/entry.S
--- a/xen/arch/x86/x86_64/compat/entry.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/x86_64/compat/entry.S	Tue Jul 13 18:12:15 2010 +0100
@@ -10,17 +10,6 @@
 #include <asm/page.h>
 #include <asm/desc.h>
 #include <public/xen.h>
-
-#define GET_GUEST_REGS(reg)                     \
-        movq $~(STACK_SIZE-1),reg;              \
-        andq %rsp,reg;                          \
-        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
-
-#define GET_CURRENT(reg)         \
-        movq $STACK_SIZE-8, reg; \
-        orq  %rsp, reg;          \
-        andq $~7,reg;            \
-        movq (reg),reg;
 
         ALIGN
 ENTRY(compat_hypercall)
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/arch/x86/x86_64/entry.S	Tue Jul 13 18:12:15 2010 +0100
@@ -11,17 +11,6 @@
 #include <asm/apicdef.h>
 #include <asm/page.h>
 #include <public/xen.h>
-
-#define GET_GUEST_REGS(reg)                     \
-        movq $~(STACK_SIZE-1),reg;              \
-        andq %rsp,reg;                          \
-        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;
-
-#define GET_CURRENT(reg)         \
-        movq $STACK_SIZE-8, reg; \
-        orq  %rsp, reg;          \
-        andq $~7,reg;            \
-        movq (reg),reg;
 
         ALIGN
 /* %rbx: struct vcpu */
@@ -434,10 +423,10 @@ domain_crash_synchronous_string:
 ENTRY(domain_crash_synchronous)
         # Get out of the guest-save area of the stack.
-        GET_GUEST_REGS(%rax)
+        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%rax)
         movq  %rax,%rsp
         # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
-        movq  CPUINFO_current_vcpu(%rax),%rax
+        GET_CURRENT(%rax)
         movq  VCPU_domain(%rax),%rax
         testb $1,DOMAIN_is_32bit_pv(%rax)
         setz  %al
@@ -610,7 +599,7 @@ handle_ist_exception:
         testb $3,UREGS_cs(%rsp)
         jz    1f
         /* Interrupted guest context. Copy the context to stack bottom. */
-        GET_GUEST_REGS(%rdi)
+        GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%rdi)
         movq  %rsp,%rsi
         movl  $UREGS_kernel_sizeof/8,%ecx
         movq  %rdi,%rsp
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/include/asm-x86/x86_32/asm_defns.h
--- a/xen/include/asm-x86/x86_32/asm_defns.h	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/include/asm-x86/x86_32/asm_defns.h	Tue Jul 13 18:12:15 2010 +0100
@@ -142,4 +142,12 @@ STR(IRQ) #nr "_interrupt:\n\t"
     "pushl $"#nr"<<16\n\t"                              \
     "jmp common_interrupt");
 
+#define GET_CPUINFO_FIELD(field,reg)                    \
+        movl $~(STACK_SIZE-1),reg;                      \
+        andl %esp,reg;                                  \
+        orl  $(STACK_SIZE-CPUINFO_sizeof+field),reg;
+#define GET_CURRENT(reg)                                \
+        GET_CPUINFO_FIELD(CPUINFO_current_vcpu,reg)     \
+        movl (reg),reg;
+
 #endif /* __X86_32_ASM_DEFNS_H__ */
diff -r 9f08c4e82037 -r a3a55a6e4761 xen/include/asm-x86/x86_64/asm_defns.h
--- a/xen/include/asm-x86/x86_64/asm_defns.h	Mon Jul 12 18:39:18 2010 +0100
+++ b/xen/include/asm-x86/x86_64/asm_defns.h	Tue Jul 13 18:12:15 2010 +0100
@@ -122,4 +122,12 @@ STR(IRQ) #nr "_interrupt:\n\t"
     "movl $"#nr",4(%rsp)\n\t"                           \
     "jmp common_interrupt");
 
+#define GET_CPUINFO_FIELD(field,reg)                    \
+        movq $~(STACK_SIZE-1),reg;                      \
+        andq %rsp,reg;                                  \
+        orq  $(STACK_SIZE-CPUINFO_sizeof+field),reg;
+#define GET_CURRENT(reg)                                \
+        GET_CPUINFO_FIELD(CPUINFO_current_vcpu,reg)     \
+        movq (reg),reg;
+
 #endif /* __X86_64_ASM_DEFNS_H__ */
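
For readers following the new GET_CPUINFO_FIELD/GET_CURRENT macros above, a minimal C sketch of the address arithmetic they encode may help. It is illustrative only: the STACK_SIZE value, the cut-down struct cpu_info, and the sketch_* helper names below are assumptions made for this example, not Xen's real definitions. The underlying idea is that each CPU's stack is STACK_SIZE-aligned and carries a struct cpu_info in its topmost bytes, so masking any in-stack pointer recovers that block; the field offsets themselves (CPUINFO_current_vcpu, CPUINFO_guest_cpu_user_regs, ...) are now generated from the C structure by asm-offsets.c rather than hard-coded in the assembly, which is the point of this changeset.

#include <stdint.h>

/* Assumed values for the sketch only -- not Xen's real definitions. */
#define STACK_SIZE (8UL << 12)              /* must be a power of two for the mask trick */

struct cpu_info {                           /* cut-down stand-in for Xen's struct cpu_info */
    unsigned long guest_cpu_user_regs[32];  /* placeholder for the saved guest register frame */
    unsigned int  processor_id;
    void         *current_vcpu;
};

/* Same computation as GET_CPUINFO_FIELD: mask the stack pointer down to the
 * base of the STACK_SIZE-aligned per-CPU stack, then step to the cpu_info
 * block occupying the stack's topmost sizeof(struct cpu_info) bytes. */
static inline struct cpu_info *sketch_get_cpu_info(uintptr_t sp)
{
    uintptr_t stack_base = sp & ~(uintptr_t)(STACK_SIZE - 1);
    return (struct cpu_info *)(stack_base + STACK_SIZE - sizeof(struct cpu_info));
}

/* GET_CURRENT is the same address computation plus one dereference. */
static inline void *sketch_get_current(uintptr_t sp)
{
    return sketch_get_cpu_info(sp)->current_vcpu;
}

The assembly macros use or rather than add for the final step, which is equivalent here because the masked stack base has those low bits clear.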