[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-unstable] x86: improve output resulting from sending '0' over serial
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1245157038 -3600 # Node ID 67a0ffade665e23c784613e7f34c6ac07867bb02 # Parent cb6f8a34b59af59b08c016a64afaba5e71cec79c x86: improve output resulting from sending '0' over serial While the original logic already implied that the kernel part of the guest's address space is identical on all vCPU-s (i.e. for all guest processes), it didn't fully leverage the potential here: As long as the top page table currently active is owned by the subject domain (currently only Dom0), the stack dump can be done without extra effort. For x86-64, additionally add page table traversal so that the stack can be dumped in all cases (unless it's invalid or user space). I left the 32-bit variant of do_page_walk() unimplemented for the moment as I couldn't convince myself using map_domain_page() there is a good idea, and didn't want to introduce new fixmap entries either. Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx> --- xen/arch/x86/traps.c | 59 ++++++++++++++++++++++++++--------- xen/arch/x86/x86_32/mm.c | 5 ++ xen/arch/x86/x86_64/compat/traps.c | 40 ++++++++++++++++++++--- xen/arch/x86/x86_64/mm.c | 41 ++++++++++++++++++++++++ xen/include/asm-x86/mm.h | 1 xen/include/asm-x86/processor.h | 6 +-- xen/include/asm-x86/x86_64/uaccess.h | 9 +++-- 7 files changed, 134 insertions(+), 27 deletions(-) diff -r cb6f8a34b59a -r 67a0ffade665 xen/arch/x86/traps.c --- a/xen/arch/x86/traps.c Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/arch/x86/traps.c Tue Jun 16 13:57:18 2009 +0100 @@ -129,18 +129,18 @@ boolean_param("ler", opt_ler); #define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)regs->rsp) #endif -static void show_guest_stack(struct cpu_user_regs *regs) +static void show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs) { int i; - struct vcpu *curr = current; unsigned long *stack, addr; - - if ( is_hvm_vcpu(curr) ) + unsigned long mask = STACK_SIZE; + + if ( is_hvm_vcpu(v) ) return; - if ( is_pv_32on64_vcpu(curr) ) - { - 
compat_show_guest_stack(regs, debug_stack_lines); + if ( is_pv_32on64_vcpu(v) ) + { + compat_show_guest_stack(v, regs, debug_stack_lines); return; } @@ -156,11 +156,42 @@ static void show_guest_stack(struct cpu_ printk("Guest stack trace from "__OP"sp=%p:\n ", stack); } + if ( !access_ok(stack, sizeof(*stack)) ) + { + printk("Guest-inaccessible memory.\n"); + return; + } + + if ( v != current ) + { + struct vcpu *vcpu; + + ASSERT(guest_kernel_mode(v, regs)); +#ifndef __x86_64__ + addr = read_cr3(); + for_each_vcpu( v->domain, vcpu ) + if ( vcpu->arch.cr3 == addr ) + break; +#else + vcpu = maddr_get_owner(read_cr3()) == v->domain ? v : NULL; +#endif + if ( !vcpu ) + { + stack = do_page_walk(v, (unsigned long)stack); + if ( (unsigned long)stack < PAGE_SIZE ) + { + printk("Inaccessible guest memory.\n"); + return; + } + mask = PAGE_SIZE; + } + } + for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) { - if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 ) - break; - if ( get_user(addr, stack) ) + if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask ) + break; + if ( __get_user(addr, stack) ) { if ( i != 0 ) printk("\n "); @@ -264,7 +295,7 @@ void show_stack(struct cpu_user_regs *re int i; if ( guest_mode(regs) ) - return show_guest_stack(regs); + return show_guest_stack(current, regs); printk("Xen stack trace from "__OP"sp=%p:\n ", stack); @@ -346,10 +377,8 @@ void vcpu_show_execution_state(struct vc vcpu_pause(v); /* acceptably dangerous */ vcpu_show_registers(v); - /* Todo: map arbitrary vcpu's top guest stack page here. 
*/ - if ( (v->domain == current->domain) && - guest_kernel_mode(v, &v->arch.guest_context.user_regs) ) - show_guest_stack(&v->arch.guest_context.user_regs); + if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) ) + show_guest_stack(v, &v->arch.guest_context.user_regs); vcpu_unpause(v); } diff -r cb6f8a34b59a -r 67a0ffade665 xen/arch/x86/x86_32/mm.c --- a/xen/arch/x86/x86_32/mm.c Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/arch/x86/x86_32/mm.c Tue Jun 16 13:57:18 2009 +0100 @@ -61,6 +61,11 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l l2_pgentry_t *virt_to_xen_l2e(unsigned long v) { return &idle_pg_table_l2[l2_linear_offset(v)]; +} + +void *do_page_walk(struct vcpu *v, unsigned long addr) +{ + return NULL; } void __init paging_init(void) diff -r cb6f8a34b59a -r 67a0ffade665 xen/arch/x86/x86_64/compat/traps.c --- a/xen/arch/x86/x86_64/compat/traps.c Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/arch/x86/x86_64/compat/traps.c Tue Jun 16 13:57:18 2009 +0100 @@ -5,18 +5,46 @@ #include <compat/callback.h> #include <compat/arch-x86_32.h> -void compat_show_guest_stack(struct cpu_user_regs *regs, int debug_stack_lines) -{ - unsigned int i, *stack, addr; +void compat_show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs, + int debug_stack_lines) +{ + unsigned int i, *stack, addr, mask = STACK_SIZE; stack = (unsigned int *)(unsigned long)regs->_esp; printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack); + if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) ) + { + printk("Guest-inaccessible memory.\n"); + return; + } + + if ( v != current ) + { + struct vcpu *vcpu; + + ASSERT(guest_kernel_mode(v, regs)); + addr = read_cr3() >> PAGE_SHIFT; + for_each_vcpu( v->domain, vcpu ) + if ( pagetable_get_pfn(vcpu->arch.guest_table) == addr ) + break; + if ( !vcpu ) + { + stack = do_page_walk(v, (unsigned long)stack); + if ( (unsigned long)stack < PAGE_SIZE ) + { + printk("Inaccessible guest memory.\n"); + return; + } + mask = PAGE_SIZE; + } + } + for ( i = 0; i 
< debug_stack_lines * 8; i++ ) { - if ( (((long)stack + 3) & (STACK_SIZE - 4)) == 0 ) - break; - if ( get_user(addr, stack) ) + if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask ) + break; + if ( __get_user(addr, stack) ) { if ( i != 0 ) printk("\n "); diff -r cb6f8a34b59a -r 67a0ffade665 xen/arch/x86/x86_64/mm.c --- a/xen/arch/x86/x86_64/mm.c Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/arch/x86/x86_64/mm.c Tue Jun 16 13:57:18 2009 +0100 @@ -101,6 +101,47 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l BUG_ON(l3e_get_flags(*pl3e) & _PAGE_PSE); return l3e_to_l2e(*pl3e) + l2_table_offset(v); +} + +void *do_page_walk(struct vcpu *v, unsigned long addr) +{ + unsigned long mfn = pagetable_get_pfn(v->arch.guest_table); + l4_pgentry_t l4e, *l4t; + l3_pgentry_t l3e, *l3t; + l2_pgentry_t l2e, *l2t; + l1_pgentry_t l1e, *l1t; + + if ( is_hvm_vcpu(v) ) + return NULL; + + l4t = mfn_to_virt(mfn); + l4e = l4t[l4_table_offset(addr)]; + mfn = l4e_get_pfn(l4e); + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ) + return NULL; + + l3t = mfn_to_virt(mfn); + l3e = l3t[l3_table_offset(addr)]; + mfn = l3e_get_pfn(l3e); + if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) ) + return NULL; + if ( (l3e_get_flags(l3e) & _PAGE_PSE) ) + return mfn_to_virt(mfn) + (addr & ((1UL << L3_PAGETABLE_SHIFT) - 1)); + + l2t = mfn_to_virt(mfn); + l2e = l2t[l2_table_offset(addr)]; + mfn = l2e_get_pfn(l2e); + if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) ) + return NULL; + if ( (l2e_get_flags(l2e) & _PAGE_PSE) ) + return mfn_to_virt(mfn) + (addr & ((1UL << L2_PAGETABLE_SHIFT) - 1)); + + l1t = mfn_to_virt(mfn); + l1e = l1t[l1_table_offset(addr)]; + mfn = l1e_get_pfn(l1e); + if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) ) + return NULL; + + return mfn_to_virt(mfn) + (addr & ~PAGE_MASK); } void __init paging_init(void) diff -r cb6f8a34b59a -r 67a0ffade665 xen/include/asm-x86/mm.h --- a/xen/include/asm-x86/mm.h Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/include/asm-x86/mm.h Tue Jun 16
13:57:18 2009 +0100 @@ -475,6 +475,7 @@ void make_cr3(struct vcpu *v, unsigned l void make_cr3(struct vcpu *v, unsigned long mfn); void update_cr3(struct vcpu *v); void propagate_page_fault(unsigned long addr, u16 error_code); +void *do_page_walk(struct vcpu *v, unsigned long addr); int __sync_lazy_execstate(void); diff -r cb6f8a34b59a -r 67a0ffade665 xen/include/asm-x86/processor.h --- a/xen/include/asm-x86/processor.h Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/include/asm-x86/processor.h Tue Jun 16 13:57:18 2009 +0100 @@ -536,9 +536,9 @@ asmlinkage void fatal_trap(int trapnr, s asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs); #ifdef CONFIG_COMPAT -void compat_show_guest_stack(struct cpu_user_regs *, int lines); -#else -#define compat_show_guest_stack(regs, lines) ((void)0) +void compat_show_guest_stack(struct vcpu *, struct cpu_user_regs *, int lines); +#else +#define compat_show_guest_stack(vcpu, regs, lines) ((void)0) #endif extern void mtrr_ap_init(void); diff -r cb6f8a34b59a -r 67a0ffade665 xen/include/asm-x86/x86_64/uaccess.h --- a/xen/include/asm-x86/x86_64/uaccess.h Tue Jun 16 13:52:13 2009 +0100 +++ b/xen/include/asm-x86/x86_64/uaccess.h Tue Jun 16 13:57:18 2009 +0100 @@ -27,11 +27,14 @@ DECLARE_PER_CPU(char, compat_arg_xlat[CO #define array_access_ok(addr, count, size) \ (access_ok(addr, (count)*(size))) -#define __compat_addr_ok(addr) \ - ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain)) +#define __compat_addr_ok(d, addr) \ + ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(d)) + +#define __compat_access_ok(d, addr, size) \ + __compat_addr_ok(d, (unsigned long)(addr) + ((size) ? (size) - 1 : 0)) #define compat_access_ok(addr, size) \ - __compat_addr_ok((unsigned long)(addr) + ((size) ? 
(size) - 1 : 0)) + __compat_access_ok(current->domain, addr, size) #define compat_array_access_ok(addr,count,size) \ (likely((count) < (~0U / (size))) && \ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |