
[xen master] x86/PVH: actually show Dom0's stacks from debug key '0'



commit cae8e262727bd5763210dd0e7ae314b39fde998b
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Wed Sep 29 11:57:22 2021 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Sep 29 11:57:22 2021 +0200

    x86/PVH: actually show Dom0's stacks from debug key '0'
    
    show_guest_stack() does nothing for HVM. Introduce an HVM-specific
    dumping function, paralleling the 64- and 32-bit PV ones. We don't know
    the real stack size, so only dump up to the next page boundary.
    
    Rather than adding a vcpu parameter to hvm_copy_from_guest_linear(),
    introduce hvm_copy_from_vcpu_linear(), which in exchange - for now at
    least - doesn't need a "pfinfo" parameter.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
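    
    A minimal standalone sketch (not part of the patch, and assuming the
    usual 4KiB x86 page size) of the "stop at the next page boundary"
    check: show_hvm_stack() below applies the same XOR test to the first
    and last byte address of each stack slot it is about to read.
    
        #include <stdbool.h>
        #include <stdio.h>
        
        #define PAGE_SIZE 4096UL        /* assumed x86 page size */
        
        /* True if a 'bytes'-wide read at 'addr' straddles a page boundary. */
        static bool crosses_page(unsigned long addr, unsigned int bytes)
        {
            return ((addr ^ (addr + bytes - 1)) & PAGE_SIZE) != 0;
        }
        
        int main(void)
        {
            printf("%d\n", crosses_page(0x0ff8, 8));  /* 0: fits in the page */
            printf("%d\n", crosses_page(0x0ffc, 8));  /* 1: crosses 0x1000 */
            return 0;
        }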
---
 xen/arch/x86/hvm/hvm.c            |  9 ++++
 xen/arch/x86/traps.c              | 87 +++++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/hvm/support.h |  3 ++
 3 files changed, 95 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c8c86dd6a6..aa418a3ca1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3410,6 +3410,15 @@ enum hvm_translation_result hvm_copy_from_guest_linear(
                       PFEC_page_present | pfec, pfinfo);
 }
 
+enum hvm_translation_result hvm_copy_from_vcpu_linear(
+    void *buf, unsigned long addr, unsigned int size, struct vcpu *v,
+    unsigned int pfec)
+{
+    return __hvm_copy(buf, addr, size, v,
+                      HVMCOPY_from_guest | HVMCOPY_linear,
+                      PFEC_page_present | pfec, NULL);
+}
+
 unsigned int copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c2e2603c39..64f3396f20 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -365,6 +365,71 @@ static void show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs)
     printk("\n");
 }
 
+static void show_hvm_stack(struct vcpu *v, const struct cpu_user_regs *regs)
+{
+#ifdef CONFIG_HVM
+    unsigned long sp = regs->rsp, addr;
+    unsigned int i, bytes, words_per_line, pfec = PFEC_page_present;
+    struct segment_register ss, cs;
+
+    hvm_get_segment_register(v, x86_seg_ss, &ss);
+    hvm_get_segment_register(v, x86_seg_cs, &cs);
+
+    if ( hvm_long_mode_active(v) && cs.l )
+        i = 16, bytes = 8;
+    else
+    {
+        sp = ss.db ? (uint32_t)sp : (uint16_t)sp;
+        i = ss.db ? 8 : 4;
+        bytes = cs.db ? 4 : 2;
+    }
+
+    if ( bytes == 8 || (ss.db && !ss.base) )
+        printk("Guest stack trace from sp=%0*lx:", i, sp);
+    else
+        printk("Guest stack trace from ss:sp=%04x:%0*lx:", ss.sel, i, sp);
+
+    if ( !hvm_vcpu_virtual_to_linear(v, x86_seg_ss, &ss, sp, bytes,
+                                     hvm_access_read, &cs, &addr) )
+    {
+        printk(" Guest-inaccessible memory\n");
+        return;
+    }
+
+    if ( ss.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
+    words_per_line = stack_words_per_line * (sizeof(void *) / bytes);
+    for ( i = 0; i < debug_stack_lines * words_per_line; )
+    {
+        unsigned long val = 0;
+
+        if ( (addr ^ (addr + bytes - 1)) & PAGE_SIZE )
+            break;
+
+        if ( !(i++ % words_per_line) )
+            printk("\n  ");
+
+        if ( hvm_copy_from_vcpu_linear(&val, addr, bytes, v,
+                                       pfec) != HVMTRANS_okay )
+        {
+            printk(" Fault while accessing guest memory.");
+            break;
+        }
+
+        printk(" %0*lx", 2 * bytes, val);
+
+        addr += bytes;
+        if ( !(addr & (PAGE_SIZE - 1)) )
+            break;
+    }
+
+    if ( !i )
+        printk(" Stack empty.");
+    printk("\n");
+#endif
+}
+
 /*
  * Notes for get_{stack,shstk}*_bottom() helpers
  *
@@ -630,7 +695,7 @@ void show_execution_state(const struct cpu_user_regs *regs)
 
 void vcpu_show_execution_state(struct vcpu *v)
 {
-    unsigned long flags;
+    unsigned long flags = 0;
 
     if ( test_bit(_VPF_down, &v->pause_flags) )
     {
@@ -668,10 +733,24 @@ void vcpu_show_execution_state(struct vcpu *v)
     flags = console_lock_recursive_irqsave();
 
     vcpu_show_registers(v);
-    if ( guest_kernel_mode(v, &v->arch.user_regs) )
-        show_guest_stack(v, &v->arch.user_regs);
 
-    console_unlock_recursive_irqrestore(flags);
+    if ( is_hvm_vcpu(v) )
+    {
+        /*
+         * Stop interleaving prevention: The necessary P2M lookups involve
+         * locking, which has to occur with IRQs enabled.
+         */
+        console_unlock_recursive_irqrestore(flags);
+
+        show_hvm_stack(v, &v->arch.user_regs);
+    }
+    else
+    {
+        if ( guest_kernel_mode(v, &v->arch.user_regs) )
+            show_guest_stack(v, &v->arch.user_regs);
+
+        console_unlock_recursive_irqrestore(flags);
+    }
 
 #ifdef CONFIG_HVM
     if ( cpu_has_vmx && is_hvm_vcpu(v) )
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index a4950e3d4b..6b583738ec 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -101,6 +101,9 @@ enum hvm_translation_result hvm_copy_to_guest_linear(
 enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, unsigned int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
+enum hvm_translation_result hvm_copy_from_vcpu_linear(
+    void *buf, unsigned long addr, unsigned int size, struct vcpu *v,
+    unsigned int pfec);
 
 /*
  * Get a reference on the page under an HVM physical or linear address.  If
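
For reference (editorial annotation, not part of the patch): the two
linear-copy helpers now declared side by side in support.h split the
work as follows - the pre-existing one implicitly acts on the current
vCPU and reports translation faults via "pfinfo", while the new one
takes an explicit vCPU and, for now, no "pfinfo":

    /* Pre-existing: implied vCPU is 'current', faults reported via pfinfo. */
    enum hvm_translation_result hvm_copy_from_guest_linear(
        void *buf, unsigned long addr, unsigned int size, uint32_t pfec,
        pagefault_info_t *pfinfo);

    /* New: explicit vCPU, no pfinfo - its only caller in this patch,
     * show_hvm_stack(), doesn't need fault details. */
    enum hvm_translation_result hvm_copy_from_vcpu_linear(
        void *buf, unsigned long addr, unsigned int size, struct vcpu *v,
        unsigned int pfec);

With this in place, triggering debug key '0' (e.g. via "xl debug-keys 0")
on a system with a PVH Dom0 should dump the guest stacks in addition to
the register state.
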
--
generated by git-patchbot for /home/xen/git/xen.git#master