
[Xen-devel] [PATCH] xen: Improvements to domain_crash()



The original reason for this patch was to fix a livepatching problem:
unnecessarily large livepatches due to the use of __LINE__.

A second problem is one of debuggability.  A number of domain_crash()
invocations have no logging at all, and a number of others only log when
compiled with a debug hypervisor.

Change the interface to require the caller to pass a printk() message, which
is emitted at guest error level.  This should ensure that every time a domain
is crashed, an informative log message is also present.

Update all callers to either merge with a previous printk(), or invent an
informative log message.  A few select callers are switched to the
non-printing version, where they have already emitted a relevant state dump.
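
For reference, the new interface ends up roughly like the sketch below.  This
is illustrative only: the real change is the xen/include/xen/sched.h hunk near
the end of the patch, and the exact prototype, log prefix and macro body here
are assumptions rather than quotes from it.

    /* Crash a domain without logging (caller has already dumped state). */
    void __domain_crash(struct domain *d);

    /*
     * Crash a domain and emit an explanatory message at guest error level.
     * Deliberately avoids __LINE__, so livepatches don't grow simply because
     * line numbers shift.
     */
    #define domain_crash(d, fmt, args...)                        \
        do {                                                     \
            printk(XENLOG_G_ERR "domain_crash: " fmt, ## args);  \
            __domain_crash(d);                                   \
        } while ( 0 )

With that shape, each caller can fold its previous gdprintk()/printk() into
the domain_crash() invocation itself, which is what the hunks below do.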

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Juergen Gross <jgross@xxxxxxxx>
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
CC: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
CC: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>

It is unfortunate that this is one monolithic patch, but I can't find any
reasonable way to split it up.
---
 xen/arch/arm/mem_access.c               | 12 ++----
 xen/arch/arm/traps.c                    |  6 +--
 xen/arch/x86/cpu/mcheck/mcaction.c      |  2 +-
 xen/arch/x86/domain.c                   | 13 ++----
 xen/arch/x86/hvm/emulate.c              |  9 ++--
 xen/arch/x86/hvm/hvm.c                  | 74 ++++++++++++++++-----------------
 xen/arch/x86/hvm/intercept.c            | 25 +++++++----
 xen/arch/x86/hvm/io.c                   |  3 +-
 xen/arch/x86/hvm/ioreq.c                | 19 +++++----
 xen/arch/x86/hvm/svm/svm.c              | 53 ++++++++++-------------
 xen/arch/x86/hvm/viridian.c             |  2 +-
 xen/arch/x86/hvm/vlapic.c               |  5 +--
 xen/arch/x86/hvm/vmx/realmode.c         |  2 +-
 xen/arch/x86/hvm/vmx/vmcs.c             |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c              | 55 ++++++++++--------------
 xen/arch/x86/hvm/vmx/vvmx.c             |  4 +-
 xen/arch/x86/hvm/vpt.c                  | 10 ++---
 xen/arch/x86/mm.c                       |  9 ++--
 xen/arch/x86/mm/hap/hap.c               |  7 +---
 xen/arch/x86/mm/hap/nested_hap.c        |  9 ++--
 xen/arch/x86/mm/mem_access.c            |  5 +--
 xen/arch/x86/mm/p2m-pod.c               | 19 ++++-----
 xen/arch/x86/mm/p2m.c                   | 35 ++++++----------
 xen/arch/x86/mm/shadow/common.c         | 42 +++++++------------
 xen/arch/x86/mm/shadow/multi.c          | 17 ++++----
 xen/arch/x86/msi.c                      |  2 +-
 xen/arch/x86/pv/iret.c                  | 30 ++++++-------
 xen/arch/x86/traps.c                    |  8 ++--
 xen/arch/x86/x86_emulate/x86_emulate.c  |  2 +-
 xen/arch/x86/xstate.c                   | 14 +++----
 xen/common/compat/grant_table.c         |  2 +-
 xen/common/compat/memory.c              |  6 +--
 xen/common/domain.c                     |  2 +-
 xen/common/grant_table.c                | 17 +++-----
 xen/common/memory.c                     |  2 +-
 xen/common/page_alloc.c                 |  2 +-
 xen/common/wait.c                       | 12 ++----
 xen/drivers/passthrough/amd/iommu_map.c | 25 +++++------
 xen/drivers/passthrough/iommu.c         |  8 ++--
 xen/drivers/passthrough/pci.c           |  2 +-
 xen/drivers/passthrough/vtd/iommu.c     |  2 +-
 xen/include/xen/sched.h                 | 10 +++--
 42 files changed, 253 insertions(+), 332 deletions(-)

diff --git a/xen/arch/arm/mem_access.c b/xen/arch/arm/mem_access.c
index ba4ec78..be99fbd 100644
--- a/xen/arch/arm/mem_access.c
+++ b/xen/arch/arm/mem_access.c
@@ -293,12 +293,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
     {
         /* No listener */
         if ( p2m->access_required )
-        {
-            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
-                                  "no vm_event listener VCPU %d, dom %d\n",
-                                  v->vcpu_id, v->domain->domain_id);
-            domain_crash(v->domain);
-        }
+            domain_crash(v->domain, "No vm_event listener\n");
         else
         {
             /* n2rwx was already handled */
@@ -337,8 +332,9 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
         req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
         req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
 
-        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
-            domain_crash(v->domain);
+        rc = monitor_traps(v, (xma != XENMEM_access_n2rwx), req);
+        if ( rc < 0 )
+            domain_crash(v->domain, "monitor_traps() returned %d\n", rc);
 
         xfree(req);
     }
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 9ae64ae..151794b 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1532,9 +1532,9 @@ static bool check_multicall_32bit_clean(struct multicall_entry *multi)
     {
         if ( unlikely(multi->args[i] & 0xffffffff00000000ULL) )
         {
-            printk("%pv: multicall argument %d is not 32-bit clean %"PRIx64"\n",
-                   current, i, multi->args[i]);
-            domain_crash(current->domain);
+            domain_crash(current->domain,
+                         "Multicall arg %d is not 32-bit clean: %"PRIx64"\n",
+                         i, multi->args[i]);
             return false;
         }
     }
diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
index e422674..a9b3772 100644
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -142,7 +142,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
                 return;
 vmce_failed:
                 put_domain(d);
-                domain_crash(d);
+                domain_crash(d, "Failed to inject vMCE\n");
             }
         }
     }
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 64b40c7..f84cbe3 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1380,11 +1380,8 @@ static void load_segments(struct vcpu *n)
                  put_user(uregs->fs,           esp-5) |
                  put_user(uregs->es,           esp-6) |
                  put_user(uregs->ds,           esp-7) )
-            {
-                gprintk(XENLOG_ERR,
-                        "error while creating compat failsafe callback frame\n");
-                domain_crash(n->domain);
-            }
+                domain_crash(n->domain,
+                             "Fault creating compat failsafe frame\n");
 
             if ( n->arch.vgc_flags & VGCF_failsafe_disables_events )
                 vcpu_info(n, evtchn_upcall_mask) = 1;
@@ -1419,11 +1416,7 @@ static void load_segments(struct vcpu *n)
              put_user(uregs->ds,           rsp- 9) |
              put_user(regs->r11,           rsp-10) |
              put_user(regs->rcx,           rsp-11) )
-        {
-            gprintk(XENLOG_ERR,
-                    "error while creating failsafe callback frame\n");
-            domain_crash(n->domain);
-        }
+            domain_crash(n->domain, "Fault creating failsafe frame\n");
 
         if ( n->arch.vgc_flags & VGCF_failsafe_disables_events )
             vcpu_info(n, evtchn_upcall_mask) = 1;
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 20d1d5b..5e78fd8 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -165,7 +165,7 @@ static int hvmemul_do_io(
              (p.df != df) ||
              (p.data_is_ptr != data_is_addr) ||
              (data_is_addr && (p.data != data)) )
-            domain_crash(currd);
+            domain_crash(currd, "Bad IOREQ re-issue\n");
 
         if ( data_is_addr )
             return X86EMUL_UNHANDLEABLE;
@@ -374,7 +374,8 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page)
     /* This code should not be reached if the gmfn is not RAM */
     if ( p2m_is_mmio(p2mt) )
     {
-        domain_crash(curr_d);
+        domain_crash(curr_d,
+                     "Attempting to acquire MMIO gfn %"PRI_gfn"\n", gmfn);
 
         put_page(*page);
         return X86EMUL_UNHANDLEABLE;
@@ -885,7 +886,7 @@ static int hvmemul_phys_mmio_access(
             if ( dir == IOREQ_READ )
                 memcpy(&buffer[offset], &cache->buffer[offset], chunk);
             else if ( memcmp(&buffer[offset], &cache->buffer[offset], chunk) != 0 )
-                domain_crash(current->domain);
+                domain_crash(current->domain, "Emulation cache mismatch\n");
         }
         else
         {
@@ -945,7 +946,7 @@ static struct hvm_mmio_cache *hvmemul_find_mmio_cache(
     i = vio->mmio_cache_count++;
     if( i == ARRAY_SIZE(vio->mmio_cache) )
     {
-        domain_crash(current->domain);
+        domain_crash(current->domain, "Too many cached entries\n");
         return NULL;
     }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 72c51fa..69f1873 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2078,19 +2078,15 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
         rc = X86EMUL_OKAY;
         break;
 
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        goto exit_and_crash;
+    default: /* VT-x/SVM intercept malfunction? */
+        domain_crash(curr->domain, "Invalid cr %u\n", cr);
+        rc = X86EMUL_UNHANDLEABLE;
     }
 
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
 
     return rc;
-
- exit_and_crash:
-    domain_crash(curr->domain);
-    return X86EMUL_UNHANDLEABLE;
 }
 
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
@@ -2109,9 +2105,10 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
     case 8:
         val = (vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI) & 0xf0) >> 4;
         break;
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %u\n", cr);
-        goto exit_and_crash;
+
+    default: /* VT-x/SVM intercept malfunction? */
+        domain_crash(curr->domain, "Invalid cr %u\n", cr);
+        return X86EMUL_UNHANDLEABLE;
     }
 
     *reg = val;
@@ -2119,10 +2116,6 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%u, value = %lx", cr, val);
 
     return X86EMUL_OKAY;
-
- exit_and_crash:
-    domain_crash(curr->domain);
-    return X86EMUL_UNHANDLEABLE;
 }
 
 void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
@@ -2229,9 +2222,8 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
             page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
             if ( !page )
             {
-                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx\n",
-                         v->arch.hvm_vcpu.guest_cr[3]);
-                domain_crash(d);
+                domain_crash(d, "Invalid CR3 value %lx\n",
+                             v->arch.hvm_vcpu.guest_cr[3]);
                 return X86EMUL_UNHANDLEABLE;
             }
 
@@ -2315,12 +2307,17 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
          (value != v->arch.hvm_vcpu.guest_cr[3]) )
     {
+        unsigned long gfn = paddr_to_pfn(value);
+
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
-                                 NULL, P2M_ALLOC);
+        page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
         if ( !page )
-            goto bad_cr3;
+        {
+            domain_crash(v->domain, "Bad CR3 gfn %"PRI_gfn"\n", gfn);
+
+            return X86EMUL_UNHANDLEABLE;
+        }
 
         put_page(pagetable_get_page(v->arch.guest_table));
         v->arch.guest_table = pagetable_from_page(page);
@@ -2331,11 +2328,6 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
     v->arch.hvm_vcpu.guest_cr[3] = value;
     paging_update_cr3(v, noflush);
     return X86EMUL_OKAY;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
-    domain_crash(v->domain);
-    return X86EMUL_UNHANDLEABLE;
 }
 
 int hvm_set_cr4(unsigned long value, bool_t may_defer)
@@ -2669,13 +2661,13 @@ static void *hvm_map_entry(unsigned long va, bool_t *writable)
 {
     unsigned long gfn;
     uint32_t pfec;
-    char *v;
+    void *v = NULL;
 
     if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
     {
-        gdprintk(XENLOG_ERR, "Descriptor table entry "
-                 "straddles page boundary\n");
-        goto fail;
+        domain_crash(current->domain,
+                     "Descriptor table entry straddles page boundary\n");
+        goto out;
     }
 
     /*
@@ -2686,17 +2678,22 @@ static void *hvm_map_entry(unsigned long va, bool_t *writable)
     pfec = PFEC_page_present;
     gfn = paging_gva_to_gfn(current, va, &pfec);
     if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-        goto fail;
+    {
+        domain_crash(current->domain, "Descriptor table is paged or shared\n");
+        goto out;
+    }
 
     v = hvm_map_guest_frame_rw(gfn, 0, writable);
     if ( v == NULL )
-        goto fail;
+    {
+        domain_crash(current->domain, "Failed to map descriptor table\n");
+        goto out;
+    }
 
-    return v + (va & ~PAGE_MASK);
+    v += (va & ~PAGE_MASK);
 
- fail:
-    domain_crash(current->domain);
-    return NULL;
+ out:
+    return v;
 }
 
 static void hvm_unmap_entry(void *p)
@@ -3719,7 +3716,7 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
                                       descriptor, is_write);
     }
     else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
-        domain_crash(currd);
+        domain_crash(currd, "sysdesc emulation failure\n");
 
     return X86EMUL_OKAY;
 }
@@ -4302,7 +4299,7 @@ static int hvmop_set_param(
         if ( a.value != (uint32_t)a.value )
         {
             if ( d == curr_d )
-                domain_crash(d);
+                domain_crash(d, "Invalid VM86_TSS value %#lx\n", a.value);
             rc = -EINVAL;
         }
         /* Old hvmloader binaries hardcode the size to 128 bytes. */
@@ -4315,7 +4312,8 @@ static int hvmop_set_param(
         if ( (a.value >> 32) < sizeof(struct tss32) )
         {
             if ( d == curr_d )
-                domain_crash(d);
+                domain_crash(d, "Invalid VM86_TSS_SIZED value %#lx\n",
+                             a.value >> 32);
             rc = -EINVAL;
         }
         /*
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 2bc156d..edaf75a 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -44,7 +44,7 @@ static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
     last = hvm_mmio_last_byte(p);
     if ( last != first &&
          !handler->mmio.ops->check(current, last) )
-        domain_crash(current->domain);
+        domain_crash(current->domain, "Fatal MMIO error\n");
 
     return 1;
 }
@@ -134,8 +134,10 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
 
             if ( p->data_is_ptr )
             {
-                switch ( hvm_copy_to_guest_phys(p->data + step * i,
-                                                &data, p->size, current) )
+                enum hvm_translation_result res =
+                    hvm_copy_to_guest_phys(p->data + step * i,
+                                           &data, p->size, current);
+                switch ( res )
                 {
                 case HVMTRANS_okay:
                     break;
@@ -148,7 +150,9 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
-                    domain_crash(current->domain);
+                    domain_crash(current->domain,
+                                 "Error copying %u bytes to gpa %"PRIpaddr": %d\n",
+                                 p->size, p->data + step * i, res);
                     return X86EMUL_UNHANDLEABLE;
                 }
             }
@@ -162,9 +166,12 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
         {
             if ( p->data_is_ptr )
             {
+                enum hvm_translation_result res;
+
                 data = 0;
-                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
-                                                  p->size) )
+                res = hvm_copy_from_guest_phys(&data, p->data + step * i,
+                                               p->size);
+                switch ( res )
                 {
                 case HVMTRANS_okay:
                     break;
@@ -177,7 +184,9 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
-                    domain_crash(current->domain);
+                    domain_crash(current->domain,
+                                 "Error copying %u bytes from gpa %"PRIpaddr": %d\n",
+                                 p->size, p->data + step * i, res);
                     return X86EMUL_UNHANDLEABLE;
                 }
             }
@@ -263,7 +272,7 @@ struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
 
     if ( i == NR_IO_HANDLERS )
     {
-        domain_crash(d);
+        domain_crash(d, "Too many IO handlers\n");
         return NULL;
     }
 
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index bf4d874..03ae3de 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -167,8 +167,7 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)
         break;
 
     default:
-        gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
-        domain_crash(curr->domain);
+        domain_crash(curr->domain, "Unexpected PIO result %d\n", rc);
         return false;
     }
 
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 940a2c9..7052d20 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -150,10 +150,10 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 
         if ( unlikely(state < prev_state) )
         {
-            gdprintk(XENLOG_ERR, "Weird HVM ioreq state transition %u -> %u\n",
-                     prev_state, state);
             sv->pending = false;
-            domain_crash(sv->vcpu->domain);
+            domain_crash(sv->vcpu->domain,
+                         "Weird HVM ioreq state transition %u -> %u\n",
+                         prev_state, state);
             return false; /* bail */
         }
 
@@ -171,9 +171,9 @@ static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
                                          state != prev_state; }));
             goto recheck;
         default:
-            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
             sv->pending = false;
-            domain_crash(sv->vcpu->domain);
+            domain_crash(sv->vcpu->domain,
+                         "Weird HVM iorequest state %u\n", state);
             return false; /* bail */
         }
     }
@@ -415,13 +415,16 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
 {
     struct domain *d = s->target;
     struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
+    int rc;
 
     if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) )
         return;
 
-    if ( guest_physmap_remove_page(d, iorp->gfn,
-                                   page_to_mfn(iorp->page), 0) )
-        domain_crash(d);
+    rc = guest_physmap_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0);
+    if ( rc )
+        domain_crash(d, "Error removing gfn %"PRI_gfn" from physmap: %d\n",
+                     gfn_x(iorp->gfn), rc);
+
     clear_page(iorp->va);
 }
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a16f372..db4980d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -91,7 +91,7 @@ static void svm_crash_or_fault(struct vcpu *v)
     if ( vmcb_get_cpl(v->arch.hvm_svm.vmcb) )
         hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
     else
-        domain_crash(v->domain);
+        __domain_crash(v->domain);
 }
 
 void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
@@ -103,7 +103,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
 
     if ( unlikely(inst_len > MAX_INST_LEN) )
     {
-        gdprintk(XENLOG_ERR, "Bad instruction length %u\n", inst_len);
+        gprintk(XENLOG_ERR, "Bad instruction length %u\n", inst_len);
         svm_crash_or_fault(curr);
         return;
     }
@@ -411,9 +411,10 @@ static void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 {
     svm_load_cpu_state(v, ctxt);
-    if (svm_vmcb_restore(v, ctxt)) {
-        gdprintk(XENLOG_ERR, "svm_vmcb restore failed!\n");
-        domain_crash(v->domain);
+
+    if ( svm_vmcb_restore(v, ctxt) )
+    {
+        domain_crash(v->domain, "svm_vmcb restore failed!\n");
         return -EINVAL;
     }
 
@@ -747,7 +748,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
 
     default:
         ASSERT_UNREACHABLE();
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Bad segment %u\n", seg);
         *reg = (struct segment_register){};
     }
 }
@@ -783,7 +784,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
 
     default:
         ASSERT_UNREACHABLE();
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Bad segment %u\n", seg);
         return;
     }
 
@@ -1783,10 +1784,10 @@ static void svm_do_nested_pgfault(struct vcpu *v,
         p2m = p2m_get_p2m(v);
     /* Everything else is an error. */
     mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
-    gdprintk(XENLOG_ERR,
-         "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
-         gpa, mfn_x(mfn), p2mt);
-    domain_crash(v->domain);
+
+    domain_crash(v->domain,
+                 "SVM violation gpa %#"PRIpaddr", mfn %"PRI_mfn", type %u\n",
+                 gpa, mfn_x(mfn), p2mt);
 }
 
 static void svm_fpu_dirty_intercept(void)
@@ -2461,10 +2462,7 @@ static void svm_vmexit_mce_intercept(
     struct vcpu *v, struct cpu_user_regs *regs)
 {
     if ( svm_is_erratum_383(regs) )
-    {
-        gdprintk(XENLOG_ERR, "SVM hits AMD erratum 383\n");
-        domain_crash(v->domain);
-    }
+        domain_crash(v->domain, "SVM hits AMD erratum 383\n");
 }
 
 static void svm_wbinvd_intercept(void)
@@ -2679,8 +2677,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                 nestedsvm_vmexit_defer(v, exit_reason, exitinfo1, exitinfo2);
                 goto out;
             case NESTEDHVM_VMEXIT_FATALERROR:
-                gdprintk(XENLOG_ERR, "unexpected nestedsvm_vmexit() error\n");
-                domain_crash(v->domain);
+                domain_crash(v->domain,
+                             "nestedsvm_vmexit() returned FATALERROR\n");
                 goto out;
             default:
                 BUG();
@@ -2693,14 +2691,12 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                 "nestedsvm_check_intercepts() returned 
NESTEDHVM_VMEXIT_ERROR\n");
             goto out;
         case NESTEDHVM_VMEXIT_FATALERROR:
-            gdprintk(XENLOG_ERR,
-                "unexpected nestedsvm_check_intercepts() error\n");
-            domain_crash(v->domain);
+            domain_crash(v->domain,
+                         "nestedsvm_check_intercepts() returned FATALERROR\n");
             goto out;
         default:
-            gdprintk(XENLOG_INFO, "nestedsvm_check_intercepts() returned %i\n",
-                nsret);
-            domain_crash(v->domain);
+            domain_crash(v->domain,
+                         "nestedsvm_check_intercepts() returned %u\n", nsret);
             goto out;
         }
     }
@@ -2709,7 +2705,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     {
         gdprintk(XENLOG_ERR, "invalid VMCB state:\n");
         svm_vmcb_dump(__func__, vmcb);
-        domain_crash(v->domain);
+        __domain_crash(v->domain);
         goto out;
     }
 
@@ -3028,12 +3024,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         if ( rc >= 0 )
             svm_do_nested_pgfault(v, regs, vmcb->exitinfo1, vmcb->exitinfo2);
         else
-        {
-            printk(XENLOG_G_ERR
-                   "%pv: Error %d handling NPF (gpa=%08lx ec=%04lx)\n",
-                   v, rc, vmcb->exitinfo2, vmcb->exitinfo1);
-            domain_crash(v->domain);
-        }
+            domain_crash(v->domain,
+                         "%pv: Error %d handling NPF (gpa=%08lx ec=%04lx)\n",
+                         v, rc, vmcb->exitinfo2, vmcb->exitinfo1);
         v->arch.hvm_svm.cached_insn_len = 0;
         break;
 
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 4860651..1d5df7f 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -446,7 +446,7 @@ void viridian_apic_assist_set(struct vcpu *v)
      * to make the problem clear.
      */
     if ( v->arch.hvm_vcpu.viridian.vp_assist.pending )
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Assist already pending\n");
 
     v->arch.hvm_vcpu.viridian.vp_assist.pending = true;
     *va |= 1u;
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index ec089cc..f031ecd 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -379,9 +379,8 @@ static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low)
         BUG(); /* Handled in vlapic_ipi(). */
 
     default:
-        gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode in ICR %x\n",
-                 icr_low);
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Unsupported delivery mode in ICR %x\n",
+                     icr_low);
     }
 }
 
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index b20d8c4..7cf33ed 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -148,7 +148,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
 
  fail:
     hvm_dump_emulation_state(XENLOG_G_ERR, "Real-mode", hvmemul_ctxt, rc);
-    domain_crash(curr->domain);
+    domain_crash(curr->domain, "Emulation failure\n");
 }
 
 void vmx_realmode(struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 6681032..186e22b 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1789,7 +1789,7 @@ void vmx_vmentry_failure(void)
          error == VMX_INSN_INVALID_HOST_STATE )
         vmcs_dump_vcpu(curr);
 
-    domain_crash(curr->domain);
+    __domain_crash(curr->domain);
 }
 
 void vmx_do_resume(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index fcd3225..0335239 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -799,8 +799,7 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 
     if ( vmx_vmcs_restore(v, ctxt) )
     {
-        gdprintk(XENLOG_ERR, "vmx_vmcs restore failed!\n");
-        domain_crash(v->domain);
+        domain_crash(v->domain, "vmx_vmcs_restore() failed\n");
         return -EINVAL;
     }
 
@@ -1381,18 +1380,21 @@ static void vmx_load_pdptrs(struct vcpu *v)
         return;
 
     if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
-        goto crash;
+    {
+        domain_crash(v->domain, "Bad cr3 %p\n", _p(cr3));
+        return;
+    }
 
     page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
     if ( !page )
     {
-        /* Ideally you don't want to crash but rather go into a wait 
-         * queue, but this is the wrong place. We're holding at least
-         * the paging lock */
-        gdprintk(XENLOG_ERR,
-                 "Bad cr3 on load pdptrs gfn %lx type %d\n",
-                 cr3 >> PAGE_SHIFT, (int) p2mt);
-        goto crash;
+        /*
+         * Ideally we don't want to crash but rather go into a wait queue, but
+         * this is the wrong place.  We're holding at least the paging lock.
+         */
+        domain_crash(v->domain, "Bad cr3 on load pdptrs gfn %lx type %d\n",
+                     cr3 >> PAGE_SHIFT, (int) p2mt);
+        return;
     }
 
     p = __map_domain_page(page);
@@ -1416,10 +1418,6 @@ static void vmx_load_pdptrs(struct vcpu *v)
 
     unmap_domain_page(p);
     put_page(page);
-    return;
-
- crash:
-    domain_crash(v->domain);
 }
 
 static void vmx_update_host_cr3(struct vcpu *v)
@@ -3195,8 +3193,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 
             if ( unlikely(!lbr) )
             {
-                gprintk(XENLOG_ERR, "Unknown Host LBR MSRs\n");
-                domain_crash(v->domain);
+                domain_crash(v->domain, "Unknown Host LBR MSRs\n");
                 return X86EMUL_OKAY;
             }
 
@@ -3210,9 +3207,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 
                     if ( unlikely(rc) )
                     {
-                        gprintk(XENLOG_ERR,
-                                "Guest load/save list error %d\n", rc);
-                        domain_crash(v->domain);
+                        domain_crash(v->domain,
+                                     "Guest load/save list error %d\n", rc);
                         return X86EMUL_OKAY;
                     }
 
@@ -3391,7 +3387,7 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa)
     if ( q.gla_valid )
         gprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
 
-    domain_crash(d);
+    domain_crash(d, "Unhandled EPT violation\n");
 }
 
 static void vmx_failed_vmentry(unsigned int exit_reason,
@@ -3439,11 +3435,8 @@ static void vmx_failed_vmentry(unsigned int exit_reason,
         break;
     }
 
-    printk("************* VMCS Area **************\n");
     vmcs_dump_vcpu(curr);
-    printk("**************************************\n");
-
-    domain_crash(curr->domain);
+    __domain_crash(curr->domain);
 }
 
 void vmx_enter_realmode(struct cpu_user_regs *regs)
@@ -3553,14 +3546,12 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
 
 static void vmx_handle_xsaves(void)
 {
-    gdprintk(XENLOG_ERR, "xsaves should not cause vmexit\n");
-    domain_crash(current->domain);
+    domain_crash(current->domain, "xsaves shouldn't vmexit\n");
 }
 
 static void vmx_handle_xrstors(void)
 {
-    gdprintk(XENLOG_ERR, "xrstors should not cause vmexit\n");
-    domain_crash(current->domain);
+    domain_crash(current->domain, "xrstors shouldn't vmexit\n");
 }
 
 static void vmx_handle_descriptor_access(uint32_t exit_reason)
@@ -3689,8 +3680,8 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             if ( (idx = p2m_find_altp2m_by_eptp(v->domain, eptp)) ==
                  INVALID_ALTP2M )
             {
-                gdprintk(XENLOG_ERR, "EPTP not found in alternate p2m list\n");
-                domain_crash(v->domain);
+                domain_crash(v->domain,
+                             "EPTP not found in alternate p2m list\n");
 
                 return;
             }
@@ -4222,7 +4213,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
             hvm_inject_hw_exception(TRAP_invalid_op,
                                     X86_EVENT_NO_EC);
         else
-            domain_crash(v->domain);
+            __domain_crash(v->domain);
         break;
     }
 
@@ -4259,7 +4250,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
                 regs->rip = regs->eip;
         }
         else
-            domain_crash(v->domain);
+            __domain_crash(v->domain);
     }
 }
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index b7d9a1a..b2849d2 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2332,8 +2332,8 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
         if ( !(ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP) )
             nvcpu->nv_vmexit_pending = 1;
         else if ( !nvmx->msrbitmap )
-            /* ACTIVATE_MSR_BITMAP set, but L2 bitmap not mapped??? */
-            domain_crash(v->domain);
+            domain_crash(v->domain,
+                         "ACTIVATE_MSR_BITMAP set, but L2 bitmap not mapped\n");
         else
             nvcpu->nv_vmexit_pending =
                 vmx_msr_is_intercepted(nvmx->msrbitmap, regs->ecx,
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 6ac4c91..96c8630 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -95,9 +95,7 @@ static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
     vector = vioapic_get_vector(v->domain, gsi);
     if ( vector < 0 )
     {
-        dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
-                v->domain->domain_id, gsi);
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Invalid GSI (%u) for platform timer\n", gsi);
         return -1;
     }
 
@@ -137,10 +135,8 @@ static int pt_irq_masked(struct periodic_time *pt)
 
         if ( mask < 0 )
         {
-            dprintk(XENLOG_WARNING,
-                    "d%d: invalid GSI (%u) for platform timer\n",
-                    v->domain->domain_id, gsi);
-            domain_crash(v->domain);
+            domain_crash(v->domain, "Invalid GSI (%u) for platform timer\n",
+                         gsi);
             return -1;
         }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 84979f2..c3a20a6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1200,12 +1200,9 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
      */
     if ( _PAGE_GNTTAB && (l1e_get_flags(l1e) & _PAGE_GNTTAB) &&
          !l1e_owner->is_shutting_down && !l1e_owner->is_dying )
-    {
-        gdprintk(XENLOG_WARNING,
-                 "Attempt to implicitly unmap a granted PTE %" PRIpte "\n",
-                 l1e_get_intpte(l1e));
-        domain_crash(l1e_owner);
-    }
+        domain_crash(l1e_owner,
+                     "Attempt to implicitly unmap a granted PTE %" PRIpte "\n",
+                     l1e_get_intpte(l1e));
 
     /*
      * Remember we didn't take a type-count of foreign writable mappings
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 946e301..27ad423 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -408,8 +408,7 @@ static mfn_t hap_make_monitor_table(struct vcpu *v)
     return m4mfn;
 
  oom:
-    printk(XENLOG_G_ERR "out of memory building monitor pagetable\n");
-    domain_crash(d);
+    domain_crash(d, "out of memory building monitor pagetable\n");
     return INVALID_MFN;
 }
 
@@ -639,10 +638,8 @@ void hap_vcpu_init(struct vcpu *v)
 static int hap_page_fault(struct vcpu *v, unsigned long va,
                           struct cpu_user_regs *regs)
 {
-    struct domain *d = v->domain;
+    domain_crash(v->domain, "Intercepted #PF from %pv with HAP enabled\n", v);
 
-    printk(XENLOG_G_ERR "Intercepted #PF from %pv with HAP enabled\n", v);
-    domain_crash(d);
     return 0;
 }
 
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index d2a07a5..856e103 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -116,12 +116,9 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
     rc = p2m_set_entry(p2m, _gfn(gfn), mfn, page_order, p2mt, p2ma);
 
     if ( rc )
-    {
-        gdprintk(XENLOG_ERR,
-                 "failed to set entry for %#"PRIx64" -> %#"PRIx64" rc:%d\n",
-                 L2_gpa, L0_gpa, rc);
-        domain_crash(p2m->domain);
-    }
+        domain_crash(p2m->domain,
+                     "failed to set entry for %#"PRIx64" -> %#"PRIx64" rc:%d\n",
+                     L2_gpa, L0_gpa, rc);
 }
 
 /* This function uses L2_gpa to walk the P2M page table in L1. If the
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 84d260e..ee88de8 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -186,10 +186,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         /* No listener */
         if ( p2m->access_required )
         {
-            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
-                                  "no vm_event listener VCPU %d, dom %d\n",
-                                  v->vcpu_id, d->domain_id);
-            domain_crash(v->domain);
+            domain_crash(v->domain, "No vm_event listener\n");
             return false;
         }
         else
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index ba37344..e437479 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -567,7 +567,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
              * impossible.
              */
             if ( order != 0 )
-                domain_crash(d);
+                domain_crash(d, "Fatal PoD error\n");
             goto out_unlock;
         }
         ret = 1UL << order;
@@ -617,7 +617,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
                                p2m_invalid, p2m->default_access) )
             {
                 ASSERT_UNREACHABLE();
-                domain_crash(d);
+                domain_crash(d, "Fatal PoD error\n");
                 goto out_unlock;
             }
             p2m->pod.entry_count -= n;
@@ -646,7 +646,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
                                p2m_invalid, p2m->default_access) )
             {
                 ASSERT_UNREACHABLE();
-                domain_crash(d);
+                domain_crash(d, "Fatal PoD error\n");
                 goto out_unlock;
             }
             p2m_tlb_flush_sync(p2m);
@@ -854,7 +854,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn)
                                 type0, p2m->default_access) )
     {
         ASSERT_UNREACHABLE();
-        domain_crash(d);
+        domain_crash(d, "Fatal PoD error\n");
     }
 
 out:
@@ -939,7 +939,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, int count)
                                types[i], p2m->default_access) )
             {
                 ASSERT_UNREACHABLE();
-                domain_crash(d);
+                domain_crash(d, "Fatal PoD error\n");
                 goto out_unmap;
             }
 
@@ -981,7 +981,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, int count)
                                types[i], p2m->default_access) )
             {
                 ASSERT_UNREACHABLE();
-                domain_crash(d);
+                domain_crash(d, "Fatal PoD error\n");
                 goto out_unmap;
             }
         }
@@ -1237,10 +1237,9 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
 out_of_memory:
     pod_unlock(p2m);
 
-    printk("%s: Dom%d out of PoD memory! (tot=%"PRIu32" ents=%ld dom%d)\n",
-           __func__, d->domain_id, d->tot_pages, p2m->pod.entry_count,
-           current->domain->domain_id);
-    domain_crash(d);
+    domain_crash(d, "d%d out of PoD memory (tot=%"PRIu32" ents=%ld dom%d)\n",
+                 d->domain_id, d->tot_pages, p2m->pod.entry_count,
+                 current->domain->domain_id);
     return false;
 out_fail:
     pod_unlock(p2m);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6020553..37e4110 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -451,7 +451,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
         /* Return invalid_mfn to avoid caller's access */
         mfn = INVALID_MFN;
         if ( q & P2M_ALLOC )
-            domain_crash(p2m->domain);
+            domain_crash(p2m->domain, "Broken page\n");
     }
 
     return mfn;
@@ -839,7 +839,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
         if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
         {
             /* Really shouldn't be unmapping grant/foreign maps this way */
-            domain_crash(d);
+            domain_crash(d, "Attempting to implicitly unmap grant/foreign frame\n");
             p2m_unlock(p2m);
             
             return -EINVAL;
@@ -991,11 +991,8 @@ void p2m_change_type_range(struct domain *d,
     if ( gfn < end )
         rc = p2m->change_entry_type_range(p2m, ot, nt, gfn, end - 1);
     if ( rc )
-    {
-        printk(XENLOG_G_ERR "Error %d changing Dom%d GFNs [%lx,%lx] from %d to %d\n",
-               rc, d->domain_id, start, end - 1, ot, nt);
-        domain_crash(d);
-    }
+        domain_crash(d, "Error %d changing d%d GFNs [%lx,%lx] from %d to %d\n",
+                     rc, d->domain_id, start, end - 1, ot, nt);
 
     switch ( nt )
     {
@@ -1011,11 +1008,8 @@ void p2m_change_type_range(struct domain *d,
         break;
     }
     if ( rc )
-    {
-        printk(XENLOG_G_ERR "Error %d manipulating Dom%d's log-dirty ranges\n",
-               rc, d->domain_id);
-        domain_crash(d);
-    }
+        domain_crash(d, "Error %d manipulating Dom%d's log-dirty ranges\n",
+                     rc, d->domain_id);
 
     p2m->defer_nested_flush = 0;
     if ( nestedhvm_enabled(d) )
@@ -1095,7 +1089,8 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
     if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
     {
         gfn_unlock(p2m, gfn, order);
-        domain_crash(d);
+        domain_crash(d, "Attempted to set gfn %"PRI_gfn", type %u\n",
+                     gfn_x(gfn), ot);
         return -ENOENT;
     }
     else if ( p2m_is_ram(ot) )
@@ -1523,12 +1518,10 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn_l)
     int rc = vm_event_claim_slot(d, d->vm_event_paging);
     if ( rc == -ENOSYS )
     {
-        gdprintk(XENLOG_ERR, "Domain %hu paging gfn %lx yet no ring "
-                             "in place\n", d->domain_id, gfn_l);
         /* Prevent the vcpu from faulting repeatedly on the same gfn */
         if ( v->domain == d )
             vcpu_pause_nosync(v);
-        domain_crash(d);
+        domain_crash(d, "Paging gfn %"PRI_gfn" with no ring set up\n", gfn_l);
         return;
     }
     else if ( rc < 0 )
@@ -2237,12 +2230,10 @@ bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
     p2m_unlock(*ap2m);
 
     if ( rv )
-    {
-        gdprintk(XENLOG_ERR,
-           "failed to set entry for %#"PRIx64" -> %#"PRIx64" p2m %#"PRIx64"\n",
-           gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m);
-        domain_crash(hp2m->domain);
-    }
+        domain_crash(hp2m->domain,
+                     "Failed to set entry for %#"PRIx64" -> %#"PRIx64
+                     " p2m %#"PRIx64"\n",
+                     gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m);
 
     return 1;
 }
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index f2b9e8f..a4c51a2 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1954,12 +1954,9 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn,
     /* If this isn't a "normal" writeable page, the domain is trying to
      * put pagetables in special memory of some kind.  We can't allow that. */
     if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_writable_page )
-    {
-        printk(XENLOG_G_ERR "can't remove write access to mfn %"PRI_mfn
-               ", type_info is %"PRtype_info "\n",
-               mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info);
-        domain_crash(d);
-    }
+        domain_crash(d, "can't remove write access to mfn %"PRI_mfn
+                     ", type_info is %"PRtype_info"\n",
+                     mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info);
 
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
     if ( curr->domain == d )
@@ -2104,10 +2101,9 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn,
         if ( level == 0 )
             return -1;
 
-        printk(XENLOG_G_ERR "can't remove write access to mfn %"PRI_mfn
-               ": guest has %lu special-use mappings\n", mfn_x(gmfn),
-               mfn_to_page(gmfn)->u.inuse.type_info & PGT_count_mask);
-        domain_crash(d);
+        domain_crash(d, "can't remove write access to mfn %"PRI_mfn
+                     ": guest has %lu special-use mappings\n", mfn_x(gmfn),
+                     mfn_to_page(gmfn)->u.inuse.type_info & PGT_count_mask);
     }
 
     /* We killed at least one writeable mapping, so must flush TLBs. */
@@ -2398,11 +2394,8 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all)
 
     /* If that didn't catch the shadows, something is wrong */
     if ( !fast && all && (pg->count_info & PGC_page_table) )
-    {
-        printk(XENLOG_G_ERR "can't find all shadows of mfn %"PRI_mfn
-               " (shadow_flags=%08x)\n", mfn_x(gmfn), pg->shadow_flags);
-        domain_crash(d);
-    }
+        domain_crash(d, "can't find all shadows of mfn %"PRI_mfn
+                     " (shadow_flags=%08x)\n", mfn_x(gmfn), pg->shadow_flags);
 
     /* Need to flush TLBs now, so that linear maps are safe next time we
      * take a fault. */
@@ -2480,8 +2473,8 @@ static void sh_update_paging_modes(struct vcpu *v)
         v->arch.paging.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES);
         if ( unlikely(!v->arch.paging.vtlb) )
         {
-            printk(XENLOG_G_ERR "Could not allocate vTLB space for %pv\n", v);
-            domain_crash(v->domain);
+            domain_crash(v->domain,
+                         "Could not allocate vTLB space for %pv\n", v);
             return;
         }
         spin_lock_init(&v->arch.paging.vtlb_lock);
@@ -2583,13 +2576,12 @@ static void sh_update_paging_modes(struct vcpu *v)
 
                 if ( v != current && vcpu_runnable(v) )
                 {
-                    printk(XENLOG_G_ERR
-                           "Some third party (%pv) is changing this HVM vcpu's"
-                           " (%pv) paging mode while it is running\n",
-                           current, v);
                     /* It's not safe to do that because we can't change
                      * the host CR3 for a running domain */
-                    domain_crash(v->domain);
+                    domain_crash(v->domain,
+                                 "Some third party (%pv) is changing this "
+                                 "HVM vcpu's (%pv) paging mode while it is "
+                                 "running\n", current, v);
                     return;
                 }
 
@@ -3562,11 +3554,7 @@ void pv_l1tf_tasklet(unsigned long data)
         int ret = shadow_one_bit_enable(d, PG_SH_forced);
 
         if ( ret )
-        {
-            printk(XENLOG_G_ERR "d%d Failed to enable PG_SH_forced: %d\n",
-                   d->domain_id, ret);
-            domain_crash(d);
-        }
+            domain_crash(d, "Failed to enable PG_SH_forced: %d\n", ret);
     }
 
     paging_unlock(d);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index dd86aa1..e85293e 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -940,7 +940,7 @@ static int shadow_set_l4e(struct domain *d,
 
         if ( !sh_get_ref(d, sl3mfn, paddr) )
         {
-            domain_crash(d);
+            domain_crash(d, "Failed to get ref\n");
             return SHADOW_SET_ERROR;
         }
 
@@ -993,7 +993,7 @@ static int shadow_set_l3e(struct domain *d,
         /* About to install a new reference */
         if ( !sh_get_ref(d, shadow_l3e_get_mfn(new_sl3e), paddr) )
         {
-            domain_crash(d);
+            domain_crash(d, "Failed to get ref\n");
             return SHADOW_SET_ERROR;
         }
     }
@@ -1055,7 +1055,7 @@ static int shadow_set_l2e(struct domain *d,
         /* About to install a new reference */
         if ( !sh_get_ref(d, sl1mfn, paddr) )
         {
-            domain_crash(d);
+            domain_crash(d, "Failed to get ref\n");
             return SHADOW_SET_ERROR;
         }
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3946,9 +3946,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
     }
     else
     {
-        printk(XENLOG_G_ERR "can't install %"PRI_mfn" as toplevel shadow\n",
-               mfn_x(smfn));
-        domain_crash(d);
+        domain_crash(d, "can't install %"PRI_mfn" as toplevel shadow\n",
+                     mfn_x(smfn));
         new_entry = pagetable_null();
     }
 
@@ -3966,10 +3965,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
          * by shadow_prealloc(): in PV mode we're still running on this
          * shadow and it's not safe to free it yet. */
         if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(d, old_smfn) )
-        {
-            printk(XENLOG_G_ERR "can't re-pin %"PRI_mfn"\n", mfn_x(old_smfn));
-            domain_crash(d);
-        }
+            domain_crash(d, "can't re-pin %"PRI_mfn"\n", mfn_x(old_smfn));
+
         sh_put_ref(d, old_smfn, 0);
     }
 }
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index 5567990..b41d54a 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -1010,7 +1010,7 @@ static int msix_capability_init(struct pci_dev *dev,
             if ( !is_hardware_domain(d) &&
                  /* Assume a domain without memory has no mappings yet. */
                  (!is_hardware_domain(currd) || d->tot_pages) )
-                domain_crash(d);
+                domain_crash(d, "Fatal MSI-X error\n");
             /* XXX How to deal with existing mappings? */
         }
     }
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index c359a1d..077f9e8 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -33,9 +33,9 @@ unsigned long do_iret(void)
     if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp,
                                  sizeof(iret_saved))) )
     {
-        gprintk(XENLOG_ERR,
-                "Fault while reading IRET context from guest stack\n");
-        goto exit_and_crash;
+        domain_crash(v->domain,
+                     "Fault while reading IRET context from guest stack\n");
+        return 0;
     }
 
     /* Returning to user mode? */
@@ -43,9 +43,9 @@ unsigned long do_iret(void)
     {
         if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
         {
-            gprintk(XENLOG_ERR,
-                    "Guest switching to user mode with no user page tables\n");
-            goto exit_and_crash;
+            domain_crash(v->domain,
+                         "Switching to user mode with no user page tables\n");
+            return 0;
         }
         toggle_guest_mode(v);
     }
@@ -74,10 +74,6 @@ unsigned long do_iret(void)
 
     /* Saved %rax gets written back to regs->rax in entry.S. */
     return iret_saved.rax;
-
- exit_and_crash:
-    domain_crash(v->domain);
-    return 0;
 }
 
 unsigned int compat_iret(void)
@@ -92,7 +88,7 @@ unsigned int compat_iret(void)
     /* Restore EAX (clobbered by hypercall). */
     if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
     {
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Failed to read eax\n");
         return 0;
     }
 
@@ -100,7 +96,7 @@ unsigned int compat_iret(void)
     if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
     {
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Failed to read cs/eip\n");
         return 0;
     }
 
@@ -110,7 +106,7 @@ unsigned int compat_iret(void)
      */
     if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
     {
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Failed to read eflags\n");
         return 0;
     }
 
@@ -154,7 +150,7 @@ unsigned int compat_iret(void)
         }
         if ( rc )
         {
-            domain_crash(v->domain);
+            domain_crash(v->domain, "Failed to copy exception frame\n");
             return 0;
         }
         regs->esp = ksp;
@@ -167,7 +163,7 @@ unsigned int compat_iret(void)
                           X86_EFLAGS_NT|X86_EFLAGS_TF);
         if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
         {
-            domain_crash(v->domain);
+            domain_crash(v->domain, "Failed to write error code\n");
             return 0;
         }
         regs->eip = ti->address;
@@ -175,7 +171,7 @@ unsigned int compat_iret(void)
     }
     else if ( unlikely(ring_0(regs)) )
     {
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Trying to enter ring 0\n");
         return 0;
     }
     else if ( ring_1(regs) )
@@ -184,7 +180,7 @@ unsigned int compat_iret(void)
     else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
               __get_user(regs->esp, (u32 *)regs->rsp + 4) )
     {
-        domain_crash(v->domain);
+        domain_crash(v->domain, "Failed to read ss/esp\n");
         return 0;
     }
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d8325a3..7c11689 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1447,12 +1447,10 @@ void do_page_fault(struct cpu_user_regs *regs)
     {
         pf_type = spurious_page_fault(addr, regs);
         if ( (pf_type == smep_fault) || (pf_type == smap_fault))
-        {
-            printk(XENLOG_G_ERR "%pv fatal SM%cP violation\n",
-                   current, (pf_type == smep_fault) ? 'E' : 'A');
+            domain_crash(current->domain,
+                         "Fatal SM%cP violation\n",
+                         (pf_type == smep_fault) ? 'E' : 'A');
 
-            domain_crash(current->domain);
-        }
         if ( pf_type != real_fault )
             return;
     }
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index e372c4b..1155159 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -8686,7 +8686,7 @@ x86_emulate(
     gprintk(XENLOG_INFO, "  stub: %"__stringify(MAX_INST_LEN)"ph\n",
             stub.func);
     generate_exception_if(stub_exn.info.fields.trapnr == EXC_UD, EXC_UD);
-    domain_crash(current->domain);
+    domain_crash(current->domain, "Fatal exception during emulation\n");
     rc = X86EMUL_UNHANDLEABLE;
     goto done;
 #endif
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 15edd5d..ff6352b 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -477,7 +477,7 @@ void xrstor(struct vcpu *v, uint64_t mask)
             continue;
         }
 
-        domain_crash(current->domain);
+        domain_crash(current->domain, "Unable to fix XRSTOR fault\n");
         return;
     }
 }
@@ -713,10 +713,9 @@ int handle_xsetbv(u32 index, u64 new_bv)
      */
     if ( unlikely(xcr0_max & ~xfeature_mask) )
     {
-        gprintk(XENLOG_ERR,
-                "xcr0_max %016" PRIx64 " exceeds hardware max %016" PRIx64 "\n",
-                xcr0_max, xfeature_mask);
-        domain_crash(curr->domain);
+        domain_crash(curr->domain, "xcr0_max %016" PRIx64
+                     " exceeds hardware max %016" PRIx64 "\n",
+                     xcr0_max, xfeature_mask);
 
         return -EINVAL;
     }
@@ -727,9 +726,8 @@ int handle_xsetbv(u32 index, u64 new_bv)
     /* By this point, new_bv really should be accepted by hardware. */
     if ( unlikely(!set_xcr0(new_bv)) )
     {
-        gprintk(XENLOG_ERR, "new_bv %016" PRIx64 " rejected by hardware\n",
-                new_bv);
-        domain_crash(curr->domain);
+        domain_crash(curr->domain,
+                     "new_bv %016" PRIx64 " rejected by hardware\n", new_bv);
 
         return -EFAULT;
     }
diff --git a/xen/common/compat/grant_table.c b/xen/common/compat/grant_table.c
index ff1d678..0be2c5d 100644
--- a/xen/common/compat/grant_table.c
+++ b/xen/common/compat/grant_table.c
@@ -323,7 +323,7 @@ int compat_grant_table_op(unsigned int cmd,
         }
 
         default:
-            domain_crash(current->domain);
+            domain_crash(current->domain, "Unhandled op %u\n", cmd_op);
             break;
         }
     }
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 13fd64d..508dd37 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -560,8 +560,8 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
             if ( rc < 0 )
             {
                 if ( split < 0 )
-                    /* Cannot cancel the continuation... */
-                    domain_crash(current->domain);
+                    domain_crash(current->domain,
+                                 "Unable to cancel continuation\n");
                 return rc;
             }
             break;
@@ -636,7 +636,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
         }
 
         default:
-            domain_crash(current->domain);
+            domain_crash(current->domain, "Unhandled op %u\n", op);
             split = 0;
             break;
         }
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 6f294c7..3a8581b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1124,7 +1124,7 @@ int domain_soft_reset(struct domain *d)
     if ( !rc )
         domain_resume(d);
     else
-        domain_crash(d);
+        domain_crash(d, "Soft reset failure: %d\n", rc);
 
     return rc;
 }
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index ad55cfa..50fe1e1 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1291,8 +1291,7 @@ unmap_common(
     if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
     {
         /* This can happen when a grant is implicitly unmapped. */
-        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
-        domain_crash(ld); /* naughty... */
+        domain_crash(ld, "Could not find d%d\n", dom);
         return;
     }
 
@@ -1684,10 +1683,8 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
 
             if ( rc )
             {
-                gprintk(XENLOG_ERR,
-                        "Could not remove status frame %u (GFN %#lx) from P2M\n",
-                        i, gfn_x(gfn));
-                domain_crash(d);
+                domain_crash(d, "Could not remove status frame %u (GFN %"PRI_gfn
+                             ") from P2M: %d\n", i, gfn_x(gfn), rc);
                 return rc;
             }
             gnttab_set_frame_gfn(gt, true, i, INVALID_GFN);
@@ -1700,12 +1697,8 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
         if ( pg->count_info & ~PGC_xen_heap )
         {
             if ( paging_mode_translate(d) )
-            {
-                gprintk(XENLOG_ERR,
-                        "Wrong page state %#lx of status frame %u (GFN %#lx)\n",
-                        pg->count_info, i, gfn_x(gfn));
-                domain_crash(d);
-            }
+                domain_crash(d, "Bad page state %#lx of status frame %u (GFN %"
+                             PRI_gfn")\n", pg->count_info, i, gfn_x(gfn));
             else
             {
                 if ( get_page(pg, d) )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 996f94b..48e2ae8 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -684,7 +684,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
             /* Pages were unshared above */
             BUG_ON(SHARED_M2P(gfn));
             if ( guest_physmap_remove_page(d, _gfn(gfn), mfn, 0) )
-                domain_crash(d);
+                domain_crash(d, "Unable to remove gfn %"PRI_gfn"\n", gfn);
             put_page(page);
         }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 02aeed7..3c31410 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1569,7 +1569,7 @@ int offline_page(unsigned long mfn, int broken, uint32_t *status)
     if ( (pg->count_info & PGC_broken) && (owner = page_get_owner(pg)) )
     {
         *status = PG_OFFLINE_AGAIN;
-        domain_crash(owner);
+        domain_crash(owner, "Repeated broken page\n");
         return 0;
     }
 
diff --git a/xen/common/wait.c b/xen/common/wait.c
index 4f830a1..1d4cd7c 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -135,8 +135,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
     cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
     if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
     {
-        gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
-        domain_crash(current->domain);
+        domain_crash(current->domain, "Unable to set vcpu affinity\n");
 
         for ( ; ; )
             do_softirq();
@@ -169,8 +168,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
 
     if ( unlikely(wqv->esp == 0) )
     {
-        gdprintk(XENLOG_ERR, "Stack too large in %s\n", __func__);
-        domain_crash(current->domain);
+        domain_crash(current->domain, "Stack too large\n");
 
         for ( ; ; )
             do_softirq();
@@ -201,10 +199,8 @@ void check_wakeup_from_wait(void)
         struct vcpu *curr = current;
         cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
         if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
-        {
-            gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
-            domain_crash(current->domain);
-        }
+            domain_crash(current->domain, "Unable to set vcpu affinity\n");
+
         wait(); /* takes us back into the scheduler */
     }
 
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 70b4345..bdd1022 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -651,8 +651,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
     if ( rc )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Root table alloc failed, gfn = %lx\n", gfn);
-        domain_crash(d);
+        domain_crash(d, "Root table alloc failed: %d\n", rc);
         return rc;
     }
 
@@ -660,20 +659,20 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
      * we might need a deeper page table for lager gfn now */
     if ( is_hvm_domain(d) )
     {
-        if ( update_paging_mode(d, gfn) )
+        rc = update_paging_mode(d, gfn);
+        if ( rc )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
-            domain_crash(d);
-            return -EFAULT;
+            domain_crash(d, "Update page mode failed for gfn %"PRI_gfn": %d\n",
+                         gfn, rc);
+            return rc;
         }
     }
 
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
-        domain_crash(d);
+        domain_crash(d, "Invalid IO pagetable entry gfn %"PRI_gfn"\n", gfn);
         return -EFAULT;
     }
 
@@ -705,9 +704,8 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                                flags, merge_level) )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Merge iommu page failed at level %d, "
+            domain_crash(d, "Merge iommu page failed at level %d, "
                             "gfn = %lx mfn = %lx\n", merge_level, gfn, mfn);
-            domain_crash(d);
             return -EFAULT;
         }
 
@@ -747,9 +745,9 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
         if ( rc )
         {
             spin_unlock(&hd->arch.mapping_lock);
-            AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
             if ( rc != -EADDRNOTAVAIL )
-                domain_crash(d);
+                domain_crash(d, "Update page mode failed gfn %"PRI_gfn": %d\n",
+                             gfn, rc);
             return rc;
         }
     }
@@ -757,8 +755,7 @@ int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
     if ( iommu_pde_from_gfn(d, gfn, pt_mfn) || (pt_mfn[1] == 0) )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        AMD_IOMMU_DEBUG("Invalid IO pagetable entry gfn = %lx\n", gfn);
-        domain_crash(d);
+        domain_crash(d, "Invalid IO pagetable entry gfn %"PRI_gfn"\n", gfn);
         return -EFAULT;
     }
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 70d218f..61499df 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -273,7 +273,7 @@ int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                    d->domain_id, gfn, mfn, rc);
 
         if ( !is_hardware_domain(d) )
-            domain_crash(d);
+            domain_crash(d, "Fatal IOMMU error\n");
     }
 
     return rc;
@@ -296,7 +296,7 @@ int iommu_unmap_page(struct domain *d, unsigned long gfn)
                    d->domain_id, gfn, rc);
 
         if ( !is_hardware_domain(d) )
-            domain_crash(d);
+            domain_crash(d, "Fatal IOMMU error\n");
     }
 
     return rc;
@@ -337,7 +337,7 @@ int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                    d->domain_id, rc, gfn, page_count);
 
         if ( !is_hardware_domain(d) )
-            domain_crash(d);
+            domain_crash(d, "Fatal IOMMU error\n");
     }
 
     return rc;
@@ -360,7 +360,7 @@ int iommu_iotlb_flush_all(struct domain *d)
                    d->domain_id, rc);
 
         if ( !is_hardware_domain(d) )
-            domain_crash(d);
+            domain_crash(d, "Fatal IOMMU error\n");
     }
 
     return rc;
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index d1adffa..b02823c 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1586,7 +1586,7 @@ void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev)
                d->domain_id, pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
                PCI_FUNC(pdev->devfn));
     if ( !is_hardware_domain(d) )
-        domain_crash(d);
+        domain_crash(d, "Fatal IOMMU error\n");
 
     pcidevs_unlock();
 }
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1710256..89189e9 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1873,7 +1873,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
                    d->domain_id, rc);
 
         if ( !is_hardware_domain(d) )
-            domain_crash(d);
+            domain_crash(d, "Fatal IOMMU error\n");
     }
 
     return rc;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 0ba80cb..452ad36 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -611,10 +611,12 @@ void vcpu_end_shutdown_deferral(struct vcpu *v);
  * from any processor.
  */
 void __domain_crash(struct domain *d);
-#define domain_crash(d) do {                                              \
-    printk("domain_crash called from %s:%d\n", __FILE__, __LINE__);       \
-    __domain_crash(d);                                                    \
-} while (0)
+#define domain_crash(d, fmt, args...)                               \
+    do {                                                            \
+        printk(XENLOG_G_ERR "domain_crash called from %s: " fmt,    \
+               __func__, ## args);                                  \
+        __domain_crash(d);                                          \
+    } while (0)
 
 /*
  * Called from assembly code, with an optional address to help indicate why
-- 
2.1.4
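
As an illustrative sketch only (hypothetical caller, error path and message
invented for the example, not taken from the patch), a typical conversion
under the reworked interface looks like this:

    /* Before: logging, if present at all, is separate from the crash. */
    if ( rc )
    {
        gdprintk(XENLOG_ERR, "Widget setup failed: %d\n", rc);
        domain_crash(d);
        return rc;
    }

    /* After: the message is mandatory and printed at guest error level. */
    if ( rc )
    {
        domain_crash(d, "Widget setup failed: %d\n", rc);
        return rc;
    }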


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

