[Xen-devel] [PATCH] x86/mm: Drop {HAP,SHADOW}_ERROR() wrappers
Unlike the PRINTK/DEBUG wrappers, these go straight out to the console,
rather than ending up in the debugtrace buffer.

A number of these users are followed by domain_crash(), and future changes
will want to combine the printk() into the domain_crash() call.

Expand these wrappers in place, using XENLOG_ERR before a BUG(), and
XENLOG_G_ERR before a domain_crash().

Perform some %pv/PRI_mfn/etc cleanup while modifying the invocations, and
explicitly drop some calls which are unnecessary (bad shadow op, and the
empty stubs for incorrect sh_map_and_validate_gl?e() calls).

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
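
Not part of the patch, just an illustrative sketch of the conversion pattern
for anyone skimming the diff below.  The function, its checks and its message
text are hypothetical; only the idiom is the point: a plain XENLOG_ERR
printk() ahead of a host-fatal BUG(), and a XENLOG_G_ERR printk() ahead of a
guest-fatal domain_crash():

/* Hypothetical example, not taken from the tree. */
static void example_cleanup(struct domain *d, mfn_t gmfn)
{
    /* Host-fatal path: plain XENLOG_ERR, then BUG(). */
    if ( !mfn_valid(gmfn) )
    {
        printk(XENLOG_ERR "gmfn %"PRI_mfn" is bogus\n", mfn_x(gmfn));
        BUG();
    }

    /* Guest-fatal path: XENLOG_G_ERR, then domain_crash(). */
    if ( d->is_dying )
    {
        printk(XENLOG_G_ERR "d%d: unexpected state\n", d->domain_id);
        domain_crash(d);
    }
}

The G variants are logged at the guest loglevel (and are rate limited), which
is the right treatment for failures a guest can provoke and which end in
domain_crash() rather than bringing down the host.
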
printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not in hash table\n", + mfn_x(gmfn)); BUG(); } @@ -633,7 +637,8 @@ void sh_resync(struct domain *d, mfn_t gmfn) } } - SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn)); + printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not in hash table\n", + mfn_x(gmfn)); BUG(); } @@ -648,8 +653,8 @@ static int sh_skip_sync(struct vcpu *v, mfn_t gl1mfn) return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 3)(v, gl1mfn); else if ( pg->shadow_flags & SHF_L1_64 ) return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 4)(v, gl1mfn); - SHADOW_ERROR("gmfn %#lx was OOS but not shadowed as an l1.\n", - mfn_x(gl1mfn)); + printk(XENLOG_ERR "gmfn %"PRI_mfn" was OOS but not shadowed as an l1\n", + mfn_x(gl1mfn)); BUG(); } @@ -990,7 +995,7 @@ void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only) SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(d, smfn, user_only); break; default: - SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type); + printk(XENLOG_ERR "Bad top-level shadow type %08x\n", sp->u.sh.type); BUG(); } } @@ -1060,12 +1065,12 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages) /* Nothing more we can do: all remaining shadows are of pages that * hold Xen mappings for some vcpu. This can never happen. */ - SHADOW_ERROR("Can't pre-allocate %u shadow pages!\n" - " shadow pages total = %u, free = %u, p2m=%u\n", - pages, - d->arch.paging.shadow.total_pages, - d->arch.paging.shadow.free_pages, - d->arch.paging.shadow.p2m_pages); + printk(XENLOG_ERR "Can't pre-allocate %u shadow pages!\n" + " shadow pages total = %u, free = %u, p2m=%u\n", + pages, + d->arch.paging.shadow.total_pages, + d->arch.paging.shadow.free_pages, + d->arch.paging.shadow.p2m_pages); BUG(); } @@ -1185,7 +1190,7 @@ mfn_t shadow_alloc(struct domain *d, * correctly before we allocated. We can't recover by calling * prealloc here, because we might free up higher-level pages * that the caller is working on. */ - SHADOW_ERROR("Can't allocate %i shadow pages!\n", pages); + printk(XENLOG_ERR "Can't allocate %u shadow pages!\n", pages); BUG(); } d->arch.paging.shadow.free_pages -= pages; @@ -1337,10 +1342,11 @@ shadow_free_p2m_page(struct domain *d, struct page_info *pg) /* Should still have no owner and count zero. */ if ( owner || (pg->count_info & PGC_count_mask) ) { - SHADOW_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n", - d->domain_id, mfn_x(page_to_mfn(pg)), - owner ? owner->domain_id : DOMID_INVALID, - pg->count_info, pg->u.inuse.type_info); + printk(XENLOG_ERR + "d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n", + d->domain_id, mfn_x(page_to_mfn(pg)), + owner ? 
owner->domain_id : DOMID_INVALID, + pg->count_info, pg->u.inuse.type_info); pg->count_info &= ~PGC_count_mask; page_set_owner(pg, NULL); } @@ -1506,11 +1512,11 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket) { if ( !page_is_out_of_sync(gpg) ) { - SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")" - " and not OOS but has typecount %#lx\n", - __backpointer(sp), - mfn_x(page_to_mfn(sp)), - gpg->u.inuse.type_info); + printk(XENLOG_ERR + "MFN %"PRI_mfn" shadowed (by %"PRI_mfn")" + " and not OOS but has typecount %#lx\n", + __backpointer(sp), mfn_x(page_to_mfn(sp)), + gpg->u.inuse.type_info); BUG(); } } @@ -1520,10 +1526,10 @@ static void sh_hash_audit_bucket(struct domain *d, int bucket) if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page && (gpg->u.inuse.type_info & PGT_count_mask) != 0 ) { - SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")" - " but has typecount %#lx\n", - __backpointer(sp), mfn_x(page_to_mfn(sp)), - gpg->u.inuse.type_info); + printk(XENLOG_ERR "MFN %"PRI_mfn" shadowed (by %"PRI_mfn")" + " but has typecount %#lx\n", + __backpointer(sp), mfn_x(page_to_mfn(sp)), + gpg->u.inuse.type_info); BUG(); } } @@ -1863,8 +1869,7 @@ void sh_destroy_shadow(struct domain *d, mfn_t smfn) break; default: - SHADOW_ERROR("tried to destroy shadow of bad type %08lx\n", - (unsigned long)t); + printk(XENLOG_ERR "tried to destroy shadow of bad type %08x\n", t); BUG(); } } @@ -1950,9 +1955,9 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, * put pagetables in special memory of some kind. We can't allow that. */ if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_writable_page ) { - SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %" - PRtype_info "\n", - mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info); + printk(XENLOG_G_ERR "can't remove write access to mfn %"PRI_mfn + ", type_info is %"PRtype_info "\n", + mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info); domain_crash(d); } @@ -2099,9 +2104,9 @@ int sh_remove_write_access(struct domain *d, mfn_t gmfn, if ( level == 0 ) return -1; - SHADOW_ERROR("can't remove write access to mfn %lx: guest has " - "%lu special-use mappings of it\n", mfn_x(gmfn), - (mfn_to_page(gmfn)->u.inuse.type_info&PGT_count_mask)); + printk(XENLOG_G_ERR "can't remove write access to mfn %"PRI_mfn + ": guest has %lu special-use mappings\n", mfn_x(gmfn), + mfn_to_page(gmfn)->u.inuse.type_info & PGT_count_mask); domain_crash(d); } @@ -2206,13 +2211,12 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn) && ((page->u.inuse.type_info & PGT_count_mask) == (is_xen_heap_page(page) || (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) ) - { - SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): " - "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn_x(gfn), - page->count_info, page->u.inuse.type_info, - !!is_xen_heap_page(page), - is_hvm_domain(d) && is_ioreq_server_page(d, page)); - } + printk(XENLOG_G_ERR "can't find all mappings of mfn %"PRI_mfn + " (gfn %"PRI_gfn"): c=%lx t=%lx x=%d i=%d\n", + mfn_x(gmfn), gfn_x(gfn), + page->count_info, page->u.inuse.type_info, + !!is_xen_heap_page(page), + (is_hvm_domain(d) && is_ioreq_server_page(d, page))); } paging_unlock(d); @@ -2364,9 +2368,9 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all) smfn = shadow_hash_lookup(d, mfn_x(gmfn), t); \ if ( unlikely(!mfn_valid(smfn)) ) \ { \ - SHADOW_ERROR(": gmfn %#lx has flags %#"PRIx32 \ - " but no type-%#"PRIx32" shadow\n", \ - mfn_x(gmfn), (uint32_t)pg->shadow_flags, t); \ + 
printk(XENLOG_G_ERR "gmfn %"PRI_mfn" has flags %#x" \ + " but no type-%#x shadow\n", \ + mfn_x(gmfn), pg->shadow_flags, t); \ break; \ } \ if ( sh_type_is_pinnable(d, t) ) \ @@ -2395,9 +2399,8 @@ void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all) /* If that didn't catch the shadows, something is wrong */ if ( !fast && all && (pg->count_info & PGC_page_table) ) { - SHADOW_ERROR("can't find all shadows of mfn %"PRI_mfn" " - "(shadow_flags=%08x)\n", - mfn_x(gmfn), pg->shadow_flags); + printk(XENLOG_G_ERR "can't find all shadows of mfn %"PRI_mfn + " (shadow_flags=%08x)\n", mfn_x(gmfn), pg->shadow_flags); domain_crash(d); } @@ -2477,8 +2480,7 @@ static void sh_update_paging_modes(struct vcpu *v) v->arch.paging.vtlb = xzalloc_array(struct shadow_vtlb, VTLB_ENTRIES); if ( unlikely(!v->arch.paging.vtlb) ) { - SHADOW_ERROR("Could not allocate vTLB space for dom %u vcpu %u\n", - d->domain_id, v->vcpu_id); + printk(XENLOG_G_ERR "Could not allocate vTLB space for %pv\n", v); domain_crash(v->domain); return; } @@ -2581,10 +2583,10 @@ static void sh_update_paging_modes(struct vcpu *v) if ( v != current && vcpu_runnable(v) ) { - SHADOW_ERROR("Some third party (%pv) is changing " - "this HVM vcpu's (%pv) paging mode " - "while it is running.\n", - current, v); + printk(XENLOG_G_ERR + "Some third party (%pv) is changing this HVM vcpu's" + " (%pv) paging mode while it is running\n", + current, v); /* It's not safe to do that because we can't change * the host CR3 for a running domain */ domain_crash(v->domain); @@ -2901,10 +2903,11 @@ void shadow_teardown(struct domain *d, bool *preempted) /* Complain here in cases where shadow_free_p2m_page() won't. */ else if ( !page_get_owner(unpaged_pagetable) && !(unpaged_pagetable->count_info & PGC_count_mask) ) - SHADOW_ERROR("d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n", - d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)), - unpaged_pagetable->count_info, - unpaged_pagetable->u.inuse.type_info); + printk(XENLOG_ERR + "d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n", + d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)), + unpaged_pagetable->count_info, + unpaged_pagetable->u.inuse.type_info); shadow_free_p2m_page(d, unpaged_pagetable); } } @@ -3468,8 +3471,8 @@ int shadow_domctl(struct domain *d, { /* Can't set the allocation to zero unless the domain stops using * shadow pagetables first */ - SHADOW_ERROR("Can't set shadow allocation to zero, domain %u" - " is still using shadows.\n", d->domain_id); + dprintk(XENLOG_G_ERR, "Can't set shadow allocation to zero, " + "d%d is still using shadows\n", d->domain_id); paging_unlock(d); return -EINVAL; } @@ -3485,7 +3488,6 @@ int shadow_domctl(struct domain *d, return rc; default: - SHADOW_ERROR("Bad shadow op %u\n", sc->op); return -EINVAL; } } diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index f979dca..dd86aa1 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -2155,7 +2155,7 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se) mfn_x(sl4mfn), shadow_index, new_sl4e.l4); if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT ) { - SHADOW_ERROR("out-of-range l4e update\n"); + printk(XENLOG_G_ERR "out-of-range l4e update\n"); result |= SHADOW_SET_ERROR; } @@ -2478,9 +2478,7 @@ sh_map_and_validate_gl4e(struct vcpu *v, mfn_t gl4mfn, shadow_l4_index, validate_gl4e); #else // ! 
GUEST_PAGING_LEVELS >= 4 - SHADOW_ERROR("called in wrong paging mode!\n"); - BUG(); - return 0; + BUG(); /* Called in wrong paging mode! */ #endif } @@ -2494,9 +2492,7 @@ sh_map_and_validate_gl3e(struct vcpu *v, mfn_t gl3mfn, shadow_l3_index, validate_gl3e); #else // ! GUEST_PAGING_LEVELS >= 4 - SHADOW_ERROR("called in wrong paging mode!\n"); - BUG(); - return 0; + BUG(); /* Called in wrong paging mode! */ #endif } @@ -2520,9 +2516,7 @@ sh_map_and_validate_gl2he(struct vcpu *v, mfn_t gl2mfn, shadow_l2_index, validate_gl2e); #else /* Non-PAE guests don't have different kinds of l2 table */ - SHADOW_ERROR("called in wrong paging mode!\n"); - BUG(); - return 0; + BUG(); /* Called in wrong paging mode! */ #endif } @@ -2966,8 +2960,8 @@ static int sh_page_fault(struct vcpu *v, * a BUG() when we try to take the lock again. */ if ( unlikely(paging_locked_by_me(d)) ) { - SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n", - d->arch.paging.lock.locker_function); + printk(XENLOG_G_ERR "Recursive shadow fault: lock taken by %s\n", + d->arch.paging.lock.locker_function); return 0; } @@ -3952,7 +3946,8 @@ sh_set_toplevel_shadow(struct vcpu *v, } else { - SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn)); + printk(XENLOG_G_ERR "can't install %"PRI_mfn" as toplevel shadow\n", + mfn_x(smfn)); domain_crash(d); new_entry = pagetable_null(); } @@ -3972,7 +3967,7 @@ sh_set_toplevel_shadow(struct vcpu *v, * shadow and it's not safe to free it yet. */ if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(d, old_smfn) ) { - SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn)); + printk(XENLOG_G_ERR "can't re-pin %"PRI_mfn"\n", mfn_x(old_smfn)); domain_crash(d); } sh_put_ref(d, old_smfn, 0); diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h index 691bcf6..a1fae50 100644 --- a/xen/arch/x86/mm/shadow/private.h +++ b/xen/arch/x86/mm/shadow/private.h @@ -75,8 +75,6 @@ extern int shadow_audit_enable; #define SHADOW_PRINTK(_f, _a...) \ debugtrace_printk("sh: %s(): " _f, __func__, ##_a) -#define SHADOW_ERROR(_f, _a...) \ - printk("sh error: %s(): " _f, __func__, ##_a) #define SHADOW_DEBUG(flag, _f, _a...) \ do { \ if (SHADOW_DEBUG_ ## flag) \ @@ -549,8 +547,8 @@ static inline void sh_put_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa) if ( unlikely(x == 0) ) { - SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%#lx t=%#x\n", - mfn_x(smfn), sp->u.sh.count + 0UL, sp->u.sh.type); + printk(XENLOG_ERR "shadow ref underflow, smfn=%"PRI_mfn" oc=%#lx t=%#x\n", + mfn_x(smfn), sp->u.sh.count + 0UL, sp->u.sh.type); BUG(); } diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h index c5d6038..b94bfb4 100644 --- a/xen/include/asm-x86/hap.h +++ b/xen/include/asm-x86/hap.h @@ -27,8 +27,6 @@ #define HAP_PRINTK(_f, _a...) \ debugtrace_printk("hap: %s(): " _f, __func__, ##_a) -#define HAP_ERROR(_f, _a...) \ - printk("hap error: %s(): " _f, __func__, ##_a) /************************************************/ /* hap domain level functions */ -- 2.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel