[PATCH v3 04/12] x86/shadow: use lighter weight mode checks
shadow_mode_...(), with the exception of shadow_mode_enabled(), are
shorthands for shadow_mode_enabled() && paging_mode_...(). While
potentially useful outside of shadow-internal functions, when we already
know that we're dealing with a domain in shadow mode, the "paging" checks
are sufficient and cheaper. While the "shadow" ones commonly translate to
a MOV/AND/CMP/Jcc sequence, the "paging" ones typically resolve to just
TEST+Jcc.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Re-base over new earlier patch.

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1843,7 +1843,7 @@ int sh_remove_write_access(struct domain
      * In guest refcounting, we trust Xen to already be restricting
      * all the writes to the guest page tables, so we do not need to
      * do more. */
-    if ( !shadow_mode_refcounts(d) )
+    if ( !paging_mode_refcounts(d) )
         return 0;
 
     /* Early exit if it's already a pagetable, or otherwise not writeable */
@@ -2075,7 +2075,7 @@ int sh_remove_all_mappings(struct domain
      * guest pages with an extra reference taken by
      * prepare_ring_for_helper().
      */
-    if ( !(shadow_mode_external(d)
+    if ( !(paging_mode_external(d)
            && (page->count_info & PGC_count_mask) <= 3
            && ((page->u.inuse.type_info & PGT_count_mask)
                == (is_special_page(page) ||
@@ -2372,8 +2372,8 @@ static void sh_update_paging_modes(struc
 {
     const struct paging_mode *old_mode = v->arch.paging.mode;
 
-    ASSERT(shadow_mode_translate(d));
-    ASSERT(shadow_mode_external(d));
+    ASSERT(paging_mode_translate(d));
+    ASSERT(paging_mode_external(d));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Need to resync all our pages now, because if a page goes out
@@ -2760,7 +2760,7 @@ void shadow_vcpu_teardown(struct vcpu *v
     sh_detach_old_tables(v);
 
 #ifdef CONFIG_HVM
-    if ( shadow_mode_external(d) )
+    if ( paging_mode_external(d) )
     {
         mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
 
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -514,7 +514,7 @@ _sh_propagate(struct vcpu *v,
                || (level == 1
                    && page_get_owner(mfn_to_page(target_mfn)) == dom_io);
     if ( mmio_mfn
-         && !(level == 1 && (!shadow_mode_refcounts(d)
+         && !(level == 1 && (!paging_mode_refcounts(d)
                              || p2mt == p2m_mmio_direct)) )
     {
         ASSERT((ft == ft_prefetch));
@@ -531,7 +531,7 @@ _sh_propagate(struct vcpu *v,
                        _PAGE_RW | _PAGE_PRESENT);
     if ( guest_nx_enabled(v) )
         pass_thru_flags |= _PAGE_NX_BIT;
-    if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
+    if ( level == 1 && !paging_mode_refcounts(d) && mmio_mfn )
         pass_thru_flags |= PAGE_CACHE_ATTRS;
 
     sflags = gflags & pass_thru_flags;
@@ -651,7 +651,7 @@ _sh_propagate(struct vcpu *v,
      * (We handle log-dirty entirely inside the shadow code, without using the
      * p2m_ram_logdirty p2m type: only HAP uses that.)
      */
-    if ( level == 1 && unlikely(shadow_mode_log_dirty(d)) && !mmio_mfn )
+    if ( level == 1 && unlikely(paging_mode_log_dirty(d)) && !mmio_mfn )
     {
         if ( ft & FETCH_TYPE_WRITE )
             paging_mark_dirty(d, target_mfn);
@@ -807,7 +807,7 @@ do {
 #define FOREACH_PRESENT_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)     \
 do {                                                                       \
     int _i, _j;                                                            \
-    ASSERT(shadow_mode_external(_dom));                                    \
+    ASSERT(paging_mode_external(_dom));                                    \
     ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);       \
     for ( _j = 0; _j < 4; _j++ )                                           \
     {                                                                      \
@@ -833,7 +833,7 @@ do {
 do {                                                                       \
     int _i;                                                                \
     shadow_l2e_t *_sp = map_domain_page((_sl2mfn));                        \
-    ASSERT(shadow_mode_external(_dom));                                    \
+    ASSERT(paging_mode_external(_dom));                                    \
     ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow);      \
     for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
     {                                                                      \
@@ -854,7 +854,7 @@ do {
     unsigned int _i, _end = SHADOW_L2_PAGETABLE_ENTRIES;                   \
     shadow_l2e_t *_sp = map_domain_page((_sl2mfn));                        \
     ASSERT_VALID_L2(mfn_to_page(_sl2mfn)->u.sh.type);                      \
-    if ( is_pv_32bit_domain(_dom) /* implies !shadow_mode_external */ &&   \
+    if ( is_pv_32bit_domain(_dom) /* implies !paging_mode_external */ &&   \
          mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2_64_shadow )         \
         _end = COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom);                   \
     for ( _i = 0; _i < _end; ++_i )                                        \
@@ -896,7 +896,7 @@ do {
 #define FOREACH_PRESENT_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)     \
 do {                                                                       \
     shadow_l4e_t *_sp = map_domain_page((_sl4mfn));                        \
-    int _xen = !shadow_mode_external(_dom);                                \
+    int _xen = !paging_mode_external(_dom);                                \
     int _i;                                                                \
     ASSERT(mfn_to_page(_sl4mfn)->u.sh.type == SH_type_l4_64_shadow);       \
     for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )                 \
@@ -965,7 +965,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
 #endif
 
     // Create the Xen mappings...
-    if ( !shadow_mode_external(d) )
+    if ( !paging_mode_external(d) )
     {
         switch (shadow_type)
         {
@@ -1367,7 +1367,7 @@ void sh_destroy_l1_shadow(struct domain
         shadow_demote(d, gmfn, t);
     }
 
-    if ( shadow_mode_refcounts(d) )
+    if ( paging_mode_refcounts(d) )
     {
         /* Decrement refcounts of all the old entries */
         mfn_t sl1mfn = smfn;
@@ -1464,7 +1464,7 @@ static int cf_check validate_gl4e(
     l4e_propagate_from_guest(v, new_gl4e, sl3mfn, &new_sl4e, ft_prefetch);
 
     // check for updates to xen reserved slots
-    if ( !shadow_mode_external(d) )
+    if ( !paging_mode_external(d) )
     {
         int shadow_index = (((unsigned long)sl4p & ~PAGE_MASK) /
                             sizeof(shadow_l4e_t));
@@ -2387,7 +2387,7 @@ static int cf_check sh_page_fault(
     gfn = guest_walk_to_gfn(&gw);
     gmfn = get_gfn(d, gfn, &p2mt);
 
-    if ( shadow_mode_refcounts(d) &&
+    if ( paging_mode_refcounts(d) &&
          ((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
           (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
     {
@@ -2611,7 +2611,7 @@ static int cf_check sh_page_fault(
     return EXCRET_fault_fixed;
 
  emulate:
-    if ( !shadow_mode_refcounts(d) )
+    if ( !paging_mode_refcounts(d) )
         goto not_a_shadow_fault;
 
 #ifdef CONFIG_HVM
@@ -3055,7 +3055,7 @@ sh_update_linear_entries(struct vcpu *v)
      */
 
     /* Don't try to update the monitor table if it doesn't exist */
-    if ( !shadow_mode_external(d) ||
+    if ( !paging_mode_external(d) ||
          pagetable_get_pfn(v->arch.hvm.monitor_table) == 0 )
         return;
 
@@ -3204,7 +3204,7 @@ static void cf_check sh_update_cr3(struc
     /* Double-check that the HVM code has sent us a sane guest_table */
     if ( is_hvm_domain(d) )
     {
-        ASSERT(shadow_mode_external(d));
+        ASSERT(paging_mode_external(d));
         if ( hvm_paging_enabled(v) )
             ASSERT(pagetable_get_pfn(v->arch.guest_table));
         else
@@ -3229,7 +3229,7 @@ static void cf_check sh_update_cr3(struc
      * table. We cache the current state of that table and shadow that,
      * until the next CR3 write makes us refresh our cache.
      */
-    ASSERT(shadow_mode_external(d));
+    ASSERT(paging_mode_external(d));
 
     /*
      * Find where in the page the l3 table is, but ignore the low 2 bits of
@@ -3260,7 +3260,7 @@ static void cf_check sh_update_cr3(struc
         ASSERT(d->is_dying || d->is_shutting_down);
         return;
     }
-    if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
+    if ( !paging_mode_external(d) && !is_pv_32bit_domain(d) )
    {
         mfn_t smfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[0]);
 
@@ -3354,7 +3354,7 @@ static void cf_check sh_update_cr3(struc
     ///
     /// v->arch.cr3
     ///
-    if ( shadow_mode_external(d) )
+    if ( paging_mode_external(d) )
     {
         make_cr3(v, pagetable_get_mfn(v->arch.hvm.monitor_table));
     }
@@ -3371,7 +3371,7 @@ static void cf_check sh_update_cr3(struc
     ///
     /// v->arch.hvm.hw_cr[3]
    ///
-    if ( shadow_mode_external(d) )
+    if ( paging_mode_external(d) )
     {
         ASSERT(is_hvm_domain(d));
 #if SHADOW_PAGING_LEVELS == 3
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -407,7 +407,7 @@ static inline int sh_remove_write_access
                                           unsigned int level,
                                           unsigned long fault_addr)
 {
-    ASSERT(!shadow_mode_refcounts(d));
+    ASSERT(!paging_mode_refcounts(d));
     return 0;
 }
 #endif
@@ -520,8 +520,8 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
         return 0;
 
     owner = page_get_owner(page);
-    if ( owner && shadow_mode_refcounts(owner)
-         && (page->count_info & PGC_shadowed_pt) )
+    if ( owner && paging_mode_refcounts(owner) &&
+         (page->count_info & PGC_shadowed_pt) )
         return 1;
 
     type_info = page->u.inuse.type_info & PGT_type_mask;
--- a/xen/arch/x86/mm/shadow/set.c
+++ b/xen/arch/x86/mm/shadow/set.c
@@ -81,7 +81,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
     struct domain *owner = NULL;
 
     ASSERT(!sh_l1e_is_magic(sl1e));
-    ASSERT(shadow_mode_refcounts(d));
+    ASSERT(paging_mode_refcounts(d));
 
     if ( mfn_valid(mfn) )
     {
@@ -342,7 +342,7 @@ int shadow_set_l1e(struct domain *d, sha
          !sh_l1e_is_magic(new_sl1e) )
     {
         /* About to install a new reference */
-        if ( shadow_mode_refcounts(d) )
+        if ( paging_mode_refcounts(d) )
         {
 #define PAGE_FLIPPABLE (_PAGE_RW | _PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
             int rc;
@@ -375,7 +375,7 @@ int shadow_set_l1e(struct domain *d, sha
         old_sl1f = shadow_l1e_get_flags(old_sl1e);
         if ( (old_sl1f & _PAGE_PRESENT) &&
              !sh_l1e_is_magic(old_sl1e) &&
-             shadow_mode_refcounts(d) )
+             paging_mode_refcounts(d) )
         {
             /*
              * We lost a reference to an old mfn.
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -262,7 +262,7 @@ int shadow_set_l4e(struct domain *d, sha
 static void inline
 shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
 {
-    if ( !shadow_mode_refcounts(d) )
+    if ( !paging_mode_refcounts(d) )
         return;
 
     put_page_from_l1e(sl1e, d);
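[For readers less familiar with these predicates, a minimal illustrative
sketch of the relationship the patch description relies on follows. The
flag names, the structure, and the helpers are simplified placeholders,
not Xen's actual definitions; only the shorthand relationship stated in
the description above is taken from the patch.]

/* Illustrative sketch only -- simplified stand-ins, not Xen's real code. */
#include <stdbool.h>

#define PG_SH_enable  (1u << 0)  /* hypothetical: domain runs in shadow mode */
#define PG_refcounts  (1u << 1)  /* hypothetical: shadow refcounting enabled */

struct domain { unsigned int paging_mode; };

/* "paging" check: a single bit test, typically compiling to TEST+Jcc. */
static inline bool paging_mode_refcounts(const struct domain *d)
{
    return d->paging_mode & PG_refcounts;
}

/* "shadow" check: shadow_mode_enabled() && paging_mode_...(), so two bits
 * have to be inspected, commonly a MOV/AND/CMP/Jcc sequence. */
static inline bool shadow_mode_enabled(const struct domain *d)
{
    return d->paging_mode & PG_SH_enable;
}

static inline bool shadow_mode_refcounts(const struct domain *d)
{
    return shadow_mode_enabled(d) && paging_mode_refcounts(d);
}

[Inside shadow-internal code the shadow_mode_enabled() half is already
known to be true, so dropping it loses nothing while saving the extra
test, which is all the conversions in this patch do.]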