[PATCH v2 5/6] x86: FLUSH_CACHE -> FLUSH_CACHE_EVICT
This is to make the difference from FLUSH_CACHE_WRITEBACK more explicit.

Requested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Note that this (of course) collides with "x86/HVM: restrict use of
pinned cache attributes as well as associated flushing".
---
v2: New.

--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -232,7 +232,7 @@ unsigned int flush_area_local(const void
     if ( flags & FLUSH_HVM_ASID_CORE )
         hvm_flush_guest_tlbs();

-    if ( flags & (FLUSH_CACHE | FLUSH_CACHE_WRITEBACK) )
+    if ( flags & (FLUSH_CACHE_EVICT | FLUSH_CACHE_WRITEBACK) )
     {
         const struct cpuinfo_x86 *c = &current_cpu_data;
         unsigned long sz = 0;
@@ -245,13 +245,13 @@ unsigned int flush_area_local(const void
              c->x86_clflush_size && c->x86_cache_size && sz &&
              ((sz >> 10) < c->x86_cache_size) )
         {
-            if ( flags & FLUSH_CACHE )
+            if ( flags & FLUSH_CACHE_EVICT )
                 cache_flush(va, sz);
             else
                 cache_writeback(va, sz);
-            flags &= ~(FLUSH_CACHE | FLUSH_CACHE_WRITEBACK);
+            flags &= ~(FLUSH_CACHE_EVICT | FLUSH_CACHE_WRITEBACK);
         }
-        else if ( flags & FLUSH_CACHE )
+        else if ( flags & FLUSH_CACHE_EVICT )
             wbinvd();
         else
             wbnoinvd();
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2228,7 +2228,7 @@ void hvm_shadow_handle_cd(struct vcpu *v
         domain_pause_nosync(v->domain);

         /* Flush physical caches. */
-        flush_all(FLUSH_CACHE);
+        flush_all(FLUSH_CACHE_EVICT);
         hvm_set_uc_mode(v, 1);

         domain_unpause(v->domain);
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -614,7 +614,7 @@ int hvm_set_mem_pinned_cacheattr(struct
                 break;
             /* fall through */
         default:
-            flush_all(FLUSH_CACHE);
+            flush_all(FLUSH_CACHE_EVICT);
             break;
         }
         return 0;
@@ -680,7 +680,7 @@ int hvm_set_mem_pinned_cacheattr(struct

     p2m_memory_type_changed(d);
     if ( type != X86_MT_WB )
-        flush_all(FLUSH_CACHE);
+        flush_all(FLUSH_CACHE_EVICT);

     return rc;
 }
@@ -782,7 +782,7 @@ void memory_type_changed(struct domain *
          d->vcpu && d->vcpu[0] )
     {
         p2m_memory_type_changed(d);
-        flush_all(FLUSH_CACHE);
+        flush_all(FLUSH_CACHE_EVICT);
     }
 }
--- a/xen/arch/x86/include/asm/flushtlb.h
+++ b/xen/arch/x86/include/asm/flushtlb.h
@@ -113,7 +113,7 @@ void switch_cr3_cr4(unsigned long cr3, u
 /* Flush TLBs (or parts thereof) including global mappings */
 #define FLUSH_TLB_GLOBAL 0x200
 /* Flush data caches */
-#define FLUSH_CACHE 0x400
+#define FLUSH_CACHE_EVICT 0x400
 /* VA for the flush has a valid mapping */
 #define FLUSH_VA_VALID 0x800
 /* Flush CPU state */
@@ -191,7 +191,7 @@ static inline int clean_and_invalidate_d
 {
     unsigned int order = get_order_from_bytes(size);
     /* sub-page granularity support needs to be added if necessary */
-    flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
+    flush_area_local(p, FLUSH_CACHE_EVICT | FLUSH_ORDER(order));
     return 0;
 }
 static inline int clean_dcache_va_range(const void *p, unsigned long size)
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5193,7 +5193,7 @@ int map_pages_to_xen(
         if ( (flags & _PAGE_PRESENT) &&                           \
              (((o_) ^ flags) & PAGE_CACHE_ATTRS) )                \
         {                                                         \
-            flush_flags |= FLUSH_CACHE;                           \
+            flush_flags |= FLUSH_CACHE_EVICT;                     \
             if ( virt >= DIRECTMAP_VIRT_START &&                  \
                  virt < HYPERVISOR_VIRT_END )                     \
                 flush_flags |= FLUSH_VA_VALID;                    \
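[Editorial note, not part of the patch: a minimal sketch of the distinction the
new name encodes, as visible in the flush_area_local() hunk above. flush_all()
and the FLUSH_* flags are the interfaces touched by the patch; the two helper
functions and their scenarios below are hypothetical.]

    /*
     * Cache attributes of a range were tightened (e.g. WB -> UC): stale
     * cache lines must not be consumed again, so write back *and*
     * invalidate (CLFLUSH / WBINVD underneath, per the dispatch above).
     */
    static void example_after_cacheattr_change(void)
    {
        flush_all(FLUSH_CACHE_EVICT);
    }

    /*
     * Data only needs to reach memory (e.g. to become visible to a
     * non-coherent agent): the lines may stay cached, so the cheaper
     * write-back without invalidation (CLWB / WBNOINVD) suffices.
     */
    static void example_make_visible(void)
    {
        flush_all(FLUSH_CACHE_WRITEBACK);
    }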