[Xen-changelog] [xen master] xen/arm: Introduce clean_and_invalidate_xen_dcache() macro
commit bc31a1ac2ecd56e8348171de85e4d59c56d70397
Author:     Oleksandr Tyshchenko <oleksandr.tyshchenko@xxxxxxxxxxxxxxx>
AuthorDate: Tue Mar 11 15:19:44 2014 +0200
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Fri Mar 14 14:56:22 2014 +0000

    xen/arm: Introduce clean_and_invalidate_xen_dcache() macro

    This macro is very similar to clean_xen_dcache(), but it performs a
    clean and invalidate of the dcache.  Also modify flush_page_to_ram()
    to call the clean_and_invalidate_xen_dcache_va_range() function.

    Signed-off-by: Oleksandr Tyshchenko <oleksandr.tyshchenko@xxxxxxxxxxxxxxx>
    Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/arch/arm/mm.c          |    8 ++------
 xen/include/asm-arm/page.h |   26 ++++++++++++++++++++++++--
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 308a798..544aa87 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -344,13 +344,9 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
 
 void flush_page_to_ram(unsigned long mfn)
 {
-    void *p, *v = map_domain_page(mfn);
-
-    dsb();           /* So the CPU issues all writes to the range */
-    for ( p = v; p < v + PAGE_SIZE ; p += cacheline_bytes )
-        asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
-    dsb();           /* So we know the flushes happen before continuing */
+    void *v = map_domain_page(mfn);
 
+    clean_and_invalidate_xen_dcache_va_range(v, PAGE_SIZE);
     unmap_domain_page(v);
 }
 
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 6fe7fc5..905beb8 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -226,7 +226,7 @@ static inline lpae_t mfn_to_xen_entry(unsigned long mfn)
 /* Actual cacheline size on the boot CPU. */
 extern size_t cacheline_bytes;
 
-/* Function for flushing medium-sized areas.
+/* Functions for flushing medium-sized areas.
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
 static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
@@ -238,7 +238,17 @@ static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
     dsb();           /* So we know the flushes happen before continuing */
 }
 
-/* Macro for flushing a single small item.  The predicate is always
+static inline void clean_and_invalidate_xen_dcache_va_range
+    (void *p, unsigned long size)
+{
+    void *end;
+    dsb();           /* So the CPU issues all writes to the range */
+    for ( end = p + size; p < end; p += cacheline_bytes )
+        asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
+    dsb();           /* So we know the flushes happen before continuing */
+}
+
+/* Macros for flushing a single small item.  The predicate is always
  * compile-time constant so this will compile down to 3 instructions in
  * the common case. */
 #define clean_xen_dcache(x) do { \
@@ -253,6 +263,18 @@ static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
         : : "r" (_p), "m" (*_p));                                       \
 } while (0)
 
+#define clean_and_invalidate_xen_dcache(x) do {                         \
+    typeof(x) *_p = &(x);                                               \
+    if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
+        clean_and_invalidate_xen_dcache_va_range(_p, sizeof(x));        \
+    else                                                                \
+        asm volatile (                                                  \
+            "dsb sy;"   /* Finish all earlier writes */                 \
+            __clean_and_invalidate_xen_dcache_one(0)                    \
+            "dsb sy;"   /* Finish flush before continuing */            \
+            : : "r" (_p), "m" (*_p));                                   \
+} while (0)
+
 /* Flush the dcache for an entire page. */
 void flush_page_to_ram(unsigned long mfn);
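For reference, below is a minimal, self-contained sketch of the flush pattern the new range helper follows. It is not the Xen code: CACHELINE_BYTES, barrier() and clean_and_invalidate_line() are hypothetical stand-ins for Xen's cacheline_bytes, dsb() and the per-cacheline __clean_and_invalidate_xen_dcache_one() asm (DC CIVAC on arm64, DCCIMVAC on arm32), so the sketch compiles anywhere but performs no real cache maintenance.

#include <stddef.h>

#define CACHELINE_BYTES 64          /* stand-in for Xen's cacheline_bytes */

/* Stand-in for dsb(); here only a compiler barrier, not a real DSB. */
static void barrier(void)
{
    __asm__ volatile ("" ::: "memory");
}

/* Stand-in for the per-cacheline clean+invalidate instruction. */
static void clean_and_invalidate_line(void *p)
{
    (void)p;
}

/* Same shape as clean_and_invalidate_xen_dcache_va_range(): issue all
 * pending writes, walk the range one cacheline at a time, then wait for
 * the maintenance operations to finish before continuing. */
static void clean_and_invalidate_va_range(void *p, size_t size)
{
    char *cur = p;
    char *end = cur + size;

    barrier();
    for ( ; cur < end; cur += CACHELINE_BYTES )
        clean_and_invalidate_line(cur);
    barrier();
}

int main(void)
{
    static char page[4096];

    /* Roughly what flush_page_to_ram() does for a freshly mapped page. */
    clean_and_invalidate_va_range(page, sizeof(page));
    return 0;
}

With the range helper factored out, flush_page_to_ram() reduces to map_domain_page(), a single call covering PAGE_SIZE bytes, and unmap_domain_page(), which is exactly what the mm.c hunk above does; the clean_and_invalidate_xen_dcache() macro handles the small, single-item case without the loop.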