[XenARM] [PATCH 09/14] arm: implement cache ops for ARMv7
arm: implement cache ops for ARMv7

 xen/arch/arm/xen/Makefile   |    1 +
 xen/arch/arm/xen/cache-v7.S |   94 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 95 insertions(+), 0 deletions(-)

Signed-off-by: Jaemin Ryu <jm77.ryu@xxxxxxxxxxx>

diff -r 15aaa20e14bf xen/arch/arm/xen/Makefile
--- a/xen/arch/arm/xen/Makefile Sun Feb 12 11:55:04 2012 +0900
+++ b/xen/arch/arm/xen/Makefile Sun Feb 12 12:05:16 2012 +0900
@@ -22,3 +22,4 @@
 obj-y += p2m.o
 obj-y += perfmon.o
 obj-y += pci.o
 obj-y += armv7.o
+obj-y += cache-v7.o
diff -r 15aaa20e14bf xen/arch/arm/xen/cache-v7.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/cache-v7.S Sun Feb 12 12:05:16 2012 +0900
@@ -0,0 +1,94 @@
+#include <xen/linkage.h>
+#include <asm/page.h>
+#include <asm/cpu-ops.h>
+#include <asm/system.h>
+#include <asm/asm-offsets.h>
+
+        .macro  v7_way_op, op
+        dmb                             @ ensure ordering with previous memory accesses
+        mrc     p15, 1, r0, c0, c0, 1   @ read clidr
+        ands    r3, r0, #0x7000000      @ extract loc from clidr
+        mov     r3, r3, lsr #23         @ left align loc bit field
+        beq     50f                     @ if loc is 0, then no need to clean
+        mov     r10, #0                 @ start clean at cache level 0
+10:
+        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
+        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
+        and     r1, r1, #7              @ mask off the bits for current cache only
+        cmp     r1, #2                  @ see what cache we have at this level
+        blt     40f                     @ skip if no cache, or just i-cache
+        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
+        isb                             @ isb to sync the new cssr & csidr
+        mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
+        and     r2, r1, #7              @ extract the length of the cache lines
+        add     r2, r2, #4              @ add 4 (line length offset)
+        ldr     r4, =0x3ff
+        ands    r4, r4, r1, lsr #3      @ find maximum way number (ways - 1)
+        clz     r5, r4                  @ find bit position of way size increment
+        ldr     r7, =0x7fff
+        ands    r7, r7, r1, lsr #13     @ extract maximum set number (sets - 1)
+20:
+        mov     r9, r4                  @ create working copy of max way number
+30:
+        orr     r11, r10, r9, lsl r5    @ factor way and cache number into r11
+        orr     r11, r11, r7, lsl r2    @ factor set number into r11
+        mcr     p15, 0, r11, c7, \op, 2 @ clean & invalidate by set/way
+        subs    r9, r9, #1              @ decrement the way
+        bge     30b
+        subs    r7, r7, #1              @ decrement the set
+        bge     20b
+40:
+        add     r10, r10, #2            @ increment cache number
+        cmp     r3, r10
+        bgt     10b
+50:
+        mov     r10, #0                 @ switch back to cache level 0
+        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
+        dsb
+        isb
+        .endm
+
+        .text
+
+PRIVATE(v7_flush_cache_all)
+        stmfd   sp!, {r4-r5, r7, r9-r11, lr}
+
+        v7_way_op c14
+
+        mov     r0, #0
+        mcr     p15, 0, r0, c7, c5, 0   @ I+BTB cache invalidate
+        ldmfd   sp!, {r4-r5, r7, r9-r11, lr}
+        mov     pc, lr
+
+DECLARE_CPU_OP(cpu_flush_cache_all, v7_flush_cache_all)
+
+PRIVATE(v7_flush_cache_range)
+        mrc     p15, 1, r3, c0, c0, 0   @ read ccsidr (level selected in csselr)
+        and     r3, r3, #7              @ cache line size encoding
+        mov     r2, #16                 @ size offset: line = 16 << encoding
+        mov     r2, r2, lsl r3          @ actual cache line size in bytes
+1:
+        mcr     p15, 0, r0, c7, c14, 1  @ clean & invalidate D line / unified line
+        add     r0, r0, r2
+        cmp     r0, r1
+        blo     1b
+        dsb
+        mov     pc, lr
+
+DECLARE_CPU_OP(cpu_flush_cache_range, v7_flush_cache_range)
+
+PRIVATE(v7_clean_cache_range)
+        mrc     p15, 1, r3, c0, c0, 0   @ read ccsidr (level selected in csselr)
+        and     r3, r3, #7              @ cache line size encoding
+        mov     r2, #16                 @ size offset: line = 16 << encoding
+        mov     r2, r2, lsl r3          @ actual cache line size in bytes
+
+1:
+        mcr     p15, 0, r0, c7, c10, 1  @ clean D entry
+        add     r0, r0, r2
+        cmp     r0, r1
+        blo     1b
+        dsb
+        mov     pc, lr
+
+DECLARE_CPU_OP(cpu_clean_cache_range, v7_clean_cache_range)
Attachment: patch09.diff
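For readers tracing the register arithmetic in v7_way_op above, here is a minimal C sketch (not part of the patch) of the same set/way walk: decode CLIDR to find the Level of Coherency, then for each data/unified cache level decode CCSIDR into line size, way count and set count, and issue one clean-and-invalidate-by-set/way operation per (set, way) pair. read_clidr(), read_ccsidr() and dccisw() are hypothetical stand-ins for the mrc/mcr instructions used in the assembly.

    #include <stdint.h>

    extern uint32_t read_clidr(void);             /* mrc p15, 1, <r>, c0, c0, 1 */
    extern uint32_t read_ccsidr(uint32_t csselr); /* write csselr, isb, read ccsidr */
    extern void dccisw(uint32_t setway);          /* mcr p15, 0, <r>, c7, c14, 2 */

    static void flush_dcache_all_by_set_way(void)
    {
        uint32_t clidr = read_clidr();
        uint32_t loc = (clidr >> 24) & 0x7;       /* Level of Coherency */

        for (uint32_t level = 0; level < loc; level++) {
            uint32_t ctype = (clidr >> (level * 3)) & 0x7;
            if (ctype < 2)                        /* no cache, or I-cache only */
                continue;

            uint32_t ccsidr = read_ccsidr(level << 1);
            uint32_t line_shift = (ccsidr & 0x7) + 4;   /* log2(line bytes) */
            uint32_t max_way = (ccsidr >> 3) & 0x3ff;   /* ways - 1 */
            uint32_t max_set = (ccsidr >> 13) & 0x7fff; /* sets - 1 */
            /* clz gives the shift that left-aligns the way number, as in
               "clz r5, r4"; guard the direct-mapped case (max_way == 0),
               where __builtin_clz(0) would be undefined in C. */
            uint32_t way_shift = max_way ? __builtin_clz(max_way) : 0;

            for (uint32_t set = 0; set <= max_set; set++)
                for (uint32_t way = 0; way <= max_way; way++)
                    dccisw((way << way_shift) | (set << line_shift) | (level << 1));
        }
    }

The range primitives use the same CCSIDR line-size encoding: a field value of n means 16 << n bytes per line (e.g. encoding 1 gives 32-byte lines). A similarly hedged sketch of the by-MVA clean loop, with dccmvac() as a hypothetical stand-in for "mcr p15, 0, <r>, c7, c10, 1":

    extern void dccmvac(uint32_t mva);            /* mcr p15, 0, <r>, c7, c10, 1 */

    static void clean_dcache_range(uint32_t start, uint32_t end, uint32_t ccsidr)
    {
        /* Line size in bytes is 16 << CCSIDR.LineSize. Like the assembly,
           this steps from 'start' as given, so the caller is assumed to
           pass a line-aligned start address. */
        uint32_t line = 16u << (ccsidr & 0x7);

        for (uint32_t mva = start; mva < end; mva += line)
            dccmvac(mva);
        /* the assembly ends with a dsb to complete the maintenance ops */
    }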