[Xen-devel] [PATCH v4 1/7] xen/arm: Read the dcache line size from CTR register
See the corresponding Linux commit as reference:

  commit f91e2c3bd427239c198351f44814dd39db91afe0
  Author: Catalin Marinas <catalin.marinas@xxxxxxx>
  Date:   Tue Dec 7 16:52:04 2010 +0100

      ARM: 6527/1: Use CTR instead of CCSIDR for the D-cache line size on ARMv7

      The current implementation of the dcache_line_size macro reads the L1
      cache size from the CCSIDR register. This, however, is not guaranteed to
      be the smallest cache line in the cache hierarchy. The patch changes to
      the macro to use the more architecturally correct CTR register.

      Reported-by: Kevin Sapp <ksapp@xxxxxxxxxxx>
      Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
      Signed-off-by: Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>

Also rename cacheline_bytes to dcache_line_bytes to clarify that it is
the minimum D-Cache line size.

Suggested-by: Julien Grall <julien.grall@xxxxxxx>
Signed-off-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
Changes in v4:
- move patch to the beginning of the series
- rename cacheline_bytes to dcache_line_bytes
- improve commit message
---
 xen/arch/arm/arm32/head.S    |  2 +-
 xen/arch/arm/arm64/head.S    |  2 +-
 xen/arch/arm/setup.c         | 13 ++++++-------
 xen/include/asm-arm/cpregs.h |  2 ++
 xen/include/asm-arm/page.h   | 16 ++++++++--------
 5 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 43374e7..2b12908 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -504,7 +504,7 @@ ENTRY(relocate_xen)
         dsb        /* So the CPU issues all writes to the range */
 
         mov   r5, r4
-        ldr   r6, =cacheline_bytes /* r6 := step */
+        ldr   r6, =dcache_line_bytes /* r6 := step */
         ldr   r6, [r6]
         mov   r7, r3
 
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index fa0ef70..38899c7 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -631,7 +631,7 @@ ENTRY(relocate_xen)
         dsb   sy        /* So the CPU issues all writes to the range */
 
         mov   x9, x3
-        ldr   x10, =cacheline_bytes /* x10 := step */
+        ldr   x10, =dcache_line_bytes /* x10 := step */
         ldr   x10, [x10]
         mov   x11, x2
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 032a6a8..fced75a 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -680,19 +680,18 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
 }
 #endif
 
-size_t __read_mostly cacheline_bytes;
+size_t __read_mostly dcache_line_bytes;
 
 /* Very early check of the CPU cache properties */
 void __init setup_cache(void)
 {
-    uint32_t ccsid;
+    uint32_t ctr;
 
-    /* Read the cache size ID register for the level-0 data cache */
-    WRITE_SYSREG32(0, CSSELR_EL1);
-    ccsid = READ_SYSREG32(CCSIDR_EL1);
+    /* Read CTR */
+    ctr = READ_SYSREG32(CTR_EL0);
 
-    /* Low 3 bits are log2(cacheline size in words) - 2. */
-    cacheline_bytes = 1U << (4 + (ccsid & 0x7));
+    /* Bits 16-19 are the log2 number of words in the cacheline. */
+    dcache_line_bytes = (size_t) (4 << ((ctr >> 16) & 0xf));
 }
 
 /* C entry point for boot CPU */
diff --git a/xen/include/asm-arm/cpregs.h b/xen/include/asm-arm/cpregs.h
index 9e13848..8db65d5 100644
--- a/xen/include/asm-arm/cpregs.h
+++ b/xen/include/asm-arm/cpregs.h
@@ -106,6 +106,7 @@
 
 /* CP15 CR0: CPUID and Cache Type Registers */
 #define MIDR            p15,0,c0,c0,0   /* Main ID Register */
+#define CTR             p15,0,c0,c0,1   /* Cache Type Register */
 #define MPIDR           p15,0,c0,c0,5   /* Multiprocessor Affinity Register */
 #define ID_PFR0         p15,0,c0,c1,0   /* Processor Feature Register 0 */
 #define ID_PFR1         p15,0,c0,c1,1   /* Processor Feature Register 1 */
@@ -303,6 +304,7 @@
 #define CPACR_EL1               CPACR
 #define CPTR_EL2                HCPTR
 #define CSSELR_EL1              CSSELR
+#define CTR_EL0                 CTR
 #define DACR32_EL2              DACR
 #define ESR_EL1                 DFSR
 #define ESR_EL2                 HSR
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index d948250..ce18f0c 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -134,7 +134,7 @@
 /* Architectural minimum cacheline size is 4 32-bit words. */
 #define MIN_CACHELINE_BYTES 16
 /* Actual cacheline size on the boot CPU. */
-extern size_t cacheline_bytes;
+extern size_t dcache_line_bytes;
 
 #define copy_page(dp, sp) memcpy(dp, sp, PAGE_SIZE)
 
@@ -145,7 +145,7 @@ extern size_t cacheline_bytes;
 static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
 {
     const void *end = p + size;
-    size_t cacheline_mask = cacheline_bytes - 1;
+    size_t cacheline_mask = dcache_line_bytes - 1;
 
     dsb(sy);           /* So the CPU issues all writes to the range */
 
@@ -153,7 +153,7 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
     {
         p = (void *)((uintptr_t)p & ~cacheline_mask);
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
-        p += cacheline_bytes;
+        p += dcache_line_bytes;
     }
     if ( (uintptr_t)end & cacheline_mask )
     {
@@ -161,7 +161,7 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
         asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
     }
 
-    for ( ; p < end; p += cacheline_bytes )
+    for ( ; p < end; p += dcache_line_bytes )
         asm volatile (__invalidate_dcache_one(0) : : "r" (p));
 
     dsb(sy);           /* So we know the flushes happen before continuing */
@@ -173,8 +173,8 @@ static inline int clean_dcache_va_range(const void *p, unsigned long size)
 {
     const void *end = p + size;
     dsb(sy);           /* So the CPU issues all writes to the range */
-    p = (void *)((uintptr_t)p & ~(cacheline_bytes - 1));
-    for ( ; p < end; p += cacheline_bytes )
+    p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
+    for ( ; p < end; p += dcache_line_bytes )
         asm volatile (__clean_dcache_one(0) : : "r" (p));
     dsb(sy);           /* So we know the flushes happen before continuing */
     /* ARM callers assume that dcache_* functions cannot fail. */
@@ -186,8 +186,8 @@ static inline int clean_and_invalidate_dcache_va_range
 {
     const void *end = p + size;
     dsb(sy);         /* So the CPU issues all writes to the range */
-    p = (void *)((uintptr_t)p & ~(cacheline_bytes - 1));
-    for ( ; p < end; p += cacheline_bytes )
+    p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
+    for ( ; p < end; p += dcache_line_bytes )
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
     dsb(sy);         /* So we know the flushes happen before continuing */
     /* ARM callers assume that dcache_* functions cannot fail. */
-- 
1.9.1
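
[Editor's note] For readers unfamiliar with the encoding used by the new
setup_cache(): bits [19:16] of CTR/CTR_EL0 (the DminLine field) give the log2
of the number of 32-bit words in the smallest data cache line implemented.
The following standalone C sketch mirrors the decoding formula from the patch;
it is not part of the patch, and the sample register value and helper name are
made up purely for illustration.

    /*
     * Illustration only: decode the minimum D-cache line size from a
     * CTR/CTR_EL0 value, using the same formula as setup_cache() above.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* CTR[19:16] (DminLine) = log2(words per line); a word is 4 bytes. */
    static size_t dcache_line_bytes_from_ctr(uint32_t ctr)
    {
        return (size_t)(4 << ((ctr >> 16) & 0xf));
    }

    int main(void)
    {
        uint32_t ctr = 0x84448004;   /* hypothetical CTR with DminLine = 4 */

        /* 4 << 4 = 64 bytes, a common line size on ARMv7/ARMv8 CPUs. */
        printf("dcache_line_bytes = %zu\n", dcache_line_bytes_from_ctr(ctr));
        return 0;
    }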