[xen stable-4.17] xen/arm: page: Avoid pointer overflow on cache clean & invalidate
commit 958706fd2e178ffe8e5597b05b694b494e24258b
Author:     Michal Orzel <michal.orzel@xxxxxxx>
AuthorDate: Tue Dec 12 14:51:20 2023 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Dec 12 14:51:20 2023 +0100

    xen/arm: page: Avoid pointer overflow on cache clean & invalidate

    On Arm32, after cleaning and invalidating the last dcache line of the
    top domheap page, i.e. VA = 0xfffff000 (as a result of flushing the
    page to RAM), we end up adding the value of a dcache line size to the
    pointer once again, which results in a pointer arithmetic overflow
    (with a 64B line size, the operation 0xffffffc0 + 0x40 overflows to
    0x0). Such behavior is undefined, and given the wide range of
    compiler versions we support, it is difficult to determine what could
    happen in such a scenario.

    Modify clean_and_invalidate_dcache_va_range() as well as
    clean_dcache_va_range() and invalidate_dcache_va_range(), due to the
    similarity of their handling, to prevent pointer arithmetic overflow.
    Modify the loops to use an additional variable to store the index of
    the next cacheline. Add an assert to prevent passing a region that
    wraps around, which is illegal and would end up in a page fault
    anyway (the region 0-2MB is unmapped). Lastly, return early if the
    size passed is 0.

    Note that on Arm64 we don't have this problem, given that the max VA
    space we support is 48 bits.

    This is XSA-447 / CVE-2023-46837.

    Signed-off-by: Michal Orzel <michal.orzel@xxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
    master commit: 190b7f49af6487a9665da63d43adc9d9a5fbd01e
    master date: 2023-12-12 14:01:00 +0100
---
 xen/arch/arm/include/asm/page.h | 35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
index d7fe770a5e..bf71042fb5 100644
--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -161,6 +161,13 @@ static inline size_t read_dcache_line_bytes(void)
 static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
+    unsigned long idx = 0;
+
+    if ( !size )
+        return 0;
+
+    /* Passing a region that wraps around is illegal */
+    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
     dsb(sy);           /* So the CPU issues all writes to the range */
 
@@ -173,11 +180,11 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
     }
 
     for ( ; size >= dcache_line_bytes;
-            p += dcache_line_bytes, size -= dcache_line_bytes )
-        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
+            idx += dcache_line_bytes, size -= dcache_line_bytes )
+        asm volatile (__invalidate_dcache_one(0) : : "r" (p + idx));
 
     if ( size > 0 )
-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
 
     dsb(sy);           /* So we know the flushes happen before continuing */
 
@@ -187,14 +194,21 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
 static inline int clean_dcache_va_range(const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
+    unsigned long idx = 0;
+
+    if ( !size )
+        return 0;
+
+    /* Passing a region that wraps around is illegal */
+    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
     dsb(sy);           /* So the CPU issues all writes to the range */
     size += (uintptr_t)p & cacheline_mask;
     size = (size + cacheline_mask) & ~cacheline_mask;
     p = (void *)((uintptr_t)p & ~cacheline_mask);
     for ( ; size >= dcache_line_bytes;
-            p += dcache_line_bytes, size -= dcache_line_bytes )
-        asm volatile (__clean_dcache_one(0) : : "r" (p));
+            idx += dcache_line_bytes, size -= dcache_line_bytes )
+        asm volatile (__clean_dcache_one(0) : : "r" (p + idx));
     dsb(sy);           /* So we know the flushes happen before continuing */
     /* ARM callers assume that dcache_* functions cannot fail. */
     return 0;
@@ -204,14 +218,21 @@ static inline int clean_and_invalidate_dcache_va_range
     (const void *p, unsigned long size)
 {
     size_t cacheline_mask = dcache_line_bytes - 1;
+    unsigned long idx = 0;
+
+    if ( !size )
+        return 0;
+
+    /* Passing a region that wraps around is illegal */
+    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
 
     dsb(sy);           /* So the CPU issues all writes to the range */
     size += (uintptr_t)p & cacheline_mask;
     size = (size + cacheline_mask) & ~cacheline_mask;
     p = (void *)((uintptr_t)p & ~cacheline_mask);
     for ( ; size >= dcache_line_bytes;
-            p += dcache_line_bytes, size -= dcache_line_bytes )
-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+            idx += dcache_line_bytes, size -= dcache_line_bytes )
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
     dsb(sy);           /* So we know the flushes happen before continuing */
     /* ARM callers assume that dcache_* functions cannot fail. */
     return 0;
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.17
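
The shape of the fix is easy to demonstrate outside of Xen. Below is a
minimal, stand-alone sketch, not the Xen code itself: DCACHE_LINE_BYTES
and walk_va_range are made-up names, the fixed 64-byte line size is an
assumption (Xen probes dcache_line_bytes at boot), and a printf stands
in for the per-line cache-maintenance instruction. The point it models
is that the loop never advances the pointer p; only the integer offset
idx grows, so the expression p + idx is only ever evaluated for a line
that is actually operated on.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical fixed line size; Xen probes dcache_line_bytes at boot. */
    #define DCACHE_LINE_BYTES 64UL

    /*
     * Overflow-safe walk mirroring the patched loops: align the start
     * down and the size up to a line boundary, then step an integer
     * offset. 'p + idx' is only computed for lines really touched, so
     * no pointer one line past the end of the region is ever formed.
     */
    static void walk_va_range(const void *p, unsigned long size)
    {
        size_t cacheline_mask = DCACHE_LINE_BYTES - 1;
        unsigned long idx = 0;

        if ( !size )
            return;

        /* Passing a region that wraps around is illegal. */
        assert(((uintptr_t)p + size - 1) >= (uintptr_t)p);

        size += (uintptr_t)p & cacheline_mask;
        size = (size + cacheline_mask) & ~cacheline_mask;
        p = (const void *)((uintptr_t)p & ~cacheline_mask);

        for ( ; size >= DCACHE_LINE_BYTES;
              idx += DCACHE_LINE_BYTES, size -= DCACHE_LINE_BYTES )
            /* Stand-in for the cache-maintenance instruction. */
            printf("line op at %p\n", (void *)((uintptr_t)p + idx));
    }

    int main(void)
    {
        char buf[256];

        /* Deliberately unaligned start and size. */
        walk_va_range(buf + 3, 130);
        return 0;
    }

This also shows why an index is used rather than simply clamping the
pointer: with the old "p += dcache_line_bytes" form, the final loop
increment on Arm32 could compute a pointer past the top of the address
space (0xffffffc0 + 0x40), which is undefined even though it is never
dereferenced, whereas incrementing the unsigned integer idx is always
well-defined.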