[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] x86/mm: add speculation barriers to open coded locks
commit 42a572a38e22a97d86a4b648a22597628d5b42e4 Author: Roger Pau Monné <roger.pau@xxxxxxxxxx> AuthorDate: Mon Mar 4 18:08:48 2024 +0100 Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CommitDate: Tue Mar 12 15:50:04 2024 +0000 x86/mm: add speculation barriers to open coded locks Add a speculation barrier to the clearly identified open-coded lock taking functions. Note that the memory sharing page_lock() replacement (_page_lock()) is left as-is, as the code is experimental and not security supported. This is part of XSA-453 / CVE-2024-2193 Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> --- xen/arch/x86/include/asm/mm.h | 4 +++- xen/arch/x86/mm.c | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h index 7d26d9cd2f..65d209d5ff 100644 --- a/xen/arch/x86/include/asm/mm.h +++ b/xen/arch/x86/include/asm/mm.h @@ -399,7 +399,9 @@ const struct platform_bad_page *get_platform_badpages(unsigned int *array_size); * The use of PGT_locked in mem_sharing does not collide, since mem_sharing is * only supported for hvm guests, which do not have PV PTEs updated. */ -int page_lock(struct page_info *page); +int page_lock_unsafe(struct page_info *page); +#define page_lock(pg) lock_evaluate_nospec(page_lock_unsafe(pg)) + void page_unlock(struct page_info *page); void put_page_type(struct page_info *page); diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 0c6658298d..2ba4c26401 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -2034,7 +2034,7 @@ static inline bool current_locked_page_ne_check(struct page_info *page) { #define current_locked_page_ne_check(x) true #endif -int page_lock(struct page_info *page) +int page_lock_unsafe(struct page_info *page) { unsigned long x, nx; @@ -2095,7 +2095,7 @@ void page_unlock(struct page_info *page) * l3t_lock(), so to avoid deadlock we must avoid grabbing them in * reverse order. 
*/ -static void l3t_lock(struct page_info *page) +static always_inline void l3t_lock(struct page_info *page) { unsigned long x, nx; @@ -2104,6 +2104,8 @@ static void l3t_lock(struct page_info *page) cpu_relax(); nx = x | PGT_locked; } while ( cmpxchg(&page->u.inuse.type_info, x, nx) != x ); + + block_lock_speculation(); } static void l3t_unlock(struct page_info *page) -- generated by git-patchbot for /home/xen/git/xen.git#staging
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.