[xen stable-4.18] x86: protect conditional lock taking from speculative execution
commit 4da8ca9cb9cfdb92c9dd09d5270ae16a3b2dbc89
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Mon Mar 4 16:24:21 2024 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Mar 12 16:00:27 2024 +0000

    x86: protect conditional lock taking from speculative execution

    Conditionally taken locks that use the pattern:

        if ( lock )
            spin_lock(...);

    need an else branch in order to issue a speculation barrier in the else
    case, just like it's done in case the lock needs to be acquired.

    eval_nospec() could be used on the condition itself, but that would
    result in a double barrier on the branch where the lock is taken.

    Introduce a new pair of helpers, {gfn,spin}_lock_if(), that can be used
    to conditionally take a lock in a speculation-safe way.

    This is part of XSA-453 / CVE-2024-2193

    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 03cf7ca23e0e876075954c558485b267b7d02406)
---
 xen/arch/x86/mm.c          | 35 +++++++++++++----------------------
 xen/arch/x86/mm/mm-locks.h |  9 +++++++++
 xen/arch/x86/mm/p2m.c      |  5 ++---
 xen/include/xen/spinlock.h |  8 ++++++++
 4 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 000fd0fb55..45bfbc2522 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5007,8 +5007,7 @@ static l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
         if ( !l3t )
             return NULL;
         UNMAP_DOMAIN_PAGE(l3t);
-        if ( locking )
-            spin_lock(&map_pgdir_lock);
+        spin_lock_if(locking, &map_pgdir_lock);
         if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
         {
             l4_pgentry_t l4e = l4e_from_mfn(l3mfn, __PAGE_HYPERVISOR);
@@ -5045,8 +5044,7 @@ static l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
             return NULL;
         }
         UNMAP_DOMAIN_PAGE(l2t);
-        if ( locking )
-            spin_lock(&map_pgdir_lock);
+        spin_lock_if(locking, &map_pgdir_lock);
         if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
         {
             l3e_write(pl3e, l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
@@ -5084,8 +5082,7 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
             return NULL;
         }
         UNMAP_DOMAIN_PAGE(l1t);
-        if ( locking )
-            spin_lock(&map_pgdir_lock);
+        spin_lock_if(locking, &map_pgdir_lock);
         if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
         {
             l2e_write(pl2e, l2e_from_mfn(l1mfn, __PAGE_HYPERVISOR));
@@ -5116,6 +5113,8 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
     do {                                        \
         if ( locking )                          \
             l3t_lock(page);                     \
+        else                                    \
+            block_lock_speculation();           \
     } while ( false )

 #define L3T_UNLOCK(page)                                           \
@@ -5331,8 +5330,7 @@ int map_pages_to_xen(
             if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL )
                 flush_flags |= FLUSH_TLB_GLOBAL;

-            if ( locking )
-                spin_lock(&map_pgdir_lock);
+            spin_lock_if(locking, &map_pgdir_lock);
             if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
                  (l3e_get_flags(*pl3e) & _PAGE_PSE) )
             {
@@ -5436,8 +5434,7 @@ int map_pages_to_xen(
                 if ( l2e_get_flags(*pl2e) & _PAGE_GLOBAL )
                     flush_flags |= FLUSH_TLB_GLOBAL;

-                if ( locking )
-                    spin_lock(&map_pgdir_lock);
+                spin_lock_if(locking, &map_pgdir_lock);
                 if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
                      (l2e_get_flags(*pl2e) & _PAGE_PSE) )
                 {
@@ -5478,8 +5475,7 @@ int map_pages_to_xen(
                 unsigned long base_mfn;
                 const l1_pgentry_t *l1t;

-                if ( locking )
-                    spin_lock(&map_pgdir_lock);
+                spin_lock_if(locking, &map_pgdir_lock);

                 ol2e = *pl2e;
                 /*
@@ -5533,8 +5529,7 @@ int map_pages_to_xen(
             unsigned long base_mfn;
             const l2_pgentry_t *l2t;

-            if ( locking )
-                spin_lock(&map_pgdir_lock);
+            spin_lock_if(locking, &map_pgdir_lock);

             ol3e = *pl3e;
             /*
@@ -5678,8 +5673,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                                        l3e_get_flags(*pl3e)));
             UNMAP_DOMAIN_PAGE(l2t);

-            if ( locking )
-                spin_lock(&map_pgdir_lock);
+            spin_lock_if(locking, &map_pgdir_lock);
             if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
                  (l3e_get_flags(*pl3e) & _PAGE_PSE) )
             {
@@ -5738,8 +5732,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                                            l2e_get_flags(*pl2e) & ~_PAGE_PSE));
                 UNMAP_DOMAIN_PAGE(l1t);

-                if ( locking )
-                    spin_lock(&map_pgdir_lock);
+                spin_lock_if(locking, &map_pgdir_lock);
                 if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
                      (l2e_get_flags(*pl2e) & _PAGE_PSE) )
                 {
@@ -5783,8 +5776,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
              */
             if ( (nf & _PAGE_PRESENT) ||
                  ((v != e) && (l1_table_offset(v) != 0)) )
                 continue;
-            if ( locking )
-                spin_lock(&map_pgdir_lock);
+            spin_lock_if(locking, &map_pgdir_lock);

             /*
              * L2E may be already cleared, or set to a superpage, by
@@ -5831,8 +5823,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
         if ( (nf & _PAGE_PRESENT) ||
              ((v != e) && (l2_table_offset(v) + l1_table_offset(v) != 0)) )
             continue;
-        if ( locking )
-            spin_lock(&map_pgdir_lock);
+        spin_lock_if(locking, &map_pgdir_lock);

         /*
          * L3E may be already cleared, or set to a superpage, by
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 5ec080c02f..b4960fb90e 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -335,6 +335,15 @@ static inline void p2m_unlock(struct p2m_domain *p)
 #define p2m_locked_by_me(p)   mm_write_locked_by_me(&(p)->lock)
 #define gfn_locked_by_me(p,g) p2m_locked_by_me(p)

+static always_inline void gfn_lock_if(bool condition, struct p2m_domain *p2m,
+                                      gfn_t gfn, unsigned int order)
+{
+    if ( condition )
+        gfn_lock(p2m, gfn, order);
+    else
+        block_lock_speculation();
+}
+
 /* PoD lock (per-p2m-table)
  *
  * Protects private PoD data structs: entry and cache
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0983bd71d9..22ab1d606e 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -280,9 +280,8 @@ mfn_t p2m_get_gfn_type_access(struct p2m_domain *p2m, gfn_t gfn,
     if ( q & P2M_UNSHARE )
         q |= P2M_ALLOC;

-    if ( locked )
-        /* Grab the lock here, don't release until put_gfn */
-        gfn_lock(p2m, gfn, 0);
+    /* Grab the lock here, don't release until put_gfn */
+    gfn_lock_if(locked, p2m, gfn, 0);

     mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);

diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 28fce5615e..c830df3430 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -222,6 +222,14 @@ static always_inline void spin_lock_irq(spinlock_t *l)
     block_lock_speculation();                           \
 })

+/* Conditionally take a spinlock in a speculation safe way. */
+static always_inline void spin_lock_if(bool condition, spinlock_t *l)
+{
+    if ( condition )
+        _spin_lock(l);
+    block_lock_speculation();
+}
+
 #define spin_unlock(l)               _spin_unlock(l)
 #define spin_unlock_irq(l)           _spin_unlock_irq(l)
 #define spin_unlock_irqrestore(l, f) _spin_unlock_irqrestore(l, f)
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.18
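
To make the pattern easier to see outside the hypervisor tree, below is a
minimal, self-contained C sketch of the same idea. It is illustrative only,
not the Xen implementation: a pthread mutex stands in for Xen's spinlock, and
block_lock_speculation() is stubbed with a plain compiler barrier (the real
Xen helper emits an architecture-specific speculation barrier, so the stub
here does not actually stop hardware speculation).

/*
 * Standalone sketch of speculation-safe conditional locking.  Illustrative
 * only; the names mirror the patch but this is NOT the Xen implementation.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for Xen's block_lock_speculation().  A compiler barrier only
 * constrains the compiler, not the CPU; the real helper emits an
 * architecture-specific speculation barrier.
 */
static inline void block_lock_speculation(void)
{
    __asm__ __volatile__ ( "" ::: "memory" );
}

/*
 * The vulnerable pattern the patch fixes: the taken branch ends in a lock
 * acquisition (which, in Xen, embeds a barrier), but the not-taken branch
 * falls straight through, so a mispredicted "condition == false" lets the
 * code below run speculatively with no barrier at all.
 */
static void lock_if_unsafe(bool condition, pthread_mutex_t *l)
{
    if ( condition )
        pthread_mutex_lock(l);
    /* !condition: no barrier here -- speculation continues past us */
}

/*
 * The fixed pattern, shaped like the new spin_lock_if(): take the raw lock
 * (no embedded barrier) and then issue a single barrier covering both the
 * taken and the not-taken path.
 */
static void lock_if(bool condition, pthread_mutex_t *l)
{
    if ( condition )
        pthread_mutex_lock(l);
    block_lock_speculation();
}

int main(void)
{
    pthread_mutex_t l = PTHREAD_MUTEX_INITIALIZER;

    lock_if_unsafe(false, &l);  /* nothing blocks speculation here */

    lock_if(true, &l);          /* lock taken: one barrier */
    pthread_mutex_unlock(&l);
    lock_if(false, &l);         /* lock skipped: still one barrier */

    puts("both lock_if() paths end with a speculation barrier");
    return 0;
}

The design choice the patch makes is visible in the fixed variant: the helper
calls the raw lock primitive (Xen's _spin_lock(), which carries no barrier of
its own) and then issues exactly one barrier on either path. Wrapping the
condition in eval_nospec() instead would, as the commit message notes, add a
second barrier on the lock-taken path, because Xen's spin_lock() wrapper
already ends in block_lock_speculation().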