
[xen stable-4.16] percpu-rwlock: introduce support for blocking speculation into critical regions



commit 475511953efc8cbe31d410f3f41542ea2b97f080
Author:     Roger Pau Monné <roger.pau@xxxxxxxxxx>
AuthorDate: Tue Feb 13 17:57:38 2024 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Mar 12 16:26:38 2024 +0000

    percpu-rwlock: introduce support for blocking speculation into critical regions
    
    Add direct calls to block_lock_speculation() where required in order to
    prevent speculation into the lock protected critical regions.  Also
    convert _percpu_read_lock() from inline to always_inline.
    
    Note that _percpu_write_lock() has been modified to use the
    non-speculation-safe variant of the locking primitives, as a speculation
    barrier is added unconditionally by the calling wrapper.
    
    This is part of XSA-453 / CVE-2024-2193
    
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit f218daf6d3a3b847736d37c6a6b76031a0d08441)
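
    For illustration only (not part of the commit): a minimal C sketch of the
    acquire-then-barrier pattern the wrapper enforces, assuming
    block_lock_speculation() expands to an architecture-specific speculation
    barrier; the demo_* names below are hypothetical stand-ins for the real
    locking primitives.

    /*
     * Take the lock with the plain (speculation-unsafe) primitive, then
     * issue one unconditional speculation barrier, so the critical region
     * cannot be entered speculatively before the lock is actually held.
     */
    #define demo_lock_speculation_safe(l)                                 \
    ({                                                                    \
        _demo_lock(l);            /* plain lock acquisition */            \
        block_lock_speculation(); /* fence before the critical region */  \
    })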
---
 xen/common/rwlock.c      |  6 +++++-
 xen/include/xen/rwlock.h | 14 ++++++++++----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/xen/common/rwlock.c b/xen/common/rwlock.c
index 2464f74548..703276f4aa 100644
--- a/xen/common/rwlock.c
+++ b/xen/common/rwlock.c
@@ -125,8 +125,12 @@ void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
     /*
      * First take the write lock to protect against other writers or slow
      * path readers.
+     *
+     * Note we use the speculation unsafe variant of write_lock(), as the
+     * calling wrapper already adds a speculation barrier after the lock has
+     * been taken.
      */
-    write_lock(&percpu_rwlock->rwlock);
+    _write_lock(&percpu_rwlock->rwlock);
 
     /* Now set the global variable so that readers start using read_lock. */
     percpu_rwlock->writer_activating = 1;
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index fd0458be94..abe0804bf7 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -326,8 +326,8 @@ static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
 #define percpu_rwlock_resource_init(l, owner) \
     (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
 
-static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
-                                         percpu_rwlock_t *percpu_rwlock)
+static always_inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+                                            percpu_rwlock_t *percpu_rwlock)
 {
     /* Validate the correct per_cpudata variable has been provided. */
     _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
@@ -362,6 +362,8 @@ static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
     }
     else
     {
+        /* Other branch already has a speculation barrier in read_lock(). */
+        block_lock_speculation();
         /* All other paths have implicit check_lock() calls via read_lock(). */
         check_lock(&percpu_rwlock->rwlock.lock.debug, false);
     }
@@ -410,8 +412,12 @@ static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
     _percpu_read_lock(&get_per_cpu_var(percpu), lock)
 #define percpu_read_unlock(percpu, lock) \
     _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_lock(percpu, lock) \
-    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+
+#define percpu_write_lock(percpu, lock)                 \
+({                                                      \
+    _percpu_write_lock(&get_per_cpu_var(percpu), lock); \
+    block_lock_speculation();                           \
+})
 #define percpu_write_unlock(percpu, lock) \
     _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
 
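As a hypothetical usage sketch (the demo_* names are invented; only the
percpu_rwlock API visible in the header above is real), every writer now goes
through the percpu_write_lock() wrapper and therefore through
block_lock_speculation() before entering the critical region:

#include <xen/percpu.h>
#include <xen/rwlock.h>

/* Hypothetical per-CPU owner variable and lock, as used by the API. */
static DEFINE_PER_CPU(percpu_rwlock_t *, demo_percpu);
static percpu_rwlock_t demo_lock;

static void demo_init(void)
{
    percpu_rwlock_resource_init(&demo_lock, demo_percpu);
}

static void demo_writer(void)
{
    /* The wrapper takes the rwlock, then issues the speculation barrier. */
    percpu_write_lock(demo_percpu, &demo_lock);
    /* ... critical region, no longer reachable under speculation ... */
    percpu_write_unlock(demo_percpu, &demo_lock);
}
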
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.16
