
[xen staging] x86/amd: Work around CLFLUSH ordering on older parts



commit 062868a5a8b428b85db589fa9a6d6e43969ffeb9
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Jun 9 14:23:07 2022 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jun 9 14:23:07 2022 +0200

    x86/amd: Work around CLFLUSH ordering on older parts
    
    On pre-CLFLUSHOPT AMD CPUs, CLFLUSH is weakly ordered with everything,
    including reads and writes to the address, and LFENCE/SFENCE instructions.
    
    This creates a multitude of problematic corner cases, laid out in the manual.
    Arrange to use MFENCE on both sides of the CLFLUSH to force proper ordering.
    
    This is part of XSA-402.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/amd.c                 |  8 ++++++++
 xen/arch/x86/flushtlb.c                | 13 ++++++++++++-
 xen/arch/x86/include/asm/cpufeatures.h |  1 +
 3 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 4999f8be2b..94b9e31016 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -812,6 +812,14 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
        if (!cpu_has_lfence_dispatch)
                __set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
 
+       /*
+        * On pre-CLFLUSHOPT AMD CPUs, CLFLUSH is weakly ordered with
+        * everything, including reads and writes to address, and
+        * LFENCE/SFENCE instructions.
+        */
+       if (!cpu_has_clflushopt)
+               setup_force_cpu_cap(X86_BUG_CLFLUSH_MFENCE);
+
        switch(c->x86)
        {
        case 0xf ... 0x11:
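
The hunk above forces the synthetic X86_BUG_CLFLUSH_MFENCE capability on any
AMD part that lacks CLFLUSHOPT.  As a rough, self-contained illustration (not
part of the patch), the sketch below performs the same check from user space;
it assumes x86-64 with GCC/Clang's <cpuid.h> helpers and that CLFLUSHOPT is
reported in CPUID leaf 7, sub-leaf 0, EBX bit 23:

    /*
     * Sketch only: report whether a CPU is in the affected class,
     * i.e. an AMD part without CLFLUSHOPT.
     */
    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool needs_clflush_mfence(void)
    {
        unsigned int eax, ebx, ecx, edx, max_leaf;
        char vendor[13] = "";

        if ( !__get_cpuid(0, &max_leaf, &ebx, &ecx, &edx) )
            return false;
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);

        if ( strcmp(vendor, "AuthenticAMD") != 0 )
            return false;                 /* workaround is AMD-specific */

        if ( max_leaf >= 7 )
        {
            __cpuid_count(7, 0, eax, ebx, ecx, edx);
            if ( ebx & (1u << 23) )       /* CLFLUSHOPT present */
                return false;
        }

        return true;                      /* pre-CLFLUSHOPT AMD part */
    }

    int main(void)
    {
        printf("CLFLUSH needs MFENCE bracketing: %s\n",
               needs_clflush_mfence() ? "yes" : "no");
        return 0;
    }
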
diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index 471b3e31c4..18748b2bc8 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -260,6 +260,13 @@ unsigned int flush_area_local(const void *va, unsigned int flags)
     return flags;
 }
 
+/*
+ * On pre-CLFLUSHOPT AMD CPUs, CLFLUSH is weakly ordered with everything,
+ * including reads and writes to address, and LFENCE/SFENCE instructions.
+ *
+ * This function only works safely after alternatives have run.  Luckily, at
+ * the time of writing, we don't flush the caches that early.
+ */
 void cache_flush(const void *addr, unsigned int size)
 {
     /*
@@ -269,6 +276,8 @@ void cache_flush(const void *addr, unsigned int size)
     unsigned int clflush_size = current_cpu_data.x86_clflush_size ?: 16;
     const void *end = addr + size;
 
+    alternative("", "mfence", X86_BUG_CLFLUSH_MFENCE);
+
     addr -= (unsigned long)addr & (clflush_size - 1);
     for ( ; addr < end; addr += clflush_size )
     {
@@ -284,7 +293,9 @@ void cache_flush(const void *addr, unsigned int size)
                        [p] "m" (*(const char *)(addr)));
     }
 
-    alternative("", "sfence", X86_FEATURE_CLFLUSHOPT);
+    alternative_2("",
+                  "sfence", X86_FEATURE_CLFLUSHOPT,
+                  "mfence", X86_BUG_CLFLUSH_MFENCE);
 }
 
 void cache_writeback(const void *addr, unsigned int size)
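
With these alternatives applied, the net effect on an affected part is an
MFENCE on both sides of the CLFLUSH loop, as described in the commit message,
while CLFLUSHOPT-capable parts keep only the trailing SFENCE.  A minimal
sketch of the resulting sequence on an affected part, assuming x86-64 with
GCC/Clang inline asm and a fixed 64-byte line size in place of Xen's
CPUID-derived x86_clflush_size (sketch only, not Xen code):

    #include <stdint.h>

    #define CLFLUSH_SIZE 64u  /* Xen uses current_cpu_data.x86_clflush_size */

    static void cache_flush_sketch(const void *addr, unsigned int size)
    {
        const char *end = (const char *)addr + size;
        const char *p = (const char *)((uintptr_t)addr &
                                       ~(uintptr_t)(CLFLUSH_SIZE - 1));

        /* alternative("", "mfence", X86_BUG_CLFLUSH_MFENCE) patches this in. */
        asm volatile ( "mfence" ::: "memory" );

        for ( ; p < end; p += CLFLUSH_SIZE )
            /* Xen selects CLFLUSH or CLFLUSHOPT here; plain CLFLUSH shown. */
            asm volatile ( "clflush %0" :: "m" (*p) );

        /* alternative_2() patches in MFENCE on affected parts; SFENCE is only
         * used when CLFLUSHOPT was available, which excludes this case. */
        asm volatile ( "mfence" ::: "memory" );
    }

    int main(void)
    {
        static char buf[256];
        cache_flush_sketch(buf, sizeof(buf));
        return 0;
    }
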
diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h
index 7413febd7a..ff3157d52d 100644
--- a/xen/arch/x86/include/asm/cpufeatures.h
+++ b/xen/arch/x86/include/asm/cpufeatures.h
@@ -47,6 +47,7 @@ XEN_CPUFEATURE(XEN_IBT,           X86_SYNTH(27)) /* Xen uses CET Indirect Branch
 
 #define X86_BUG_FPU_PTRS          X86_BUG( 0) /* (F)X{SAVE,RSTOR} doesn't save/restore FOP/FIP/FDP. */
 #define X86_BUG_NULL_SEG          X86_BUG( 1) /* NULL-ing a selector preserves the base and limit. */
+#define X86_BUG_CLFLUSH_MFENCE    X86_BUG( 2) /* MFENCE needed to serialise CLFLUSH */
 
 /* Total number of capability words, inc synth and bug words. */
 #define NCAPINTS (FSCAPINTS + X86_NR_SYNTH + X86_NR_BUG) /* N 32-bit words worth of info */
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 

