
[Xen-devel] [PATCH v8 11/16] microcode: pass a patch pointer to apply_microcode()



apply_microcode() always loads the cached ucode patch, which forces a
patch to be stored before it can be loaded. Make apply_microcode()
accept a patch pointer to remove this limitation, so that a patch can
be stored after a successful load.
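
A minimal sketch of the call pattern this change enables (a
hypothetical caller, not part of this patch; microcode_update_cache()
is an assumed helper used here only for illustration):

    /*
     * Hypothetical caller: apply a freshly parsed patch first, and only
     * store it in the cache once it has been applied successfully.
     */
    static int apply_then_cache(struct microcode_patch *patch)
    {
        int rc = microcode_ops->apply_microcode(patch);

        if ( rc )
            return rc;

        /* Cache the patch only after a successful load. */
        microcode_update_cache(patch);

        return 0;
    }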

Signed-off-by: Chao Gao <chao.gao@xxxxxxxxx>
---
Changes in v8:
 - new
---
 xen/arch/x86/microcode.c        | 2 +-
 xen/arch/x86/microcode_amd.c    | 5 ++---
 xen/arch/x86/microcode_intel.c  | 5 ++---
 xen/include/asm-x86/microcode.h | 2 +-
 4 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c
index 082b29c..03bc0aa 100644
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -208,7 +208,7 @@ int microcode_resume_cpu(void)
 
     err = microcode_ops->collect_cpu_info(sig);
     if ( likely(!err) )
-        err = microcode_ops->apply_microcode();
+        err = microcode_ops->apply_microcode(microcode_cache);
     spin_unlock(&microcode_mutex);
 
     return err;
diff --git a/xen/arch/x86/microcode_amd.c b/xen/arch/x86/microcode_amd.c
index 3d1505d..fed044a 100644
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -247,7 +247,7 @@ static enum microcode_match_result compare_patch(
     return MIS_UCODE;
 }
 
-static int apply_microcode(void)
+static int apply_microcode(const struct microcode_patch *patch)
 {
     unsigned long flags;
     uint32_t rev;
@@ -255,7 +255,6 @@ static int apply_microcode(void)
     unsigned int cpu = smp_processor_id();
     struct cpu_signature *sig = &per_cpu(cpu_sig, cpu);
     const struct microcode_header_amd *hdr;
-    const struct microcode_patch *patch = microcode_get_cache();
 
     if ( !match_cpu(patch) )
         return -EINVAL;
@@ -562,7 +561,7 @@ static int cpu_request_microcode(const void *buf, size_t bufsize)
 
         if ( match_cpu(microcode_get_cache()) )
         {
-            error = apply_microcode();
+            error = apply_microcode(microcode_get_cache());
             if ( error )
                 break;
         }
diff --git a/xen/arch/x86/microcode_intel.c b/xen/arch/x86/microcode_intel.c
index 867e4f2..bcb48bc 100644
--- a/xen/arch/x86/microcode_intel.c
+++ b/xen/arch/x86/microcode_intel.c
@@ -318,7 +318,7 @@ static int get_matching_microcode(const void *mc)
     return 1;
 }
 
-static int apply_microcode(void)
+static int apply_microcode(const struct microcode_patch *patch)
 {
     unsigned long flags;
     uint64_t msr_content;
@@ -326,7 +326,6 @@ static int apply_microcode(void)
     unsigned int cpu_num = raw_smp_processor_id();
     struct cpu_signature *sig = &this_cpu(cpu_sig);
     const struct microcode_intel *mc_intel;
-    const struct microcode_patch *patch = microcode_get_cache();
 
     if ( !match_cpu(patch) )
         return -EINVAL;
@@ -421,7 +420,7 @@ static int cpu_request_microcode(const void *buf, size_t size)
         error = offset;
 
     if ( !error && match_cpu(microcode_get_cache()) )
-        error = apply_microcode();
+        error = apply_microcode(microcode_get_cache());
 
     return error;
 }
diff --git a/xen/include/asm-x86/microcode.h b/xen/include/asm-x86/microcode.h
index c8d2c4f..8c7de9d 100644
--- a/xen/include/asm-x86/microcode.h
+++ b/xen/include/asm-x86/microcode.h
@@ -22,7 +22,7 @@ struct microcode_patch {
 struct microcode_ops {
     int (*cpu_request_microcode)(const void *buf, size_t size);
     int (*collect_cpu_info)(struct cpu_signature *csig);
-    int (*apply_microcode)(void);
+    int (*apply_microcode)(const struct microcode_patch *patch);
     int (*start_update)(void);
     void (*end_update)(void);
     void (*free_patch)(void *mc);
-- 
1.8.3.1

