[PATCH v3 06/10] x86/mtrr: remove set_all callback from struct mtrr_ops



Instead of using an indirect call via mtrr_if->set_all, call the only
possible target, cache_cpu_init(), directly. This allows the set_all
callback to be removed from struct mtrr_ops.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
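For reference, a minimal sketch (simplified names, not taken from the
kernel tree) of the call pattern this patch changes: an indirect call
through the per-vendor ops table becomes a direct call to
cache_cpu_init(), since that was the only implementation ever wired up.

/*
 * Illustrative sketch only, not the in-tree code: the ops table used to
 * carry a set_all hook whose sole implementation was cache_cpu_init(),
 * so the indirection can be dropped in favour of a direct call.
 */
struct mtrr_ops_example {
	void (*set)(unsigned int reg, unsigned long base,
		    unsigned long size, unsigned char type);
	/* void (*set_all)(void);   removed, it always pointed at cache_cpu_init() */
};

void cache_cpu_init(void);		/* the single remaining target */

static void replicate_mtrr_state_example(void)
{
	cache_cpu_init();		/* was: mtrr_if->set_all(); */
}
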
 arch/x86/kernel/cpu/mtrr/generic.c |  1 -
 arch/x86/kernel/cpu/mtrr/mtrr.c    | 10 +++++-----
 arch/x86/kernel/cpu/mtrr/mtrr.h    |  2 --
 3 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fc7b2d952737..5f83ee865def 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -843,7 +843,6 @@ int positive_have_wrcomb(void)
  * Generic structure...
  */
 const struct mtrr_ops generic_mtrr_ops = {
-       .set_all                = cache_cpu_init,
        .get                    = generic_get_mtrr,
        .get_free_region        = generic_get_free_region,
        .set                    = generic_set_mtrr,
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 7d7d5bd30219..9609a0d235f8 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -165,15 +165,15 @@ static int mtrr_rendezvous_handler(void *info)
         * saved, and we want to replicate that across all the cpus that come
         * online (either at the end of boot or resume or during a runtime cpu
         * online). If we're doing that, @reg is set to something special and on
-        * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
+        * all the cpu's we do cache_cpu_init() (On the logical cpu that
         * started the boot/resume sequence, this might be a duplicate
-        * set_all()).
+        * cache_cpu_init()).
         */
        if (data->smp_reg != ~0U) {
                mtrr_if->set(data->smp_reg, data->smp_base,
                             data->smp_size, data->smp_type);
        } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
-               mtrr_if->set_all();
+               cache_cpu_init();
        }
        return 0;
 }
@@ -768,7 +768,7 @@ void __init mtrr_bp_init(void)
 
                        if (mtrr_cleanup(phys_addr)) {
                                changed_by_mtrr_cleanup = 1;
-                               mtrr_if->set_all();
+                               cache_cpu_init();
                        }
                }
        }
@@ -854,7 +854,7 @@ void mtrr_bp_restore(void)
        if (!cache_generic)
                return;
 
-       mtrr_if->set_all();
+       cache_cpu_init();
 }
 
 static int __init mtrr_init_finialize(void)
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 88b1c4b6174a..3b1883185185 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -16,8 +16,6 @@ struct mtrr_ops {
        u32     vendor;
        void    (*set)(unsigned int reg, unsigned long base,
                       unsigned long size, mtrr_type type);
-       void    (*set_all)(void);
-
        void    (*get)(unsigned int reg, unsigned long *base,
                       unsigned long *size, mtrr_type *type);
        int     (*get_free_region)(unsigned long base, unsigned long size,
-- 
2.35.3