
[Xen-devel] [PATCH 2/4] x86/mcheck: replace remaining uses of __get_cpu_var()



this_cpu() is shorter, and when there are multiple uses in a function,
latching smp_processor_id() once and using per_cpu() is also more
efficient.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
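
(Not part of the patch: a minimal illustration of the pattern being
applied, using a hypothetical per-CPU variable "foo". For a single
access this_cpu() is the shortest spelling; a function with several
accesses does better latching smp_processor_id() into a local and
indexing with per_cpu(), so the per-CPU base isn't re-derived each
time.)

    static DEFINE_PER_CPU(struct mca_banks *, foo);

    static void single_use(void)
    {
        /* One access: this_cpu() resolves the CPU implicitly. */
        mcabanks_clear(0, this_cpu(foo));
    }

    static void multiple_uses(void)
    {
        unsigned int cpu = smp_processor_id(); /* latch once */

        /* Reuse the latched CPU number for every further access. */
        mcabanks_set(0, per_cpu(foo, cpu));
        mcabanks_clear(1, per_cpu(foo, cpu));
    }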

--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -473,7 +473,8 @@ void mcheck_cmn_handler(const struct cpu
     static atomic_t found_error = ATOMIC_INIT(0);
     static cpumask_t mce_fatal_cpus;
     struct mca_banks *bankmask = mca_allbanks;
-    struct mca_banks *clear_bank = __get_cpu_var(mce_clear_banks);
+    unsigned int cpu = smp_processor_id();
+    struct mca_banks *clear_bank = per_cpu(mce_clear_banks, cpu);
     uint64_t gstatus;
     mctelem_cookie_t mctc = NULL;
     struct mca_summary bs;
@@ -504,17 +505,17 @@ void mcheck_cmn_handler(const struct cpu
              * the telemetry after reboot (the MSRs are sticky)
              */
             if ( bs.pcc || !bs.recoverable )
-                cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
+                cpumask_set_cpu(cpu, &mce_fatal_cpus);
         }
         else if ( mctc != NULL )
             mctelem_commit(mctc);
         atomic_set(&found_error, 1);
 
         /* The last CPU will be take check/clean-up etc */
-        atomic_set(&severity_cpu, smp_processor_id());
+        atomic_set(&severity_cpu, cpu);
 
-        mce_printk(MCE_CRITICAL, "MCE: clear_bank map %lx on CPU%d\n",
-                   *((unsigned long *)clear_bank), smp_processor_id());
+        mce_printk(MCE_CRITICAL, "MCE: clear_bank map %lx on CPU%u\n",
+                   *((unsigned long *)clear_bank), cpu);
         if ( clear_bank != NULL )
             mcheck_mca_clearbanks(clear_bank);
     }
@@ -524,14 +525,14 @@ void mcheck_cmn_handler(const struct cpu
 
     mce_barrier_enter(&mce_trap_bar, bcast);
     if ( mctc != NULL && mce_urgent_action(regs, mctc) )
-        cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
+        cpumask_set_cpu(cpu, &mce_fatal_cpus);
     mce_barrier_exit(&mce_trap_bar, bcast);
 
     /*
      * Wait until everybody has processed the trap.
      */
     mce_barrier_enter(&mce_trap_bar, bcast);
-    if ( lmce || atomic_read(&severity_cpu) == smp_processor_id() )
+    if ( lmce || atomic_read(&severity_cpu) == cpu )
     {
         /*
          * According to SDM, if no error bank found on any cpus,
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -492,6 +492,7 @@ static int do_cmci_discover(int i)
     unsigned msr = MSR_IA32_MCx_CTL2(i);
     u64 val;
     unsigned int threshold, max_threshold;
+    unsigned int cpu = smp_processor_id();
     static unsigned int cmci_threshold = 2;
     integer_param("cmci-threshold", cmci_threshold);
 
@@ -499,7 +500,7 @@ static int do_cmci_discover(int i)
     /* Some other CPU already owns this bank. */
     if ( val & CMCI_EN )
     {
-        mcabanks_clear(i, __get_cpu_var(mce_banks_owned));
+        mcabanks_clear(i, per_cpu(mce_banks_owned, cpu));
         goto out;
     }
 
@@ -512,7 +513,7 @@ static int do_cmci_discover(int i)
     if ( !(val & CMCI_EN) )
     {
         /* This bank does not support CMCI. Polling timer has to handle it. */
-        mcabanks_set(i, __get_cpu_var(no_cmci_banks));
+        mcabanks_set(i, per_cpu(no_cmci_banks, cpu));
         wrmsrl(msr, val & ~CMCI_THRESHOLD_MASK);
         return 0;
     }
@@ -522,13 +523,13 @@ static int do_cmci_discover(int i)
     {
         mce_printk(MCE_QUIET,
                    "CMCI: threshold %#x too large for CPU%u bank %u, using 
%#x\n",
-                   threshold, smp_processor_id(), i, max_threshold);
+                   threshold, cpu, i, max_threshold);
         threshold = max_threshold;
     }
     wrmsrl(msr, (val & ~CMCI_THRESHOLD_MASK) | CMCI_EN | threshold);
-    mcabanks_set(i, __get_cpu_var(mce_banks_owned));
+    mcabanks_set(i, per_cpu(mce_banks_owned, cpu));
 out:
-    mcabanks_clear(i, __get_cpu_var(no_cmci_banks));
+    mcabanks_clear(i, per_cpu(no_cmci_banks, cpu));
     return 1;
 }
 
@@ -648,7 +649,7 @@ static void cmci_interrupt(struct cpu_us
     ack_APIC_irq();
 
     mctc = mcheck_mca_logout(
-        MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL);
+        MCA_CMCI_HANDLER, this_cpu(mce_banks_owned), &bs, NULL);
 
     if ( bs.errcnt && mctc != NULL )
     {
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -38,7 +38,8 @@ static void mce_checkregs (void *info)
        struct mca_summary bs;
        static uint64_t dumpcount = 0;
 
-       mctc = mcheck_mca_logout(MCA_POLLER, __get_cpu_var(poll_bankmask), &bs, NULL);
+       mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
+                                &bs, NULL);
 
        if (bs.errcnt && mctc != NULL) {
                adjust++;
@@ -93,7 +94,7 @@ static int __init init_nonfatal_mce_chec
        if (!opt_mce || !mce_available(c))
                return -ENODEV;
 
-       if (__get_cpu_var(poll_bankmask) == NULL)
+       if (!this_cpu(poll_bankmask))
                return -EINVAL;
 
        /*



