
[xen staging] x86/MCE: move intel mcheck init code to separate file



commit 78d980071a0964a7dcf0d02b4a9066c16b5a7140
Author:     Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
AuthorDate: Fri Apr 19 08:45:23 2024 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Apr 19 08:45:23 2024 +0200

    x86/MCE: move intel mcheck init code to separate file
    
    Separate Intel nonfatal MCE initialization code from generic MCE code, the same
    way it is done for AMD code. This is to be able to later make intel/amd MCE
    code optional in the build.
    
    Convert to Xen coding style. Clean up unused includes. Remove seemingly
    outdated comment about MCE check period.
    
    No functional change intended.
    
    Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
    Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/Makefile         |  1 +
 xen/arch/x86/cpu/mcheck/intel-nonfatal.c | 86 ++++++++++++++++++++++++++++++++
 xen/arch/x86/cpu/mcheck/mce.h            |  1 +
 xen/arch/x86/cpu/mcheck/non-fatal.c      | 82 +-----------------------------
 4 files changed, 89 insertions(+), 81 deletions(-)
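
For context, the stated goal of the split is to let the vendor-specific MCE code
be compiled out of the build later. A minimal sketch of what the header side of
that could look like once such a build option exists; the CONFIG_INTEL symbol and
the inline stub below are illustrative assumptions, not part of this patch:

    /* mce.h: hypothetical follow-up, not in this patch. */
    #ifdef CONFIG_INTEL
    void intel_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
    #else
    /* With the Intel code compiled out, the init hook collapses to a no-op. */
    static inline void intel_nonfatal_mcheck_init(struct cpuinfo_x86 *c) {}
    #endif

On the Makefile side the new object would then become conditional (for example
obj-$(CONFIG_INTEL) += intel-nonfatal.o), which is exactly what moving the code
into its own file makes possible.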

diff --git a/xen/arch/x86/cpu/mcheck/Makefile b/xen/arch/x86/cpu/mcheck/Makefile
index 0d63ff9096..f927f10b4d 100644
--- a/xen/arch/x86/cpu/mcheck/Makefile
+++ b/xen/arch/x86/cpu/mcheck/Makefile
@@ -2,6 +2,7 @@ obj-y += amd_nonfatal.o
 obj-y += mce_amd.o
 obj-y += mcaction.o
 obj-y += barrier.o
+obj-y += intel-nonfatal.o
 obj-y += mctelem.o
 obj-y += mce.o
 obj-y += mce-apei.o
diff --git a/xen/arch/x86/cpu/mcheck/intel-nonfatal.c b/xen/arch/x86/cpu/mcheck/intel-nonfatal.c
new file mode 100644
index 0000000000..092a828812
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/intel-nonfatal.c
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Non Fatal Machine Check Exception Reporting
+ * (C) Copyright 2002 Dave Jones. <davej@xxxxxxxxxxxxxxxxx>
+ */
+
+#include <xen/event.h>
+
+#include "mce.h"
+#include "vmce.h"
+
+static struct timer mce_timer;
+
+#define MCE_PERIOD MILLISECS(8000)
+#define MCE_PERIOD_MIN MILLISECS(2000)
+#define MCE_PERIOD_MAX MILLISECS(16000)
+
+static uint64_t period = MCE_PERIOD;
+static int adjust = 0;
+static int variable_period = 1;
+
+static void cf_check mce_checkregs(void *info)
+{
+    mctelem_cookie_t mctc;
+    struct mca_summary bs;
+    static uint64_t dumpcount = 0;
+
+    mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
+                             &bs, NULL);
+
+    if ( bs.errcnt && mctc != NULL )
+    {
+        adjust++;
+
+        /*
+         * If Dom0 enabled the VIRQ_MCA event, then notify it.
+         * Otherwise, if dom0 has had plenty of time to register
+         * the virq handler but still hasn't then dump telemetry
+         * to the Xen console.  The call count may be incremented
+         * on multiple cpus at once and is indicative only - just
+         * a simple-minded attempt to avoid spamming the console
+         * for corrected errors in early startup.
+         */
+
+        if ( dom0_vmce_enabled() )
+        {
+            mctelem_commit(mctc);
+            send_global_virq(VIRQ_MCA);
+        }
+        else if ( ++dumpcount >= 10 )
+        {
+            x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
+            mctelem_dismiss(mctc);
+        }
+        else
+            mctelem_dismiss(mctc);
+    }
+    else if ( mctc != NULL )
+        mctelem_dismiss(mctc);
+}
+
+static void cf_check mce_work_fn(void *data)
+{
+    on_each_cpu(mce_checkregs, NULL, 1);
+
+    if ( variable_period )
+    {
+        if ( adjust )
+            period /= (adjust + 1);
+        else
+            period *= 2;
+        if ( period > MCE_PERIOD_MAX )
+            period = MCE_PERIOD_MAX;
+        if ( period < MCE_PERIOD_MIN )
+            period = MCE_PERIOD_MIN;
+    }
+
+    set_timer(&mce_timer, NOW() + period);
+    adjust = 0;
+}
+
+void __init intel_nonfatal_mcheck_init(struct cpuinfo_x86 *unused)
+{
+    init_timer(&mce_timer, mce_work_fn, NULL, 0);
+    set_timer(&mce_timer, NOW() + MCE_PERIOD);
+}
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 7f26afae23..4806405f96 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -47,6 +47,7 @@ enum mcheck_type amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp);
 enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool bsp);
 
 void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
+void intel_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
 
 extern unsigned int firstbank;
 extern unsigned int ppin_msr;
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index 1c0c32ba08..33cacd15c2 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -7,84 +7,7 @@
  *
  */
 
-#include <xen/init.h>
-#include <xen/types.h>
-#include <xen/kernel.h>
-#include <xen/smp.h>
-#include <xen/timer.h>
-#include <xen/errno.h>
-#include <xen/event.h>
-#include <xen/sched.h>
-#include <asm/processor.h> 
-#include <asm/system.h>
-#include <asm/msr.h>
-
 #include "mce.h"
-#include "vmce.h"
-
-static struct timer mce_timer;
-
-#define MCE_PERIOD MILLISECS(8000)
-#define MCE_PERIOD_MIN MILLISECS(2000)
-#define MCE_PERIOD_MAX MILLISECS(16000)
-
-static uint64_t period = MCE_PERIOD;
-static int adjust = 0;
-static int variable_period = 1;
-
-static void cf_check mce_checkregs(void *info)
-{
-       mctelem_cookie_t mctc;
-       struct mca_summary bs;
-       static uint64_t dumpcount = 0;
-
-       mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
-                                &bs, NULL);
-
-       if (bs.errcnt && mctc != NULL) {
-               adjust++;
-
-               /* If Dom0 enabled the VIRQ_MCA event, then notify it.
-                * Otherwise, if dom0 has had plenty of time to register
-                * the virq handler but still hasn't then dump telemetry
-                * to the Xen console.  The call count may be incremented
-                * on multiple cpus at once and is indicative only - just
-                * a simple-minded attempt to avoid spamming the console
-                * for corrected errors in early startup.
-                */
-
-               if (dom0_vmce_enabled()) {
-                       mctelem_commit(mctc);
-                       send_global_virq(VIRQ_MCA);
-               } else if (++dumpcount >= 10) {
-                       x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
-                       mctelem_dismiss(mctc);
-               } else {
-                       mctelem_dismiss(mctc);
-               }
-       } else if (mctc != NULL) {
-               mctelem_dismiss(mctc);
-       }
-}
-
-static void cf_check mce_work_fn(void *data)
-{ 
-       on_each_cpu(mce_checkregs, NULL, 1);
-
-       if (variable_period) {
-               if (adjust)
-                       period /= (adjust + 1);
-               else
-                       period *= 2;
-               if (period > MCE_PERIOD_MAX)
-                       period = MCE_PERIOD_MAX;
-               if (period < MCE_PERIOD_MIN)
-                       period = MCE_PERIOD_MIN;
-       }
-
-       set_timer(&mce_timer, NOW() + period);
-       adjust = 0;
-}
 
 static int __init cf_check init_nonfatal_mce_checker(void)
 {
@@ -106,13 +29,10 @@ static int __init cf_check init_nonfatal_mce_checker(void)
                /* Assume we are on K8 or newer AMD or Hygon CPU here */
                amd_nonfatal_mcheck_init(c);
                break;
-
        case X86_VENDOR_INTEL:
-               init_timer(&mce_timer, mce_work_fn, NULL, 0);
-               set_timer(&mce_timer, NOW() + MCE_PERIOD);
+               intel_nonfatal_mcheck_init(c);
                break;
        }
-
        printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
        return 0;
 }
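
For readers skimming the moved code: mce_work_fn() polls every CPU, then shortens
the polling interval while corrected errors keep turning up and doubles it when a
pass comes back clean, clamped between MCE_PERIOD_MIN and MCE_PERIOD_MAX. Below is
a standalone sketch of that adjustment rule with illustrative values; it is not
Xen code, just the same arithmetic lifted out for clarity:

    #include <stdint.h>
    #include <stdio.h>

    /* Same bounds as the Xen code, in milliseconds. */
    #define MCE_PERIOD      8000ULL
    #define MCE_PERIOD_MIN  2000ULL
    #define MCE_PERIOD_MAX 16000ULL

    /* Mirrors the adjustment in mce_work_fn(): shrink on errors, grow when idle. */
    static uint64_t next_period(uint64_t period, unsigned int errors_seen)
    {
        if ( errors_seen )
            period /= errors_seen + 1;
        else
            period *= 2;

        if ( period > MCE_PERIOD_MAX )
            period = MCE_PERIOD_MAX;
        if ( period < MCE_PERIOD_MIN )
            period = MCE_PERIOD_MIN;

        return period;
    }

    int main(void)
    {
        uint64_t p = MCE_PERIOD;

        /* Clean pass: 8000 ms doubles to 16000 ms and stays capped there. */
        p = next_period(p, 0);
        printf("after clean pass: %llu ms\n", (unsigned long long)p);

        /* One corrected error on the next pass: 16000 ms / 2 = 8000 ms. */
        p = next_period(p, 1);
        printf("after one error:  %llu ms\n", (unsigned long long)p);

        /* Three errors on a pass: 8000 ms / 4 = 2000 ms, the floor. */
        p = next_period(p, 3);
        printf("after a burst:    %llu ms\n", (unsigned long long)p);

        return 0;
    }
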
--
generated by git-patchbot for /home/xen/git/xen.git#staging