/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

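/*
 * Acquire-load of the mcelog index with a lockdep check. Reading the
 * RCU_LOCKDEP_WARN() condition below, the implied locking rule is that
 * callers must either be in an RCU-sched read-side section or hold
 * mce_chrdev_read_mutex.
 */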
#define mce_log_get_idx_check(p) \
({ \
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
			 !lockdep_is_held(&mce_chrdev_read_mutex), \
			 "suspicious mce_log_get_idx_check() usage"); \
	smp_load_acquire(&(p)); \
})

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT		100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

/* User mode helper program triggered by machine check event */
static unsigned long		mce_need_notify;
static char			mce_helper[128];
static char			*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int			cpu_missing;

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	m->tsc = rdtsc();
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. It
 * also keeps MCEs separate from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	if (!mce_gen_pool_add(mce))
		irq_work_queue(&mce_irq_work);

	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up, discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &mce_need_notify);
}

void mce_inject_log(struct mce *m)
{
	mutex_lock(&mce_chrdev_read_mutex);
	mce_log(m);
	mutex_unlock(&mce_chrdev_read_mutex);
}
EXPORT_SYMBOL_GPL(mce_inject_log);

static struct notifier_block mce_srao_nb;

void mce_register_decode_chain(struct notifier_block *nb)
{
	/* Ensure SRAO notifier has the highest priority in the decode chain. */
	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
		nb->priority -= 1;

	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

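/*
 * MSR address helpers. The first group maps a bank number to the legacy
 * MCA register block (MSR_IA32_MCx_*); the smca_* group maps it to the
 * AMD Scalable MCA block (MSR_AMD64_SMCA_MCx_*). msr_ops below defaults
 * to the legacy layout; presumably it is repointed at the smca_* helpers
 * during vendor init when SMCA is detected (not shown in this section).
 */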
static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}

static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}

static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}

static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}

static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}

static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}

static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}

static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}

struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
		 m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that).
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}


#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicing machine check CPU died");
}

static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty() && keventd_up())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps).
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&mce_irq_work);
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;

	/* Checks after this one are Intel-specific: */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 1;

	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce)
		return NOTIFY_DONE;

	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
		pfn = mce->addr >> PAGE_SHIFT;
		memory_failure(pfn, MCE_VECTOR, 0);
	}

	return NOTIFY_OK;
}
static struct notifier_block mce_srao_nb = {
	.notifier_call	= srao_decode_notifier,
	.priority	= INT_MAX,
};

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

static bool memory_error(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor == X86_VENDOR_AMD) {
		/* ErrCodeExt[20:16] */
		u8 xec = (m->status >> 16) & 0x1f;

		return (xec == 0x0 || xec == 0x8);
	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;
	}

	return false;
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	bool error_seen = false;
	struct mce m;
	int severity;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < mca_cfg.banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;


		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		error_seen = true;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

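		/*
		 * Record the computed severity in the mce record for deferred
		 * memory errors that carry a valid address, presumably so
		 * that later consumers (e.g. the registered notifiers) can
		 * act on it.
		 */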
		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
			if (m.status & MCI_STATUS_ADDRV)
				m.severity = severity;

		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
			mce_log(&m);
		else if (mce_usable_address(&m)) {
			/*
			 * Although we skipped logging this, we still want
			 * to take action. Add to the pool so the registered
			 * notifiers will see it.
			 */
			if (!mce_gen_pool_add(&m))
				mce_schedule_work();
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	int i, ret = 0;
	char *tmp;

	for (i = 0; i < mca_cfg.banks; i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (m->status & MCI_STATUS_VAL) {
			__set_bit(i, validp);
			if (quirk_no_way_out)
				quirk_no_way_out(i, m, regs);
		}

		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			*msg = tmp;
			ret = 1;
		}
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure that all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu),
					    mca_cfg.tolerant,
					    &nmsg, true);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}
| 856 | |
| 857 | static atomic_t global_nwo; |
| 858 | |
| 859 | /* |
| 860 | * Start of Monarch synchronization. This waits until all CPUs have |
| 861 | * entered the exception handler and then determines if any of them |
| 862 | * saw a fatal event that requires panic. Then it executes them |
| 863 | * in the entry order. |
| 864 | * TBD double check parallel CPU hotunplug |
| 865 | */ |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 866 | static int mce_start(int *no_way_out) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 867 | { |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 868 | int order; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 869 | int cpus = num_online_cpus(); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 870 | u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 871 | |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 872 | if (!timeout) |
| 873 | return -1; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 874 | |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 875 | atomic_add(*no_way_out, &global_nwo); |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 876 | /* |
Davidlohr Bueso | bf92b1f | 2016-04-06 10:05:15 +0200 | [diff] [blame] | 877 | * Rely on the implied barrier below, such that global_nwo |
| 878 | * is updated before mce_callin. |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 879 | */ |
Borislav Petkov | a95436e | 2009-06-20 23:28:22 -0700 | [diff] [blame] | 880 | order = atomic_inc_return(&mce_callin); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 881 | |
| 882 | /* |
| 883 | * Wait for everyone. |
| 884 | */ |
| 885 | while (atomic_read(&mce_callin) != cpus) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 886 | if (mce_timed_out(&timeout, |
| 887 | "Timeout: Not all CPUs entered broadcast exception handler")) { |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 888 | atomic_set(&global_nwo, 0); |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 889 | return -1; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 890 | } |
| 891 | ndelay(SPINUNIT); |
| 892 | } |
| 893 | |
| 894 | /* |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 895 | * mce_callin should be read before global_nwo |
| 896 | */ |
| 897 | smp_rmb(); |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 898 | |
| 899 | if (order == 1) { |
| 900 | /* |
| 901 | * Monarch: Starts executing now, the others wait. |
| 902 | */ |
| 903 | atomic_set(&mce_executing, 1); |
| 904 | } else { |
| 905 | /* |
| 906 | * Subject: Now start the scanning loop one by one in |
| 907 | * the original callin order. |
| 908 | * This way when there are any shared banks it will be |
| 909 | * only seen by one CPU before cleared, avoiding duplicates. |
| 910 | */ |
| 911 | while (atomic_read(&mce_executing) < order) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 912 | if (mce_timed_out(&timeout, |
| 913 | "Timeout: Subject CPUs unable to finish machine check processing")) { |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 914 | atomic_set(&global_nwo, 0); |
| 915 | return -1; |
| 916 | } |
| 917 | ndelay(SPINUNIT); |
| 918 | } |
| 919 | } |
| 920 | |
Huang Ying | 184e1fd | 2009-06-15 15:37:07 +0800 | [diff] [blame] | 921 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 922 | * Cache the global no_way_out state. |
| 923 | */ |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 924 | *no_way_out = atomic_read(&global_nwo); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 925 | |
Hidetoshi Seto | 7fb06fc | 2009-06-15 18:18:43 +0900 | [diff] [blame] | 926 | return order; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 927 | } |
| 928 | |
| 929 | /* |
| 930 | * Synchronize between CPUs after main scanning loop. |
| 931 | * This invokes the bulk of the Monarch processing. |
| 932 | */ |
| 933 | static int mce_end(int order) |
| 934 | { |
| 935 | int ret = -1; |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 936 | u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 937 | |
| 938 | if (!timeout) |
| 939 | goto reset; |
| 940 | if (order < 0) |
| 941 | goto reset; |
| 942 | |
| 943 | /* |
| 944 | * Allow others to run. |
| 945 | */ |
| 946 | atomic_inc(&mce_executing); |
| 947 | |
| 948 | if (order == 1) { |
| 949 | /* CHECKME: Can this race with a parallel hotplug? */ |
| 950 | int cpus = num_online_cpus(); |
| 951 | |
| 952 | /* |
| 953 | * Monarch: Wait for everyone to go through their scanning |
| 954 | * loops. |
| 955 | */ |
| 956 | while (atomic_read(&mce_executing) <= cpus) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 957 | if (mce_timed_out(&timeout, |
| 958 | "Timeout: Monarch CPU unable to finish machine check processing")) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 959 | goto reset; |
| 960 | ndelay(SPINUNIT); |
| 961 | } |
| 962 | |
| 963 | mce_reign(); |
| 964 | barrier(); |
| 965 | ret = 0; |
| 966 | } else { |
| 967 | /* |
| 968 | * Subject: Wait for Monarch to finish. |
| 969 | */ |
| 970 | while (atomic_read(&mce_executing) != 0) { |
Andy Lutomirski | 6c80f87 | 2014-12-21 08:18:25 -0800 | [diff] [blame] | 971 | if (mce_timed_out(&timeout, |
| 972 | "Timeout: Monarch CPU did not finish machine check processing")) |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 973 | goto reset; |
| 974 | ndelay(SPINUNIT); |
| 975 | } |
| 976 | |
| 977 | /* |
| 978 | * Don't reset anything. That's done by the Monarch. |
| 979 | */ |
| 980 | return 0; |
| 981 | } |
| 982 | |
| 983 | /* |
| 984 | * Reset all global state. |
| 985 | */ |
| 986 | reset: |
| 987 | atomic_set(&global_nwo, 0); |
| 988 | atomic_set(&mce_callin, 0); |
| 989 | barrier(); |
| 990 | |
| 991 | /* |
| 992 | * Let others run again. |
| 993 | */ |
| 994 | atomic_set(&mce_executing, 0); |
| 995 | return ret; |
| 996 | } |
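| | /* |
| |  * Only the Monarch and the failure paths reach the reset label above. |
| |  * mce_executing is cleared last, after the barrier, so Subjects still |
| |  * spinning in their "wait for Monarch" loop are released only once |
| |  * global_nwo and mce_callin have already been reset to 0. |
| |  */ |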
| 997 | |
| 998 | static void mce_clear_state(unsigned long *toclear) |
| 999 | { |
| 1000 | int i; |
| 1001 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1002 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1003 | if (test_bit(i, toclear)) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1004 | mce_wrmsrl(msr_ops.status(i), 0); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1005 | } |
| 1006 | } |
| 1007 | |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1008 | static int do_memory_failure(struct mce *m) |
| 1009 | { |
| 1010 | int flags = MF_ACTION_REQUIRED; |
| 1011 | int ret; |
| 1012 | |
| 1013 | pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr); |
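| | /* |
| |  * Without a valid restart IP (RIPV clear in MCG_STATUS) we cannot |
| |  * return to the interrupted context, so the task must be killed even |
| |  * if memory_failure() manages to isolate the page. |
| |  */ |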
| 1014 | if (!(m->mcgstatus & MCG_STATUS_RIPV)) |
| 1015 | flags |= MF_MUST_KILL; |
| 1016 | ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags); |
| 1017 | if (ret) |
| 1018 | pr_err("Memory error not recovered"); |
| 1019 | return ret; |
| 1020 | } |
| 1021 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1022 | /* |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1023 | * The actual machine check handler. This only handles real |
| 1024 | * exceptions when something got corrupted coming in through int 18. |
| 1025 | * |
| 1026 | * This is executed in NMI context not subject to normal locking rules. This |
| 1027 | * implies that most kernel services cannot be safely used. Don't even |
| 1028 | * think about putting a printk in there! |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1029 | * |
| 1030 | * On Intel systems this is entered on all CPUs in parallel through |
| 1031 | * MCE broadcast. However some CPUs might be broken beyond repair, |
| 1032 | * so always be careful when synchronizing with others. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | */ |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1034 | void do_machine_check(struct pt_regs *regs, long error_code) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1036 | struct mca_config *cfg = &mca_cfg; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1037 | struct mce m, *final; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | int i; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1039 | int worst = 0; |
| 1040 | int severity; |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1041 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1042 | /* |
| 1043 | * Establish sequential order between the CPUs entering the machine |
| 1044 | * check handler. |
| 1045 | */ |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1046 | int order = -1; |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1047 | /* |
| 1048 | * If no_way_out gets set, there is no safe way to recover from this |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1049 | * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1050 | */ |
| 1051 | int no_way_out = 0; |
| 1052 | /* |
| 1053 | * If kill_it gets set, there might be a way to recover from this |
| 1054 | * error. |
| 1055 | */ |
| 1056 | int kill_it = 0; |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1057 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); |
Tony Luck | 95022b8 | 2012-04-18 15:19:40 -0700 | [diff] [blame] | 1058 | DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); |
Andi Kleen | bd19a5e | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1059 | char *msg = "Unknown"; |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1060 | |
| 1061 | /* |
| 1062 | * MCEs are always local on AMD. On Intel, whether an MCE is local is |
| 1063 | * indicated by MCG_STATUS_LMCES. |
| 1064 | */ |
| 1065 | int lmce = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | |
Ashok Raj | d90167a | 2015-12-10 11:12:26 +0100 | [diff] [blame] | 1067 | /* If this CPU is offline, just bail out. */ |
| 1068 | if (cpu_is_offline(smp_processor_id())) { |
| 1069 | u64 mcgstatus; |
| 1070 | |
| 1071 | mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); |
| 1072 | if (mcgstatus & MCG_STATUS_RIPV) { |
| 1073 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); |
| 1074 | return; |
| 1075 | } |
| 1076 | } |
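| | /* |
| |  * The check above covers an offline CPU that was still pulled in by a |
| |  * broadcast machine check: when the restart IP is valid it is safe to |
| |  * just clear MCG_STATUS and resume; otherwise fall through and let the |
| |  * normal handler decide what to do. |
| |  */ |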
| 1077 | |
Andy Lutomirski | 8c84014 | 2015-07-03 12:44:32 -0700 | [diff] [blame] | 1078 | ist_enter(regs); |
Andy Lutomirski | 9592747 | 2014-11-19 17:41:09 -0800 | [diff] [blame] | 1079 | |
Alex Shi | c6ae41e | 2012-05-11 15:35:27 +0800 | [diff] [blame] | 1080 | this_cpu_inc(mce_exception_count); |
Andi Kleen | 01ca79f | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 1081 | |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1082 | if (!cfg->banks) |
Andi Kleen | 3256169 | 2009-05-27 21:56:53 +0200 | [diff] [blame] | 1083 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1084 | |
Hidetoshi Seto | b8325c5 | 2011-06-08 10:57:46 +0900 | [diff] [blame] | 1085 | mce_gather_info(&m, regs); |
Andi Kleen | b5f2fa4 | 2009-02-12 13:43:22 +0100 | [diff] [blame] | 1086 | |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 1087 | final = this_cpu_ptr(&mces_seen); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1088 | *final = m; |
| 1089 | |
Tony Luck | 95022b8 | 2012-04-18 15:19:40 -0700 | [diff] [blame] | 1090 | memset(valid_banks, 0, sizeof(valid_banks)); |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 1091 | no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); |
Hidetoshi Seto | 680b6cf | 2009-08-26 16:20:36 +0900 | [diff] [blame] | 1092 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | barrier(); |
| 1094 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1095 | /* |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1096 | * When there is no restart IP we might need to kill or panic. |
| 1097 | * Assume the worst for now, but if we find the |
| 1098 | * severity is MCE_AR_SEVERITY we have other options. |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1099 | */ |
| 1100 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) |
| 1101 | kill_it = 1; |
| 1102 | |
| 1103 | /* |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1104 | * Check whether this MCE was signaled only to this logical |
| 1105 | * processor (Intel only). |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1106 | */ |
Yazen Ghannam | fead35c | 2016-04-30 14:33:57 +0200 | [diff] [blame] | 1107 | if (m.cpuvendor == X86_VENDOR_INTEL) |
| 1108 | lmce = m.mcgstatus & MCG_STATUS_LMCES; |
| 1109 | |
| 1110 | /* |
| 1111 | * Go through all banks in exclusion of the other CPUs. This way we |
| 1112 | * don't report duplicated events on shared banks because the first one |
| 1113 | * to see it will clear it. If this is a Local MCE, then no need to |
| 1114 | * perform rendezvous. |
| 1115 | */ |
| 1116 | if (!lmce) |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1117 | order = mce_start(&no_way_out); |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1118 | |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1119 | for (i = 0; i < cfg->banks; i++) { |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1120 | __clear_bit(i, toclear); |
Tony Luck | 95022b8 | 2012-04-18 15:19:40 -0700 | [diff] [blame] | 1121 | if (!test_bit(i, valid_banks)) |
| 1122 | continue; |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1123 | if (!mce_banks[i].ctl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | continue; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1125 | |
| 1126 | m.misc = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | m.addr = 0; |
| 1128 | m.bank = i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1130 | m.status = mce_rdmsrl(msr_ops.status(i)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1131 | if ((m.status & MCI_STATUS_VAL) == 0) |
| 1132 | continue; |
| 1133 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1134 | /* |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1135 | * Errors that are not uncorrected or not signaled are handled by |
| 1136 | * machine_check_poll(). Leave them alone, unless we're panicking. |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1137 | */ |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1138 | if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1139 | !no_way_out) |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1140 | continue; |
| 1141 | |
| 1142 | /* |
| 1143 | * Set taint even when machine check was not enabled. |
| 1144 | */ |
Rusty Russell | 373d4d0 | 2013-01-21 17:17:39 +1030 | [diff] [blame] | 1145 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1146 | |
Chen Yucong | e348027 | 2014-11-18 10:09:19 +0800 | [diff] [blame] | 1147 | severity = mce_severity(&m, cfg->tolerant, NULL, true); |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1148 | |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1149 | /* |
Chen Yucong | e348027 | 2014-11-18 10:09:19 +0800 | [diff] [blame] | 1150 | * When the machine check was for a corrected/deferred error, don't |
| 1151 | * touch it here, unless we're panicking. |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1152 | */ |
Chen Yucong | e348027 | 2014-11-18 10:09:19 +0800 | [diff] [blame] | 1153 | if ((severity == MCE_KEEP_SEVERITY || |
| 1154 | severity == MCE_UCNA_SEVERITY) && !no_way_out) |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1155 | continue; |
| 1156 | __set_bit(i, toclear); |
| 1157 | if (severity == MCE_NO_SEVERITY) { |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1158 | /* |
| 1159 | * Machine check event was not enabled. Clear, but |
| 1160 | * ignore. |
| 1161 | */ |
| 1162 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1163 | } |
| 1164 | |
Tony Luck | 85f92694 | 2011-12-13 09:48:13 -0800 | [diff] [blame] | 1165 | mce_read_aux(&m, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 1167 | /* assuming valid severity level != 0 */ |
| 1168 | m.severity = severity; |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1169 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1170 | mce_log(&m); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1172 | if (severity > worst) { |
| 1173 | *final = m; |
| 1174 | worst = severity; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | } |
| 1177 | |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1178 | /* mce_clear_state will clear *final, save locally for use later */ |
| 1179 | m = *final; |
| 1180 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1181 | if (!no_way_out) |
| 1182 | mce_clear_state(toclear); |
| 1183 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1184 | /* |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1185 | * Do most of the synchronization with other CPUs. |
| 1186 | * When there's any problem use only local no_way_out state. |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1187 | */ |
Ashok Raj | 243d657 | 2015-06-04 18:55:24 +0200 | [diff] [blame] | 1188 | if (!lmce) { |
| 1189 | if (mce_end(order) < 0) |
| 1190 | no_way_out = worst >= MCE_PANIC_SEVERITY; |
| 1191 | } else { |
| 1192 | /* |
| 1193 | * Local MCE skipped calling mce_reign(). |
| 1194 | * If we found a fatal error, we need to panic here. |
| 1195 | */ |
| 1196 | if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) |
| 1197 | mce_panic("Machine check from unknown source", |
| 1198 | NULL, NULL); |
| 1199 | } |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1200 | |
| 1201 | /* |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1202 | * If tolerant is at an insane level we drop requests to kill |
| 1203 | * processes and continue even when there is no way out. |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1204 | */ |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1205 | if (cfg->tolerant == 3) |
| 1206 | kill_it = 0; |
| 1207 | else if (no_way_out) |
| 1208 | mce_panic("Fatal machine check on current CPU", &m, msg); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1209 | |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1210 | if (worst > 0) |
| 1211 | mce_report_event(regs); |
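| | /* |
| |  * Clearing MCG_STATUS also clears MCIP; until that happens, another |
| |  * machine check on this CPU would force a shutdown instead of a |
| |  * nested #MC. |
| |  */ |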
Andi Kleen | 5f8c1a5 | 2009-04-29 19:29:12 +0200 | [diff] [blame] | 1212 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); |
Andi Kleen | 3256169 | 2009-05-27 21:56:53 +0200 | [diff] [blame] | 1213 | out: |
Andi Kleen | 88921be | 2009-05-27 21:56:51 +0200 | [diff] [blame] | 1214 | sync_core(); |
Luck, Tony | d4812e1 | 2015-01-05 16:44:42 -0800 | [diff] [blame] | 1215 | |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1216 | if (worst != MCE_AR_SEVERITY && !kill_it) |
| 1217 | goto out_ist; |
Luck, Tony | d4812e1 | 2015-01-05 16:44:42 -0800 | [diff] [blame] | 1218 | |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1219 | /* Fault was in user mode and we need to take some action */ |
| 1220 | if ((m.cs & 3) == 3) { |
| 1221 | ist_begin_non_atomic(regs); |
| 1222 | local_irq_enable(); |
| 1223 | |
| 1224 | if (kill_it || do_memory_failure(&m)) |
| 1225 | force_sig(SIGBUS, current); |
| 1226 | local_irq_disable(); |
| 1227 | ist_end_non_atomic(); |
| 1228 | } else { |
| 1229 | if (!fixup_exception(regs, X86_TRAP_MC)) |
| 1230 | mce_panic("Failed kernel mode recovery", &m, NULL); |
Luck, Tony | d4812e1 | 2015-01-05 16:44:42 -0800 | [diff] [blame] | 1231 | } |
Tony Luck | b2f9d67 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1232 | |
| 1233 | out_ist: |
Andy Lutomirski | 8c84014 | 2015-07-03 12:44:32 -0700 | [diff] [blame] | 1234 | ist_exit(regs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | } |
Andi Kleen | ea149b3 | 2009-04-29 19:31:00 +0200 | [diff] [blame] | 1236 | EXPORT_SYMBOL_GPL(do_machine_check); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | |
Tony Luck | cd42f4a | 2011-12-15 10:48:12 -0800 | [diff] [blame] | 1238 | #ifndef CONFIG_MEMORY_FAILURE |
| 1239 | int memory_failure(unsigned long pfn, int vector, int flags) |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1240 | { |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1241 | /* mce_severity() should not hand us an ACTION_REQUIRED error */ |
| 1242 | BUG_ON(flags & MF_ACTION_REQUIRED); |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1243 | pr_err("Uncorrected memory error in page 0x%lx ignored\n" |
| 1244 | "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", |
| 1245 | pfn); |
Tony Luck | cd42f4a | 2011-12-15 10:48:12 -0800 | [diff] [blame] | 1246 | |
| 1247 | return 0; |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1248 | } |
Tony Luck | cd42f4a | 2011-12-15 10:48:12 -0800 | [diff] [blame] | 1249 | #endif |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1250 | |
| 1251 | /* |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1252 | * Action optional processing happens here (picking up |
| 1253 | * from the list of faulting pages that do_machine_check() |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 1254 | * placed into the genpool). |
Tony Luck | a8c321f | 2012-01-03 11:45:45 -0800 | [diff] [blame] | 1255 | */ |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1256 | static void mce_process_work(struct work_struct *dummy) |
| 1257 | { |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 1258 | mce_gen_pool_process(); |
Andi Kleen | 9b1beaf | 2009-05-27 21:56:59 +0200 | [diff] [blame] | 1259 | } |
| 1260 | |
Dmitriy Zavin | 15d5f83 | 2006-09-26 10:52:42 +0200 | [diff] [blame] | 1261 | #ifdef CONFIG_X86_MCE_INTEL |
| 1262 | /*** |
| 1263 | * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog |
Dmitriy Zavin | 15d5f83 | 2006-09-26 10:52:42 +0200 | [diff] [blame] | 1265 | * @status: Event status information |
| 1266 | * |
| 1267 | * This function should be called by the thermal interrupt after the |
| 1268 | * event has been processed and the decision was made to log the event |
| 1269 | * further. |
| 1270 | * |
| 1271 | * The status parameter will be saved to the 'status' field of 'struct mce' |
| 1272 | * and historically has been the register value of the |
| 1273 | * MSR_IA32_THERMAL_STATUS (Intel) msr. |
| 1274 | */ |
Andi Kleen | b5f2fa4 | 2009-02-12 13:43:22 +0100 | [diff] [blame] | 1275 | void mce_log_therm_throt_event(__u64 status) |
Dmitriy Zavin | 15d5f83 | 2006-09-26 10:52:42 +0200 | [diff] [blame] | 1276 | { |
| 1277 | struct mce m; |
| 1278 | |
Andi Kleen | b5f2fa4 | 2009-02-12 13:43:22 +0100 | [diff] [blame] | 1279 | mce_setup(&m); |
Dmitriy Zavin | 15d5f83 | 2006-09-26 10:52:42 +0200 | [diff] [blame] | 1280 | m.bank = MCE_THERMAL_BANK; |
| 1281 | m.status = status; |
Dmitriy Zavin | 15d5f83 | 2006-09-26 10:52:42 +0200 | [diff] [blame] | 1282 | mce_log(&m); |
| 1283 | } |
| 1284 | #endif /* CONFIG_X86_MCE_INTEL */ |
| 1285 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | /* |
Tim Hockin | 8a336b0 | 2007-05-02 19:27:19 +0200 | [diff] [blame] | 1287 | * Periodic polling timer for "silent" machine check errors. If the |
| 1288 | * poller finds an MCE, poll 2x faster. When the poller finds no more |
| 1289 | * errors, poll 2x slower (up to check_interval seconds). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | */ |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1291 | static unsigned long check_interval = INITIAL_CHECK_INTERVAL; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1292 | |
Thomas Gleixner | 82f7af0 | 2012-05-24 17:54:51 +0000 | [diff] [blame] | 1293 | static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1294 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1296 | static unsigned long mce_adjust_timer_default(unsigned long interval) |
| 1297 | { |
| 1298 | return interval; |
| 1299 | } |
| 1300 | |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1301 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1302 | |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1303 | static void __restart_timer(struct timer_list *t, unsigned long interval) |
Chen, Gong | 27f6c57 | 2014-03-27 21:24:36 -0400 | [diff] [blame] | 1304 | { |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1305 | unsigned long when = jiffies + interval; |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1306 | unsigned long flags; |
| 1307 | |
| 1308 | local_irq_save(flags); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1309 | |
| 1310 | if (timer_pending(t)) { |
| 1311 | if (time_before(when, t->expires)) |
Thomas Gleixner | f9c287b | 2016-07-04 09:50:17 +0000 | [diff] [blame] | 1312 | mod_timer(t, when); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1313 | } else { |
| 1314 | t->expires = round_jiffies(when); |
| 1315 | add_timer_on(t, smp_processor_id()); |
| 1316 | } |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1317 | |
| 1318 | local_irq_restore(flags); |
| 1319 | } |
| 1320 | |
| 1321 | static void mce_timer_fn(unsigned long data) |
| 1322 | { |
| 1323 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
| 1324 | int cpu = smp_processor_id(); |
| 1325 | unsigned long iv; |
| 1326 | |
| 1327 | WARN_ON(cpu != data); |
| 1328 | |
| 1329 | iv = __this_cpu_read(mce_next_interval); |
| 1330 | |
| 1331 | if (mce_available(this_cpu_ptr(&cpu_info))) { |
| 1332 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks)); |
| 1333 | |
| 1334 | if (mce_intel_cmci_poll()) { |
| 1335 | iv = mce_adjust_timer(iv); |
| 1336 | goto done; |
| 1337 | } |
| 1338 | } |
| 1339 | |
| 1340 | /* |
| 1341 | * Alert userspace if needed. If we logged an MCE, reduce the polling |
| 1342 | * interval, otherwise increase the polling interval. |
| 1343 | */ |
| 1344 | if (mce_notify_irq()) |
| 1345 | iv = max(iv / 2, (unsigned long) HZ/100); |
| 1346 | else |
| 1347 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); |
| 1348 | |
| 1349 | done: |
| 1350 | __this_cpu_write(mce_next_interval, iv); |
| 1351 | __restart_timer(t, iv); |
| 1352 | } |
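| | /* |
| |  * The net effect is an adaptive poll: whenever mce_notify_irq() reports |
| |  * fresh events the interval is halved (with a floor of HZ/100 jiffies), |
| |  * and every quiet run doubles it again, up to the configured |
| |  * check_interval. |
| |  */ |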
| 1353 | |
| 1354 | /* |
| 1355 | * Ensure that the timer is firing in @interval from now. |
| 1356 | */ |
| 1357 | void mce_timer_kick(unsigned long interval) |
| 1358 | { |
| 1359 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
| 1360 | unsigned long iv = __this_cpu_read(mce_next_interval); |
| 1361 | |
| 1362 | __restart_timer(t, interval); |
| 1363 | |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 1364 | if (interval < iv) |
| 1365 | __this_cpu_write(mce_next_interval, interval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | } |
| 1367 | |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 1368 | /* Must not be called in IRQ context where del_timer_sync() can deadlock */ |
| 1369 | static void mce_timer_delete_all(void) |
| 1370 | { |
| 1371 | int cpu; |
| 1372 | |
| 1373 | for_each_online_cpu(cpu) |
| 1374 | del_timer_sync(&per_cpu(mce_timer, cpu)); |
| 1375 | } |
| 1376 | |
Andi Kleen | 9bd9840 | 2009-02-12 13:39:28 +0100 | [diff] [blame] | 1377 | static void mce_do_trigger(struct work_struct *work) |
| 1378 | { |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 1379 | call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); |
Andi Kleen | 9bd9840 | 2009-02-12 13:39:28 +0100 | [diff] [blame] | 1380 | } |
| 1381 | |
| 1382 | static DECLARE_WORK(mce_trigger_work, mce_do_trigger); |
| 1383 | |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1384 | /* |
Andi Kleen | 9bd9840 | 2009-02-12 13:39:28 +0100 | [diff] [blame] | 1385 | * Notify the user(s) about new machine check events. |
| 1386 | * Can be called from interrupt context, but not from machine check/NMI |
| 1387 | * context. |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1388 | */ |
Andi Kleen | 9ff36ee | 2009-05-27 21:56:58 +0200 | [diff] [blame] | 1389 | int mce_notify_irq(void) |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1390 | { |
Andi Kleen | 8457c84 | 2009-02-12 13:49:33 +0100 | [diff] [blame] | 1391 | /* Not more than two messages every minute */ |
| 1392 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); |
| 1393 | |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 1394 | if (test_and_clear_bit(0, &mce_need_notify)) { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1395 | /* wake processes polling /dev/mcelog */ |
| 1396 | wake_up_interruptible(&mce_chrdev_wait); |
Andi Kleen | 9bd9840 | 2009-02-12 13:39:28 +0100 | [diff] [blame] | 1397 | |
Tejun Heo | 4d899be | 2012-12-21 17:57:05 -0800 | [diff] [blame] | 1398 | if (mce_helper[0]) |
Andi Kleen | 9bd9840 | 2009-02-12 13:39:28 +0100 | [diff] [blame] | 1399 | schedule_work(&mce_trigger_work); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1400 | |
Andi Kleen | 8457c84 | 2009-02-12 13:49:33 +0100 | [diff] [blame] | 1401 | if (__ratelimit(&ratelimit)) |
Huang Ying | a2d7b0d | 2010-06-08 14:35:39 +0800 | [diff] [blame] | 1402 | pr_info(HW_ERR "Machine check events logged\n"); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1403 | |
| 1404 | return 1; |
| 1405 | } |
| 1406 | return 0; |
| 1407 | } |
Andi Kleen | 9ff36ee | 2009-05-27 21:56:58 +0200 | [diff] [blame] | 1408 | EXPORT_SYMBOL_GPL(mce_notify_irq); |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1409 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1410 | static int __mcheck_cpu_mce_banks_init(void) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1411 | { |
| 1412 | int i; |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1413 | u8 num_banks = mca_cfg.banks; |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1414 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1415 | mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL); |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1416 | if (!mce_banks) |
| 1417 | return -ENOMEM; |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1418 | |
| 1419 | for (i = 0; i < num_banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1420 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1421 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1422 | b->ctl = -1ULL; |
| 1423 | b->init = 1; |
| 1424 | } |
| 1425 | return 0; |
| 1426 | } |
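| | /* |
| |  * ctl == -1ULL enables every error reporting bit in MCi_CTL, and |
| |  * init == 1 marks the bank as one __mcheck_cpu_init_clear_banks() may |
| |  * write; the quirks below can veto that (e.g. bank 0 on some Intel |
| |  * family 6 models). |
| |  */ |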
| 1427 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1428 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | * Initialize Machine Checks for a CPU. |
| 1430 | */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1431 | static int __mcheck_cpu_cap_init(void) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1432 | { |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1433 | unsigned b; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1434 | u64 cap; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1435 | |
| 1436 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
Thomas Gleixner | 01c6680 | 2009-04-08 12:31:24 +0200 | [diff] [blame] | 1437 | |
| 1438 | b = cap & MCG_BANKCNT_MASK; |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1439 | if (!mca_cfg.banks) |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1440 | pr_info("CPU supports %d MCE banks\n", b); |
Ingo Molnar | b659294 | 2009-04-08 12:31:27 +0200 | [diff] [blame] | 1441 | |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1442 | if (b > MAX_NR_BANKS) { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1443 | pr_warn("Using only %u machine check banks out of %u\n", |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1444 | MAX_NR_BANKS, b); |
| 1445 | b = MAX_NR_BANKS; |
| 1446 | } |
| 1447 | |
| 1448 | /* Don't support asymmetric configurations today */ |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1449 | WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); |
| 1450 | mca_cfg.banks = b; |
| 1451 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1452 | if (!mce_banks) { |
Hidetoshi Seto | cffd377 | 2009-11-12 15:52:40 +0900 | [diff] [blame] | 1453 | int err = __mcheck_cpu_mce_banks_init(); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1454 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1455 | if (err) |
| 1456 | return err; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1457 | } |
| 1458 | |
| 1459 | /* Use accurate RIP reporting if available. */ |
Thomas Gleixner | 01c6680 | 2009-04-08 12:31:24 +0200 | [diff] [blame] | 1460 | if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1461 | mca_cfg.rip_msr = MSR_IA32_MCG_EIP; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1462 | |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1463 | if (cap & MCG_SER_P) |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1464 | mca_cfg.ser = true; |
Andi Kleen | ed7290d | 2009-05-27 21:56:57 +0200 | [diff] [blame] | 1465 | |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1466 | return 0; |
| 1467 | } |
| 1468 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1469 | static void __mcheck_cpu_init_generic(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | { |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1471 | enum mcp_flags m_fl = 0; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1472 | mce_banks_t all_banks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | u64 cap; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1474 | |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1475 | if (!mca_cfg.bootlog) |
| 1476 | m_fl = MCP_DONTLOG; |
| 1477 | |
Andi Kleen | b79109c | 2009-02-12 13:43:23 +0100 | [diff] [blame] | 1478 | /* |
| 1479 | * Log the machine checks left over from the previous reset. |
| 1480 | */ |
Andi Kleen | ee031c3 | 2009-02-12 13:49:34 +0100 | [diff] [blame] | 1481 | bitmap_fill(all_banks, MAX_NR_BANKS); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1482 | machine_check_poll(MCP_UC | m_fl, &all_banks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | |
Andy Lutomirski | 375074c | 2014-10-24 15:58:07 -0700 | [diff] [blame] | 1484 | cr4_set_bits(X86_CR4_MCE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1485 | |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1486 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | if (cap & MCG_CTL_P) |
| 1488 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 1489 | } |
| 1490 | |
| 1491 | static void __mcheck_cpu_init_clear_banks(void) |
| 1492 | { |
| 1493 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1495 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1496 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 1497 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1498 | if (!b->init) |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1499 | continue; |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1500 | wrmsrl(msr_ops.ctl(i), b->ctl); |
| 1501 | wrmsrl(msr_ops.status(i), 0); |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1502 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | } |
| 1504 | |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 1505 | /* |
| 1506 | * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and |
| 1507 | * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM |
| 1508 | * Vol 3B Table 15-20). But this confuses both the code that determines |
| 1509 | * whether the machine check occurred in kernel or user mode, and also |
| 1510 | * the severity assessment code. Pretend that EIPV was set, and take the |
| 1511 | * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. |
| 1512 | */ |
| 1513 | static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) |
| 1514 | { |
| 1515 | if (bank != 0) |
| 1516 | return; |
| 1517 | if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) |
| 1518 | return; |
| 1519 | if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| |
| 1520 | MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| |
| 1521 | MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| |
| 1522 | MCACOD)) != |
| 1523 | (MCI_STATUS_UC|MCI_STATUS_EN| |
| 1524 | MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| |
| 1525 | MCI_STATUS_AR|MCACOD_INSTR)) |
| 1526 | return; |
| 1527 | |
| 1528 | m->mcgstatus |= MCG_STATUS_EIPV; |
| 1529 | m->ip = regs->ip; |
| 1530 | m->cs = regs->cs; |
| 1531 | } |
| 1532 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1533 | /* Add per CPU specific workarounds here */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1534 | static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1535 | { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1536 | struct mca_config *cfg = &mca_cfg; |
| 1537 | |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1538 | if (c->x86_vendor == X86_VENDOR_UNKNOWN) { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1539 | pr_info("unknown CPU type - not enabling MCE support\n"); |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1540 | return -EOPNOTSUPP; |
| 1541 | } |
| 1542 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | /* This should be disabled by the BIOS, but isn't always */ |
Jan Beulich | 911f6a7 | 2008-04-22 16:22:21 +0100 | [diff] [blame] | 1544 | if (c->x86_vendor == X86_VENDOR_AMD) { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1545 | if (c->x86 == 15 && cfg->banks > 4) { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1546 | /* |
| 1547 | * disable GART TBL walk error reporting, which |
| 1548 | * trips off incorrectly with the IOMMU & 3ware |
| 1549 | * & Cerberus: |
| 1550 | */ |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1551 | clear_bit(10, (unsigned long *)&mce_banks[4].ctl); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1552 | } |
Aravind Gopalakrishnan | 10001d91 | 2016-04-30 14:33:51 +0200 | [diff] [blame] | 1553 | if (c->x86 < 17 && cfg->bootlog < 0) { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1554 | /* |
| 1555 | * Lots of broken BIOS around that don't clear them |
| 1556 | * by default and leave crap in there. Don't log: |
| 1557 | */ |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1558 | cfg->bootlog = 0; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1559 | } |
Andi Kleen | 2e6f694 | 2009-04-27 18:42:48 +0200 | [diff] [blame] | 1560 | /* |
| 1561 | * Various K7s with broken bank 0 around. Always disable |
| 1562 | * by default. |
| 1563 | */ |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1564 | if (c->x86 == 6 && cfg->banks > 0) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1565 | mce_banks[0].ctl = 0; |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1566 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1567 | /* |
Aravind Gopalakrishnan | bf80bbd | 2015-03-23 10:42:52 -0500 | [diff] [blame] | 1568 | * overflow_recov is supported for F15h Models 00h-0fh |
| 1569 | * even though we don't have a CPUID bit for it. |
| 1570 | */ |
| 1571 | if (c->x86 == 0x15 && c->x86_model <= 0xf) |
| 1572 | mce_flags.overflow_recov = 1; |
| 1573 | |
| 1574 | /* |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1575 | * Turn off MC4_MISC thresholding banks on those models since |
| 1576 | * they're not supported there. |
| 1577 | */ |
| 1578 | if (c->x86 == 0x15 && |
| 1579 | (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { |
| 1580 | int i; |
| 1581 | u64 hwcr; |
| 1582 | bool need_toggle; |
| 1583 | u32 msrs[] = { |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1584 | 0x00000413, /* MC4_MISC0 */ |
| 1585 | 0xc0000408, /* MC4_MISC1 */ |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1586 | }; |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1587 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1588 | rdmsrl(MSR_K7_HWCR, hwcr); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1589 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1590 | /* McStatusWrEn has to be set */ |
| 1591 | need_toggle = !(hwcr & BIT(18)); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1592 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1593 | if (need_toggle) |
| 1594 | wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1595 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1596 | /* Clear CntP bit safely */ |
| 1597 | for (i = 0; i < ARRAY_SIZE(msrs); i++) |
| 1598 | msr_clear_bit(msrs[i], 62); |
Borislav Petkov | 575203b | 2012-04-20 18:01:34 +0200 | [diff] [blame] | 1599 | |
Borislav Petkov | c9ce871 | 2015-03-13 23:30:47 +0100 | [diff] [blame] | 1600 | /* restore old settings */ |
| 1601 | if (need_toggle) |
| 1602 | wrmsrl(MSR_K7_HWCR, hwcr); |
| 1603 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | } |
Andi Kleen | e583538 | 2005-11-05 17:25:54 +0100 | [diff] [blame] | 1605 | |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1606 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
| 1607 | /* |
| 1608 | * SDM documents that on family 6 bank 0 should not be written |
| 1609 | * because it aliases to another special BIOS controlled |
| 1610 | * register. |
| 1611 | * But it's not aliased anymore on model 0x1a+. |
| 1612 | * Don't ignore bank 0 completely because there could be a |
| 1613 | * valid event later, merely don't write CTL0. |
| 1614 | */ |
| 1615 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 1616 | if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 1617 | mce_banks[0].init = 0; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 1618 | |
| 1619 | /* |
| 1620 | * All newer Intel systems support MCE broadcasting. Enable |
| 1621 | * synchronization with a one second timeout. |
| 1622 | */ |
| 1623 | if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1624 | cfg->monarch_timeout < 0) |
| 1625 | cfg->monarch_timeout = USEC_PER_SEC; |
Bartlomiej Zolnierkiewicz | c7f6fa4 | 2009-07-28 23:52:54 +0200 | [diff] [blame] | 1626 | |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1627 | /* |
| 1628 | * There are also broken BIOSes on some Pentium M and |
| 1629 | * earlier systems: |
| 1630 | */ |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1631 | if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) |
| 1632 | cfg->bootlog = 0; |
Tony Luck | 61b0fcc | 2012-07-19 11:28:46 -0700 | [diff] [blame] | 1633 | |
| 1634 | if (c->x86 == 6 && c->x86_model == 45) |
| 1635 | quirk_no_way_out = quirk_sandybridge_ifu; |
Tony Luck | 0f68c08 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 1636 | /* |
| 1637 | * MCG_CAP.MCG_SER_P is necessary but not sufficient to know |
| 1638 | * whether this processor will actually generate recoverable |
| 1639 | * machine checks. Check to see if this is an E7 model Xeon. |
| 1640 | * We can't do a model number check because E5 and E7 use the |
| 1641 | * same model number. E5 doesn't support recovery, E7 does. |
| 1642 | */ |
| 1643 | if (mca_cfg.recovery || (mca_cfg.ser && |
| 1644 | !strncmp(c->x86_model_id, |
| 1645 | "Intel(R) Xeon(R) CPU E7-", 24))) |
| 1646 | set_cpu_cap(c, X86_FEATURE_MCE_RECOVERY); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 1647 | } |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 1648 | if (cfg->monarch_timeout < 0) |
| 1649 | cfg->monarch_timeout = 0; |
| 1650 | if (cfg->bootlog != 0) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1651 | cfg->panic_timeout = 30; |
Ingo Molnar | e412cd2 | 2009-08-17 10:19:00 +0200 | [diff] [blame] | 1652 | |
| 1653 | return 0; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1654 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1656 | static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1657 | { |
| 1658 | if (c->x86 != 5) |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1659 | return 0; |
| 1660 | |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1661 | switch (c->x86_vendor) { |
| 1662 | case X86_VENDOR_INTEL: |
Hidetoshi Seto | c697836 | 2009-06-15 17:22:49 +0900 | [diff] [blame] | 1663 | intel_p5_mcheck_init(c); |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1664 | return 1; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1665 | break; |
| 1666 | case X86_VENDOR_CENTAUR: |
| 1667 | winchip_mcheck_init(c); |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1668 | return 1; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1669 | break; |
Borislav Petkov | dc34bdd | 2015-10-30 13:11:38 +0100 | [diff] [blame] | 1670 | default: |
| 1671 | return 0; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1672 | } |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1673 | |
| 1674 | return 0; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1675 | } |
| 1676 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1677 | static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 | { |
| 1679 | switch (c->x86_vendor) { |
| 1680 | case X86_VENDOR_INTEL: |
| 1681 | mce_intel_feature_init(c); |
Borislav Petkov | 3f2f068 | 2015-01-13 15:08:51 +0100 | [diff] [blame] | 1682 | mce_adjust_timer = cmci_intel_adjust_timer; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1683 | break; |
Aravind Gopalakrishnan | 7559e13 | 2015-05-06 06:58:55 -0500 | [diff] [blame] | 1684 | |
| 1685 | case X86_VENDOR_AMD: { |
Yazen Ghannam | 14cddfd | 2016-05-11 14:58:27 +0200 | [diff] [blame] | 1686 | mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); |
| 1687 | mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); |
| 1688 | mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 1689 | |
| 1690 | /* |
| 1691 | * Install proper ops for Scalable MCA enabled processors |
| 1692 | */ |
| 1693 | if (mce_flags.smca) { |
| 1694 | msr_ops.ctl = smca_ctl_reg; |
| 1695 | msr_ops.status = smca_status_reg; |
| 1696 | msr_ops.addr = smca_addr_reg; |
| 1697 | msr_ops.misc = smca_misc_reg; |
| 1698 | } |
Aravind Gopalakrishnan | bfbe0ee | 2016-01-25 20:41:48 +0100 | [diff] [blame] | 1699 | mce_amd_feature_init(c); |
Aravind Gopalakrishnan | c7f54d2 | 2015-10-30 13:11:37 +0100 | [diff] [blame] | 1700 | |
Jacob Shin | 89b831e | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 1701 | break; |
Aravind Gopalakrishnan | 7559e13 | 2015-05-06 06:58:55 -0500 | [diff] [blame] | 1702 | } |
| 1703 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1704 | default: |
| 1705 | break; |
| 1706 | } |
| 1707 | } |
| 1708 | |
Ashok Raj | 8838eb6 | 2015-08-12 18:29:40 +0200 | [diff] [blame] | 1709 | static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) |
| 1710 | { |
| 1711 | switch (c->x86_vendor) { |
| 1712 | case X86_VENDOR_INTEL: |
| 1713 | mce_intel_feature_clear(c); |
| 1714 | break; |
| 1715 | default: |
| 1716 | break; |
| 1717 | } |
| 1718 | } |
| 1719 | |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1720 | static void mce_start_timer(unsigned int cpu, struct timer_list *t) |
| 1721 | { |
Borislav Petkov | 4f75d84 | 2013-12-23 18:05:02 +0100 | [diff] [blame] | 1722 | unsigned long iv = check_interval * HZ; |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1723 | |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 1724 | if (mca_cfg.ignore_ce || !iv) |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1725 | return; |
| 1726 | |
Borislav Petkov | 4f75d84 | 2013-12-23 18:05:02 +0100 | [diff] [blame] | 1727 | per_cpu(mce_next_interval, cpu) = iv; |
| 1728 | |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1729 | t->expires = round_jiffies(jiffies + iv); |
Borislav Petkov | 4f75d84 | 2013-12-23 18:05:02 +0100 | [diff] [blame] | 1730 | add_timer_on(t, cpu); |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1731 | } |
| 1732 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1733 | static void __mcheck_cpu_init_timer(void) |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1734 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 1735 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1736 | unsigned int cpu = smp_processor_id(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1737 | |
Thomas Gleixner | f9c287b | 2016-07-04 09:50:17 +0000 | [diff] [blame] | 1738 | setup_pinned_timer(t, mce_timer_fn, cpu); |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 1739 | mce_start_timer(cpu, t); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 1740 | } |
| 1741 | |
Andi Kleen | 9eda8cb | 2009-07-09 00:31:42 +0200 | [diff] [blame] | 1742 | /* Handle unconfigured int18 (should never happen) */ |
| 1743 | static void unexpected_machine_check(struct pt_regs *regs, long error_code) |
| 1744 | { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1745 | pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", |
Andi Kleen | 9eda8cb | 2009-07-09 00:31:42 +0200 | [diff] [blame] | 1746 | smp_processor_id()); |
| 1747 | } |
| 1748 | |
| 1749 | /* Call the installed machine check handler for this CPU setup. */ |
| 1750 | void (*machine_check_vector)(struct pt_regs *, long error_code) = |
| 1751 | unexpected_machine_check; |
| 1752 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1753 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1754 | * Called for each booted CPU to set up machine checks. |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1755 | * Must be called with preempt off: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1756 | */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1757 | void mcheck_cpu_init(struct cpuinfo_x86 *c) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1758 | { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1759 | if (mca_cfg.disabled) |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1760 | return; |
| 1761 | |
Hidetoshi Seto | 3a97fc3 | 2011-06-08 10:58:35 +0900 | [diff] [blame] | 1762 | if (__mcheck_cpu_ancient_init(c)) |
| 1763 | return; |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 1764 | |
Andi Kleen | 5b4408f | 2009-02-12 13:39:30 +0100 | [diff] [blame] | 1765 | if (!mce_available(c)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 | return; |
| 1767 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1768 | if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 1769 | mca_cfg.disabled = true; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1770 | return; |
| 1771 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 1772 | |
Chen, Gong | 648ed94 | 2015-08-12 18:29:34 +0200 | [diff] [blame] | 1773 | if (mce_gen_pool_init()) { |
| 1774 | mca_cfg.disabled = true; |
| 1775 | pr_emerg("Couldn't allocate MCE records pool!\n"); |
| 1776 | return; |
| 1777 | } |
| 1778 | |
Andi Kleen | 5d72792 | 2009-04-27 19:25:48 +0200 | [diff] [blame] | 1779 | machine_check_vector = do_machine_check; |
| 1780 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1781 | __mcheck_cpu_init_generic(); |
| 1782 | __mcheck_cpu_init_vendor(c); |
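| | /* |
| |  * The bank control registers are only written after the vendor setup |
| |  * has run, so that e.g. the SMCA msr_ops installed above are already |
| |  * in place when __mcheck_cpu_init_clear_banks() touches the |
| |  * control/status MSRs. |
| |  */ |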
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 1783 | __mcheck_cpu_init_clear_banks(); |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 1784 | __mcheck_cpu_init_timer(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | } |
| 1786 | |
| 1787 | /* |
Ashok Raj | 8838eb6 | 2015-08-12 18:29:40 +0200 | [diff] [blame] | 1788 | * Called for each booted CPU to clear some machine check opt-ins. |
| 1789 | */ |
| 1790 | void mcheck_cpu_clear(struct cpuinfo_x86 *c) |
| 1791 | { |
| 1792 | if (mca_cfg.disabled) |
| 1793 | return; |
| 1794 | |
| 1795 | if (!mce_available(c)) |
| 1796 | return; |
| 1797 | |
| 1798 | /* |
| 1799 | * A place to possibly clear settings generic to x86: |
| 1800 | * __mcheck_cpu_clear_generic(c); |
| 1801 | */ |
| 1802 | __mcheck_cpu_clear_vendor(c); |
| 1803 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1804 | } |
| 1805 | |
| 1806 | /* |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1807 | * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1808 | */ |
| 1809 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1810 | static DEFINE_SPINLOCK(mce_chrdev_state_lock); |
| 1811 | static int mce_chrdev_open_count; /* #times opened */ |
| 1812 | static int mce_chrdev_open_exclu; /* already open exclusive? */ |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1813 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1814 | static int mce_chrdev_open(struct inode *inode, struct file *file) |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1815 | { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1816 | spin_lock(&mce_chrdev_state_lock); |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1817 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1818 | if (mce_chrdev_open_exclu || |
| 1819 | (mce_chrdev_open_count && (file->f_flags & O_EXCL))) { |
| 1820 | spin_unlock(&mce_chrdev_state_lock); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1821 | |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1822 | return -EBUSY; |
| 1823 | } |
| 1824 | |
| 1825 | if (file->f_flags & O_EXCL) |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1826 | mce_chrdev_open_exclu = 1; |
| 1827 | mce_chrdev_open_count++; |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1828 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1829 | spin_unlock(&mce_chrdev_state_lock); |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1830 | |
Tim Hockin | bd78432 | 2007-07-21 17:10:37 +0200 | [diff] [blame] | 1831 | return nonseekable_open(inode, file); |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1832 | } |
| 1833 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1834 | static int mce_chrdev_release(struct inode *inode, struct file *file) |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1835 | { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1836 | spin_lock(&mce_chrdev_state_lock); |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1837 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1838 | mce_chrdev_open_count--; |
| 1839 | mce_chrdev_open_exclu = 0; |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1840 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1841 | spin_unlock(&mce_chrdev_state_lock); |
Tim Hockin | f528e7b | 2007-07-21 17:10:35 +0200 | [diff] [blame] | 1842 | |
| 1843 | return 0; |
| 1844 | } |
| 1845 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1846 | static void collect_tscs(void *data) |
| 1847 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | unsigned long *cpu_tsc = (unsigned long *)data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1849 | |
Andy Lutomirski | 4ea1636 | 2015-06-25 18:44:07 +0200 | [diff] [blame] | 1850 | cpu_tsc[smp_processor_id()] = rdtsc(); |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1851 | } |
| 1852 | |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1853 | static int mce_apei_read_done; |
| 1854 | |
| 1855 | /* Collect MCE record of previous boot in persistent storage via APEI ERST. */ |
| 1856 | static int __mce_read_apei(char __user **ubuf, size_t usize) |
| 1857 | { |
| 1858 | int rc; |
| 1859 | u64 record_id; |
| 1860 | struct mce m; |
| 1861 | |
| 1862 | if (usize < sizeof(struct mce)) |
| 1863 | return -EINVAL; |
| 1864 | |
| 1865 | rc = apei_read_mce(&m, &record_id); |
| 1866 | /* Error, or no more MCE records */ |
| 1867 | if (rc <= 0) { |
| 1868 | mce_apei_read_done = 1; |
Naoya Horiguchi | fadd85f | 2012-01-23 15:54:52 -0500 | [diff] [blame] | 1869 | /* |
| 1870 | * When ERST is disabled, mce_chrdev_read() should return |
| 1871 | * "no record" instead of "no device." |
| 1872 | */ |
| 1873 | if (rc == -ENODEV) |
| 1874 | return 0; |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1875 | return rc; |
| 1876 | } |
| 1877 | rc = -EFAULT; |
| 1878 | if (copy_to_user(*ubuf, &m, sizeof(struct mce))) |
| 1879 | return rc; |
| 1880 | /* |
| 1881 | * Ideally the record would be cleared only after it has been |
| 1882 | * flushed to disk or sent out over the network by /sbin/mcelog, |
| 1883 | * but there is no interface to support that yet, so just clear |
| 1884 | * it here to avoid duplicates. |
| 1885 | */ |
| 1886 | rc = apei_clear_mce(record_id); |
| 1887 | if (rc) { |
| 1888 | mce_apei_read_done = 1; |
| 1889 | return rc; |
| 1890 | } |
| 1891 | *ubuf += sizeof(struct mce); |
| 1892 | |
| 1893 | return 0; |
| 1894 | } |
| 1895 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1896 | static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf, |
| 1897 | size_t usize, loff_t *off) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1898 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1899 | char __user *buf = ubuf; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1900 | unsigned long *cpu_tsc; |
| 1901 | unsigned prev, next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | int i, err; |
| 1903 | |
Mike Travis | 6bca67f | 2008-07-18 18:11:27 -0700 | [diff] [blame] | 1904 | cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL); |
Andi Kleen | f0de53b | 2005-04-16 15:25:10 -0700 | [diff] [blame] | 1905 | if (!cpu_tsc) |
| 1906 | return -ENOMEM; |
| 1907 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1908 | mutex_lock(&mce_chrdev_read_mutex); |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1909 | |
| 1910 | if (!mce_apei_read_done) { |
| 1911 | err = __mce_read_apei(&buf, usize); |
| 1912 | if (err || buf != ubuf) |
| 1913 | goto out; |
| 1914 | } |
| 1915 | |
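| | /* |
| | * Snapshot mcelog.next and drain entries [0, next): wait briefly for |
| | * each entry's ->finished flag, copy it out to userspace and clear the |
| | * slot. The cmpxchg() below either resets mcelog.next to 0 or picks up |
| | * entries that were appended while we were copying. |
| | */ |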
Borislav Petkov | 9a7783d | 2015-08-12 18:29:43 +0200 | [diff] [blame] | 1916 | next = mce_log_get_idx_check(mcelog.next); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | |
| 1918 | /* Only supports full reads right now */ |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1919 | err = -EINVAL; |
| 1920 | if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) |
| 1921 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1922 | |
| 1923 | err = 0; |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1924 | prev = 0; |
| 1925 | do { |
| 1926 | for (i = prev; i < next; i++) { |
| 1927 | unsigned long start = jiffies; |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1928 | struct mce *m = &mcelog.entry[i]; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1929 | |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1930 | while (!m->finished) { |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1931 | if (time_after_eq(jiffies, start + 2)) { |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1932 | memset(m, 0, sizeof(*m)); |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1933 | goto timeout; |
| 1934 | } |
| 1935 | cpu_relax(); |
Andi Kleen | 673242c | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 1936 | } |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1937 | smp_rmb(); |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1938 | err |= copy_to_user(buf, m, sizeof(*m)); |
| 1939 | buf += sizeof(*m); |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1940 | timeout: |
| 1941 | ; |
Andi Kleen | 673242c | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 1942 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1943 | |
Huang Ying | ef41df434 | 2009-02-12 13:39:34 +0100 | [diff] [blame] | 1944 | memset(mcelog.entry + prev, 0, |
| 1945 | (next - prev) * sizeof(struct mce)); |
| 1946 | prev = next; |
| 1947 | next = cmpxchg(&mcelog.next, prev, 0); |
| 1948 | } while (next != prev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1949 | |
Paul E. McKenney | b2b1866 | 2005-06-25 14:55:38 -0700 | [diff] [blame] | 1950 | synchronize_sched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1951 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1952 | /* |
| 1953 | * Collect entries that were still being written before the |
| 1954 | * synchronize_sched() above completed. |
| 1955 | */ |
Jens Axboe | 15c8b6c | 2008-05-09 09:39:44 +0200 | [diff] [blame] | 1956 | on_each_cpu(collect_tscs, cpu_tsc, 1); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1957 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1958 | for (i = next; i < MCE_LOG_LEN; i++) { |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1959 | struct mce *m = &mcelog.entry[i]; |
| 1960 | |
| 1961 | if (m->finished && m->tsc < cpu_tsc[m->cpu]) { |
| 1962 | err |= copy_to_user(buf, m, sizeof(*m)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1963 | smp_rmb(); |
Hidetoshi Seto | 559faa6 | 2011-06-08 11:00:08 +0900 | [diff] [blame] | 1964 | buf += sizeof(*m); |
| 1965 | memset(m, 0, sizeof(*m)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1966 | } |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1967 | } |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1968 | |
| 1969 | if (err) |
| 1970 | err = -EFAULT; |
| 1971 | |
| 1972 | out: |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1973 | mutex_unlock(&mce_chrdev_read_mutex); |
Andi Kleen | f0de53b | 2005-04-16 15:25:10 -0700 | [diff] [blame] | 1974 | kfree(cpu_tsc); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1975 | |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1976 | return err ? err : buf - ubuf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1977 | } |
| 1978 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1979 | static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait) |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1980 | { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1981 | poll_wait(file, &mce_chrdev_wait, wait); |
Paul E. McKenney | e90328b | 2015-04-19 18:16:02 -0700 | [diff] [blame] | 1982 | if (READ_ONCE(mcelog.next)) |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1983 | return POLLIN | POLLRDNORM; |
Huang Ying | 482908b | 2010-05-18 14:35:22 +0800 | [diff] [blame] | 1984 | if (!mce_apei_read_done && apei_check_mce()) |
| 1985 | return POLLIN | POLLRDNORM; |
Tim Hockin | e02e68d | 2007-07-21 17:10:36 +0200 | [diff] [blame] | 1986 | return 0; |
| 1987 | } |
| 1988 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 1989 | static long mce_chrdev_ioctl(struct file *f, unsigned int cmd, |
| 1990 | unsigned long arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | { |
| 1992 | int __user *p = (int __user *)arg; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1993 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1994 | if (!capable(CAP_SYS_ADMIN)) |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1995 | return -EPERM; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 1996 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | switch (cmd) { |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 1998 | case MCE_GET_RECORD_LEN: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | return put_user(sizeof(struct mce), p); |
| 2000 | case MCE_GET_LOG_LEN: |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2001 | return put_user(MCE_LOG_LEN, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2002 | case MCE_GETCLEAR_FLAGS: { |
| 2003 | unsigned flags; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2004 | |
| 2005 | do { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2006 | flags = mcelog.flags; |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2007 | } while (cmpxchg(&mcelog.flags, flags, 0) != flags); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2008 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2009 | return put_user(flags, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2010 | } |
| 2011 | default: |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2012 | return -ENOTTY; |
| 2013 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2014 | } |
| 2015 | |
Luck, Tony | 66f5ddf | 2011-11-03 11:46:47 -0700 | [diff] [blame] | 2016 | static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf, |
| 2017 | size_t usize, loff_t *off); |
| 2018 | |
| 2019 | void register_mce_write_callback(ssize_t (*fn)(struct file *filp, |
| 2020 | const char __user *ubuf, |
| 2021 | size_t usize, loff_t *off)) |
| 2022 | { |
| 2023 | mce_write = fn; |
| 2024 | } |
| 2025 | EXPORT_SYMBOL_GPL(register_mce_write_callback); |
| 2026 | |
Paul E. McKenney | 29c6820 | 2015-04-21 14:05:25 -0700 | [diff] [blame] | 2027 | static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, |
| 2028 | size_t usize, loff_t *off) |
Luck, Tony | 66f5ddf | 2011-11-03 11:46:47 -0700 | [diff] [blame] | 2029 | { |
| 2030 | if (mce_write) |
| 2031 | return mce_write(filp, ubuf, usize, off); |
| 2032 | else |
| 2033 | return -EINVAL; |
| 2034 | } |
| 2035 | |
| 2036 | static const struct file_operations mce_chrdev_ops = { |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2037 | .open = mce_chrdev_open, |
| 2038 | .release = mce_chrdev_release, |
| 2039 | .read = mce_chrdev_read, |
Luck, Tony | 66f5ddf | 2011-11-03 11:46:47 -0700 | [diff] [blame] | 2040 | .write = mce_chrdev_write, |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2041 | .poll = mce_chrdev_poll, |
| 2042 | .unlocked_ioctl = mce_chrdev_ioctl, |
| 2043 | .llseek = no_llseek, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2044 | }; |
| 2045 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2046 | static struct miscdevice mce_chrdev_device = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | .minor = MISC_MCELOG_MINOR, |
| 2048 | .name  = "mcelog", |
| 2049 | .fops  = &mce_chrdev_ops, |
| 2050 | }; |
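| | /* |
| | * Minimal sketch of the userspace side of this interface (illustrative |
| | * only -- the authoritative consumer is the mcelog(8) daemon). It assumes |
| | * the MCE_* ioctl definitions from the uapi <asm/mce.h> header: |
| | * |
| | *	int fd = open("/dev/mcelog", O_RDONLY); |
| | *	int recsize, loglen; |
| | *	ioctl(fd, MCE_GET_RECORD_LEN, &recsize); |
| | *	ioctl(fd, MCE_GET_LOG_LEN, &loglen); |
| | *	// poll() for POLLIN, then issue one full-sized read; |
| | *	// mce_chrdev_read() rejects partial reads. |
| | *	char buf[loglen * recsize]; |
| | *	ssize_t n = read(fd, buf, loglen * recsize); |
| | */ |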
| 2051 | |
Naveen N. Rao | c3d1fb5 | 2013-07-01 21:08:47 +0530 | [diff] [blame] | 2052 | static void __mce_disable_bank(void *arg) |
| 2053 | { |
| 2054 | int bank = *((int *)arg); |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2055 | __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); |
Naveen N. Rao | c3d1fb5 | 2013-07-01 21:08:47 +0530 | [diff] [blame] | 2056 | cmci_disable_bank(bank); |
| 2057 | } |
| 2058 | |
| 2059 | void mce_disable_bank(int bank) |
| 2060 | { |
| 2061 | if (bank >= mca_cfg.banks) { |
| 2062 | pr_warn(FW_BUG |
| 2063 | "Ignoring request to disable invalid MCA bank %d.\n", |
| 2064 | bank); |
| 2065 | return; |
| 2066 | } |
| 2067 | set_bit(bank, mce_banks_ce_disabled); |
| 2068 | on_each_cpu(__mce_disable_bank, &bank, 1); |
| 2069 | } |
| 2070 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2071 | /* |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 2072 | * mce=off Disables machine check |
| 2073 | * mce=no_cmci Disables CMCI |
Ashok Raj | 88d5386 | 2015-06-04 18:55:23 +0200 | [diff] [blame] | 2074 | * mce=no_lmce Disables LMCE |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 2075 | * mce=dont_log_ce Clears corrected events silently, no log created for CEs. |
| 2076 | * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 2077 | * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) |
| 2078 | * monarchtimeout is how long to wait for other CPUs on machine |
| 2079 | * check, or 0 to not wait |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 2080 | * mce=bootlog Log MCEs from before booting. Disabled by default on AMD. |
| 2081 | * mce=nobootlog Don't log MCEs from before booting. |
Naveen N. Rao | 450cc20 | 2012-09-27 10:08:00 -0700 | [diff] [blame] | 2082 | * mce=bios_cmci_threshold Don't program the CMCI threshold |
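| | * mce=recovery Force-enable recoverable machine check code paths |
| | *	(sets mca_cfg.recovery, handled in mcheck_enable() below) |
| | * |
| | * Example (illustrative): booting with "mce=2,500000" sets the tolerance |
| | * level to 2 and monarchtimeout to 500000; "mce=no_cmci" turns CMCI off. |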
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 2083 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2084 | static int __init mcheck_enable(char *str) |
| 2085 | { |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2086 | struct mca_config *cfg = &mca_cfg; |
| 2087 | |
Bartlomiej Zolnierkiewicz | e3346fc | 2009-07-28 23:55:09 +0200 | [diff] [blame] | 2088 | if (*str == 0) { |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 2089 | enable_p5_mce(); |
Bartlomiej Zolnierkiewicz | e3346fc | 2009-07-28 23:55:09 +0200 | [diff] [blame] | 2090 | return 1; |
| 2091 | } |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 2092 | if (*str == '=') |
| 2093 | str++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | if (!strcmp(str, "off")) |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 2095 | cfg->disabled = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 2096 | else if (!strcmp(str, "no_cmci")) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2097 | cfg->cmci_disabled = true; |
Ashok Raj | 88d5386 | 2015-06-04 18:55:23 +0200 | [diff] [blame] | 2098 | else if (!strcmp(str, "no_lmce")) |
| 2099 | cfg->lmce_disabled = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 2100 | else if (!strcmp(str, "dont_log_ce")) |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2101 | cfg->dont_log_ce = true; |
Hidetoshi Seto | 62fdac5 | 2009-06-11 16:06:07 +0900 | [diff] [blame] | 2102 | else if (!strcmp(str, "ignore_ce")) |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2103 | cfg->ignore_ce = true; |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 2104 | else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 2105 | cfg->bootlog = (str[0] == 'b'); |
Naveen N. Rao | 450cc20 | 2012-09-27 10:08:00 -0700 | [diff] [blame] | 2106 | else if (!strcmp(str, "bios_cmci_threshold")) |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 2107 | cfg->bios_cmci_threshold = true; |
Tony Luck | 0f68c08 | 2016-02-17 10:20:13 -0800 | [diff] [blame] | 2108 | else if (!strcmp(str, "recovery")) |
| 2109 | cfg->recovery = true; |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 2110 | else if (isdigit(str[0])) { |
Xie XiuQi | 5c31b28 | 2015-05-26 10:28:21 +0200 | [diff] [blame] | 2111 | if (get_option(&str, &cfg->tolerant) == 2) |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 2112 | get_option(&str, &(cfg->monarch_timeout)); |
Andi Kleen | 3c07979 | 2009-05-27 21:56:55 +0200 | [diff] [blame] | 2113 | } else { |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 2114 | pr_info("mce argument %s ignored. Please use /sys\n", str); |
Hidetoshi Seto | 13503fa | 2009-03-26 17:39:20 +0900 | [diff] [blame] | 2115 | return 0; |
| 2116 | } |
OGAWA Hirofumi | 9b41046 | 2006-03-31 02:30:33 -0800 | [diff] [blame] | 2117 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2118 | } |
Andi Kleen | 4efc067 | 2009-04-28 19:07:31 +0200 | [diff] [blame] | 2119 | __setup("mce", mcheck_enable); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2120 | |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 2121 | int __init mcheck_init(void) |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 2122 | { |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 2123 | mcheck_intel_therm_init(); |
Borislav Petkov | eef4dfa | 2015-08-12 18:29:38 +0200 | [diff] [blame] | 2124 | mce_register_decode_chain(&mce_srao_nb); |
Aravind Gopalakrishnan | 43eaa2a | 2015-03-23 10:42:53 -0500 | [diff] [blame] | 2125 | mcheck_vendor_init_severity(); |
Yong Wang | a2202aa | 2009-11-10 09:38:24 +0800 | [diff] [blame] | 2126 | |
Chen, Gong | 061120a | 2015-08-12 18:29:35 +0200 | [diff] [blame] | 2127 | INIT_WORK(&mce_work, mce_process_work); |
| 2128 | init_irq_work(&mce_irq_work, mce_irq_work_cb); |
| 2129 | |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 2130 | return 0; |
| 2131 | } |
Borislav Petkov | b33a636 | 2009-10-16 12:31:33 +0200 | [diff] [blame] | 2132 | |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2133 | /* |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2134 | * mce_syscore: PM support |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2135 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2136 | |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2137 | /* |
| 2138 | * Disable machine checks on suspend and shutdown. We can't really handle |
| 2139 | * them later. |
| 2140 | */ |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2141 | static void mce_disable_error_reporting(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2142 | { |
| 2143 | int i; |
| 2144 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2145 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2146 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2147 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2148 | if (b->init) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 2149 | wrmsrl(msr_ops.ctl(i), 0); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 2150 | } |
| 2152 | } |
| 2153 | |
| 2154 | static void vendor_disable_error_reporting(void) |
| 2155 | { |
| 2156 | /* |
| 2157 | * Don't clear on Intel CPUs. Some of these MSRs are socket-wide. |
| 2158 | * Disabling them for just a single offlined CPU is bad, since it will |
| 2159 | * inhibit reporting for all shared resources on the socket like the |
| 2160 | * last level cache (LLC), the integrated memory controller (iMC), etc. |
| 2161 | */ |
| 2162 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) |
| 2163 | return; |
| 2164 | |
| 2165 | mce_disable_error_reporting(); |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2166 | } |
| 2167 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2168 | static int mce_syscore_suspend(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2169 | { |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2170 | vendor_disable_error_reporting(); |
| 2171 | return 0; |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2172 | } |
| 2173 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2174 | static void mce_syscore_shutdown(void) |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2175 | { |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2176 | vendor_disable_error_reporting(); |
Andi Kleen | 973a2dd | 2009-02-12 13:39:32 +0100 | [diff] [blame] | 2177 | } |
| 2178 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2179 | /* |
| 2180 | * On resume clear all MCE state. Don't want to see leftovers from the BIOS. |
| 2181 | * Only one CPU is active at this time, the others get re-added later using |
| 2182 | * CPU hotplug: |
| 2183 | */ |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2184 | static void mce_syscore_resume(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | { |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2186 | __mcheck_cpu_init_generic(); |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2187 | __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 2188 | __mcheck_cpu_init_clear_banks(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2189 | } |
| 2190 | |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2191 | static struct syscore_ops mce_syscore_ops = { |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2192 | .suspend = mce_syscore_suspend, |
| 2193 | .shutdown = mce_syscore_shutdown, |
| 2194 | .resume = mce_syscore_resume, |
Rafael J. Wysocki | f3c6ea1 | 2011-03-23 22:15:54 +0100 | [diff] [blame] | 2195 | }; |
| 2196 | |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2197 | /* |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2198 | * mce_device: Sysfs support |
Hidetoshi Seto | c7cece8 | 2011-06-08 11:02:03 +0900 | [diff] [blame] | 2199 | */ |
| 2200 | |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2201 | static void mce_cpu_restart(void *data) |
| 2202 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2203 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 33edbf0 | 2009-06-15 17:18:45 +0900 | [diff] [blame] | 2204 | return; |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2205 | __mcheck_cpu_init_generic(); |
Aravind Gopalakrishnan | bb91f8c | 2016-04-30 14:33:53 +0200 | [diff] [blame] | 2206 | __mcheck_cpu_init_clear_banks(); |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2207 | __mcheck_cpu_init_timer(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2208 | } |
| 2209 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2210 | /* Reinit MCEs after user configuration changes */ |
Thomas Gleixner | d88203d | 2007-10-23 22:37:23 +0200 | [diff] [blame] | 2211 | static void mce_restart(void) |
| 2212 | { |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2213 | mce_timer_delete_all(); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2214 | on_each_cpu(mce_cpu_restart, NULL, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | } |
| 2216 | |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2217 | /* Toggle features for corrected errors */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2218 | static void mce_disable_cmci(void *data) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2219 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2220 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2221 | return; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2222 | cmci_clear(); |
| 2223 | } |
| 2224 | |
| 2225 | static void mce_enable_ce(void *all) |
| 2226 | { |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2227 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2228 | return; |
| 2229 | cmci_reenable(); |
| 2230 | cmci_recheck(); |
| 2231 | if (all) |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2232 | __mcheck_cpu_init_timer(); |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2233 | } |
| 2234 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2235 | static struct bus_type mce_subsys = { |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2236 | .name = "machinecheck", |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2237 | .dev_name = "machinecheck", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | }; |
| 2239 | |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2240 | DEFINE_PER_CPU(struct device *, mce_device); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2241 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2242 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2243 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2244 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2245 | { |
| 2246 | return container_of(attr, struct mce_bank, attr); |
| 2247 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2248 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2249 | static ssize_t show_bank(struct device *s, struct device_attribute *attr, |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2250 | char *buf) |
| 2251 | { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2252 | return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2253 | } |
| 2254 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2255 | static ssize_t set_bank(struct device *s, struct device_attribute *attr, |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2256 | const char *buf, size_t size) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2257 | { |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2258 | u64 new; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2259 | |
Daniel Walter | 164109e | 2014-08-08 14:24:03 -0700 | [diff] [blame] | 2260 | if (kstrtou64(buf, 0, &new) < 0) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2261 | return -EINVAL; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2262 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2263 | attr_to_bank(attr)->ctl = new; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2264 | mce_restart(); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2265 | |
Hidetoshi Seto | 9319cec | 2009-04-14 17:26:30 +0900 | [diff] [blame] | 2266 | return size; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2267 | } |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2268 | |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2269 | static ssize_t |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2270 | show_trigger(struct device *s, struct device_attribute *attr, char *buf) |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2271 | { |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2272 | strcpy(buf, mce_helper); |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2273 | strcat(buf, "\n"); |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2274 | return strlen(mce_helper) + 1; |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2275 | } |
| 2276 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2277 | static ssize_t set_trigger(struct device *s, struct device_attribute *attr, |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2278 | const char *buf, size_t siz) |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2279 | { |
| 2280 | char *p; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2281 | |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2282 | strncpy(mce_helper, buf, sizeof(mce_helper)); |
| 2283 | mce_helper[sizeof(mce_helper)-1] = 0; |
Hidetoshi Seto | 1020bcb | 2009-06-15 17:20:57 +0900 | [diff] [blame] | 2284 | p = strchr(mce_helper, '\n'); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2285 | |
Jan Beulich | e9084ec | 2009-07-16 09:45:11 +0100 | [diff] [blame] | 2286 | if (p) |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2287 | *p = 0; |
| 2288 | |
Jan Beulich | e9084ec | 2009-07-16 09:45:11 +0100 | [diff] [blame] | 2289 | return strlen(mce_helper) + !!p; |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2290 | } |
| 2291 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2292 | static ssize_t set_ignore_ce(struct device *s, |
| 2293 | struct device_attribute *attr, |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2294 | const char *buf, size_t size) |
| 2295 | { |
| 2296 | u64 new; |
| 2297 | |
Daniel Walter | 164109e | 2014-08-08 14:24:03 -0700 | [diff] [blame] | 2298 | if (kstrtou64(buf, 0, &new) < 0) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2299 | return -EINVAL; |
| 2300 | |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2301 | if (mca_cfg.ignore_ce ^ !!new) { |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2302 | if (new) { |
| 2303 | /* disable ce features */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2304 | mce_timer_delete_all(); |
| 2305 | on_each_cpu(mce_disable_cmci, NULL, 1); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2306 | mca_cfg.ignore_ce = true; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2307 | } else { |
| 2308 | /* enable ce features */ |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2309 | mca_cfg.ignore_ce = false; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2310 | on_each_cpu(mce_enable_ce, (void *)1, 1); |
| 2311 | } |
| 2312 | } |
| 2313 | return size; |
| 2314 | } |
| 2315 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2316 | static ssize_t set_cmci_disabled(struct device *s, |
| 2317 | struct device_attribute *attr, |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2318 | const char *buf, size_t size) |
| 2319 | { |
| 2320 | u64 new; |
| 2321 | |
Daniel Walter | 164109e | 2014-08-08 14:24:03 -0700 | [diff] [blame] | 2322 | if (kstrtou64(buf, 0, &new) < 0) |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2323 | return -EINVAL; |
| 2324 | |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2325 | if (mca_cfg.cmci_disabled ^ !!new) { |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2326 | if (new) { |
| 2327 | /* disable cmci */ |
Hidetoshi Seto | 9aaef96 | 2011-06-17 04:40:36 -0400 | [diff] [blame] | 2328 | on_each_cpu(mce_disable_cmci, NULL, 1); |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2329 | mca_cfg.cmci_disabled = true; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2330 | } else { |
| 2331 | /* enable cmci */ |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2332 | mca_cfg.cmci_disabled = false; |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2333 | on_each_cpu(mce_enable_ce, NULL, 1); |
| 2334 | } |
| 2335 | } |
| 2336 | return size; |
| 2337 | } |
| 2338 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2339 | static ssize_t store_int_with_restart(struct device *s, |
| 2340 | struct device_attribute *attr, |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2341 | const char *buf, size_t size) |
| 2342 | { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2343 | ssize_t ret = device_store_int(s, attr, buf, size); |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2344 | mce_restart(); |
| 2345 | return ret; |
| 2346 | } |
| 2347 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2348 | static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2349 | static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); |
Borislav Petkov | 84c2559 | 2012-10-15 19:59:18 +0200 | [diff] [blame] | 2350 | static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2351 | static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2352 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2353 | static struct dev_ext_attribute dev_attr_check_interval = { |
| 2354 | __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), |
Andi Kleen | b56f642 | 2009-05-27 21:56:52 +0200 | [diff] [blame] | 2355 | &check_interval |
| 2356 | }; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2357 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2358 | static struct dev_ext_attribute dev_attr_ignore_ce = { |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2359 | __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), |
| 2360 | &mca_cfg.ignore_ce |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2361 | }; |
| 2362 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2363 | static struct dev_ext_attribute dev_attr_cmci_disabled = { |
Borislav Petkov | 7af19e4 | 2012-10-15 20:25:17 +0200 | [diff] [blame] | 2364 | __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), |
| 2365 | &mca_cfg.cmci_disabled |
Hidetoshi Seto | 9af43b5 | 2009-06-15 17:21:36 +0900 | [diff] [blame] | 2366 | }; |
| 2367 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2368 | static struct device_attribute *mce_device_attrs[] = { |
| 2369 | &dev_attr_tolerant.attr, |
| 2370 | &dev_attr_check_interval.attr, |
| 2371 | &dev_attr_trigger, |
| 2372 | &dev_attr_monarch_timeout.attr, |
| 2373 | &dev_attr_dont_log_ce.attr, |
| 2374 | &dev_attr_ignore_ce.attr, |
| 2375 | &dev_attr_cmci_disabled.attr, |
Andi Kleen | a98f0dd | 2007-02-13 13:26:23 +0100 | [diff] [blame] | 2376 | NULL |
| 2377 | }; |
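| | /* |
| | * Together with the per-bank attributes created in mce_device_create() |
| | * below, these show up in sysfs as (illustrative paths, assuming the |
| | * usual layout produced by subsys_system_register()): |
| | * |
| | *	/sys/devices/system/machinecheck/machinecheck<cpu>/tolerant |
| | *	/sys/devices/system/machinecheck/machinecheck<cpu>/check_interval |
| | *	/sys/devices/system/machinecheck/machinecheck<cpu>/trigger |
| | *	/sys/devices/system/machinecheck/machinecheck<cpu>/bank<N> |
| | */ |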
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2378 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2379 | static cpumask_var_t mce_device_initialized; |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2380 | |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2381 | static void mce_device_release(struct device *dev) |
| 2382 | { |
| 2383 | kfree(dev); |
| 2384 | } |
| 2385 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2386 | /* Per cpu device init. All of the cpus still share the same ctrl bank: */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2387 | static int mce_device_create(unsigned int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2388 | { |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2389 | struct device *dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2390 | int err; |
Hidetoshi Seto | b1f49f9 | 2009-06-18 14:53:24 +0900 | [diff] [blame] | 2391 | int i, j; |
Mike Travis | 92cb761 | 2007-10-19 20:35:04 +0200 | [diff] [blame] | 2392 | |
Andreas Herrmann | 9036755 | 2007-11-07 02:12:58 +0100 | [diff] [blame] | 2393 | if (!mce_available(&boot_cpu_data)) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2394 | return -EIO; |
| 2395 | |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2396 | dev = kzalloc(sizeof *dev, GFP_KERNEL); |
| 2397 | if (!dev) |
| 2398 | return -ENOMEM; |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2399 | dev->id = cpu; |
| 2400 | dev->bus = &mce_subsys; |
Greg Kroah-Hartman | e032d807 | 2012-01-16 14:40:28 -0800 | [diff] [blame] | 2401 | dev->release = &mce_device_release; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2402 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2403 | err = device_register(dev); |
Levente Kurusa | 853d9b1 | 2013-11-29 21:28:48 +0100 | [diff] [blame] | 2404 | if (err) { |
| 2405 | put_device(dev); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2406 | return err; |
Levente Kurusa | 853d9b1 | 2013-11-29 21:28:48 +0100 | [diff] [blame] | 2407 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2408 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2409 | for (i = 0; mce_device_attrs[i]; i++) { |
| 2410 | err = device_create_file(dev, mce_device_attrs[i]); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2411 | if (err) |
| 2412 | goto error; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2413 | } |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2414 | for (j = 0; j < mca_cfg.banks; j++) { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2415 | err = device_create_file(dev, &mce_banks[j].attr); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2416 | if (err) |
| 2417 | goto error2; |
| 2418 | } |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2419 | cpumask_set_cpu(cpu, mce_device_initialized); |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2420 | per_cpu(mce_device, cpu) = dev; |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2421 | |
| 2422 | return 0; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2423 | error2: |
Hidetoshi Seto | b1f49f9 | 2009-06-18 14:53:24 +0900 | [diff] [blame] | 2424 | while (--j >= 0) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2425 | device_remove_file(dev, &mce_banks[j].attr); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2426 | error: |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2427 | while (--i >= 0) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2428 | device_remove_file(dev, mce_device_attrs[i]); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2429 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2430 | device_unregister(dev); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2431 | |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2432 | return err; |
| 2433 | } |
| 2434 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2435 | static void mce_device_remove(unsigned int cpu) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2436 | { |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2437 | struct device *dev = per_cpu(mce_device, cpu); |
Shaohua Li | 73ca535 | 2006-01-11 22:43:06 +0100 | [diff] [blame] | 2438 | int i; |
| 2439 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2440 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2441 | return; |
| 2442 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2443 | for (i = 0; mce_device_attrs[i]; i++) |
| 2444 | device_remove_file(dev, mce_device_attrs[i]); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2445 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2446 | for (i = 0; i < mca_cfg.banks; i++) |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2447 | device_remove_file(dev, &mce_banks[i].attr); |
Ingo Molnar | cb491fc | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2448 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2449 | device_unregister(dev); |
| 2450 | cpumask_clear_cpu(cpu, mce_device_initialized); |
Greg Kroah-Hartman | d6126ef | 2012-01-26 15:49:14 -0800 | [diff] [blame] | 2451 | per_cpu(mce_device, cpu) = NULL; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2452 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2453 | |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2454 | /* Make sure there are no machine checks on offlined CPUs. */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2455 | static void mce_disable_cpu(void *h) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2456 | { |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2457 | unsigned long action = *(unsigned long *)h; |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2458 | |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2459 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2460 | return; |
Hidetoshi Seto | 767df1b | 2009-11-26 17:29:02 +0900 | [diff] [blame] | 2461 | |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2462 | if (!(action & CPU_TASKS_FROZEN)) |
| 2463 | cmci_clear(); |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2464 | |
Ashok Raj | 6e06780 | 2015-09-28 09:21:43 +0200 | [diff] [blame] | 2465 | vendor_disable_error_reporting(); |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2466 | } |
| 2467 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2468 | static void mce_reenable_cpu(void *h) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2469 | { |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2470 | unsigned long action = *(unsigned long *)h; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2471 | int i; |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2472 | |
Christoph Lameter | 89cbc76 | 2014-08-17 12:30:40 -0500 | [diff] [blame] | 2473 | if (!mce_available(raw_cpu_ptr(&cpu_info))) |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2474 | return; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2475 | |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2476 | if (!(action & CPU_TASKS_FROZEN)) |
| 2477 | cmci_reenable(); |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2478 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2479 | struct mce_bank *b = &mce_banks[i]; |
Ingo Molnar | 11868a2 | 2009-09-23 17:49:55 +0200 | [diff] [blame] | 2480 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2481 | if (b->init) |
Yazen Ghannam | d9d73fc | 2016-04-30 14:33:55 +0200 | [diff] [blame] | 2482 | wrmsrl(msr_ops.ctl(i), b->ctl); |
Andi Kleen | 06b7a7a | 2009-04-27 18:37:43 +0200 | [diff] [blame] | 2483 | } |
Andi Kleen | d6b7558 | 2009-02-12 13:39:31 +0100 | [diff] [blame] | 2484 | } |
| 2485 | |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2486 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2487 | static int |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2488 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2489 | { |
| 2490 | unsigned int cpu = (unsigned long)hcpu; |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2491 | struct timer_list *t = &per_cpu(mce_timer, cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2492 | |
Thomas Gleixner | 1a65f97 | 2012-07-19 13:59:40 -0400 | [diff] [blame] | 2493 | switch (action & ~CPU_TASKS_FROZEN) { |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2494 | case CPU_ONLINE: |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2495 | mce_device_create(cpu); |
Rafael J. Wysocki | 8735728 | 2008-08-22 22:23:09 +0200 | [diff] [blame] | 2496 | if (threshold_cpu_callback) |
| 2497 | threshold_cpu_callback(action, cpu); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2498 | break; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2499 | case CPU_DEAD: |
Rafael J. Wysocki | 8735728 | 2008-08-22 22:23:09 +0200 | [diff] [blame] | 2500 | if (threshold_cpu_callback) |
| 2501 | threshold_cpu_callback(action, cpu); |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2502 | mce_device_remove(cpu); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 2503 | mce_intel_hcpu_update(cpu); |
Borislav Petkov | 38356c1 | 2014-05-22 16:40:54 +0200 | [diff] [blame] | 2504 | |
| 2505 | /* intentionally ignoring frozen here */ |
| 2506 | if (!(action & CPU_TASKS_FROZEN)) |
| 2507 | cmci_rediscover(); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2508 | break; |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2509 | case CPU_DOWN_PREPARE: |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2510 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); |
Chen Gong | 55babd8 | 2012-08-09 11:44:51 -0700 | [diff] [blame] | 2511 | del_timer_sync(t); |
Andi Kleen | 52d168e | 2009-02-12 13:39:29 +0100 | [diff] [blame] | 2512 | break; |
| 2513 | case CPU_DOWN_FAILED: |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2514 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); |
Thomas Gleixner | 26c3c28 | 2012-07-19 13:59:39 -0400 | [diff] [blame] | 2515 | mce_start_timer(cpu, t); |
Andi Kleen | 88ccbed | 2009-02-12 13:49:36 +0100 | [diff] [blame] | 2516 | break; |
Thomas Gleixner | 1a65f97 | 2012-07-19 13:59:40 -0400 | [diff] [blame] | 2517 | } |
| 2518 | |
Andreas Herrmann | bae19fe | 2007-11-14 17:00:44 -0800 | [diff] [blame] | 2519 | return NOTIFY_OK; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2520 | } |
| 2521 | |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 2522 | static struct notifier_block mce_cpu_notifier = { |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2523 | .notifier_call = mce_cpu_callback, |
| 2524 | }; |
| 2525 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2526 | static __init void mce_init_banks(void) |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2527 | { |
| 2528 | int i; |
| 2529 | |
Borislav Petkov | d203f0b | 2012-10-15 18:03:57 +0200 | [diff] [blame] | 2530 | for (i = 0; i < mca_cfg.banks; i++) { |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2531 | struct mce_bank *b = &mce_banks[i]; |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2532 | struct device_attribute *a = &b->attr; |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2533 | |
Eric W. Biederman | a07e415 | 2010-02-11 15:23:05 -0800 | [diff] [blame] | 2534 | sysfs_attr_init(&a->attr); |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2535 | a->attr.name = b->attrname; |
| 2536 | snprintf(b->attrname, ATTR_LEN, "bank%d", i); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2537 | |
| 2538 | a->attr.mode = 0644; |
| 2539 | a->show = show_bank; |
| 2540 | a->store = set_bank; |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2541 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2542 | } |
| 2543 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2544 | static __init int mcheck_init_device(void) |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2545 | { |
| 2546 | int err; |
| 2547 | int i = 0; |
| 2548 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2549 | if (!mce_available(&boot_cpu_data)) { |
| 2550 | err = -EIO; |
| 2551 | goto err_out; |
| 2552 | } |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2553 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2554 | if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { |
| 2555 | err = -ENOMEM; |
| 2556 | goto err_out; |
| 2557 | } |
Rusty Russell | 996867d | 2009-03-13 14:49:51 +1030 | [diff] [blame] | 2558 | |
Andi Kleen | cebe182 | 2009-07-09 00:31:43 +0200 | [diff] [blame] | 2559 | mce_init_banks(); |
Andi Kleen | 0d7482e3 | 2009-02-17 23:07:13 +0100 | [diff] [blame] | 2560 | |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2561 | err = subsys_system_register(&mce_subsys, NULL); |
Akinobu Mita | d435d86 | 2007-10-18 03:05:15 -0700 | [diff] [blame] | 2562 | if (err) |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2563 | goto err_out_mem; |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2564 | |
Srivatsa S. Bhat | 82a8f13 | 2014-03-11 02:07:04 +0530 | [diff] [blame] | 2565 | cpu_notifier_register_begin(); |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2566 | for_each_online_cpu(i) { |
Kay Sievers | 8a25a2f | 2011-12-21 14:29:42 -0800 | [diff] [blame] | 2567 | err = mce_device_create(i); |
Srivatsa S. Bhat | 82a8f13 | 2014-03-11 02:07:04 +0530 | [diff] [blame] | 2568 | if (err) { |
Borislav Petkov | 27c9341 | 2014-06-20 23:16:45 +0200 | [diff] [blame] | 2569 | /* |
| 2570 | * Register notifier anyway (and do not unreg it) so |
| 2571 | * that we don't leave undeleted timers, see notifier |
| 2572 | * callback above. |
| 2573 | */ |
| 2574 | __register_hotcpu_notifier(&mce_cpu_notifier); |
Srivatsa S. Bhat | 82a8f13 | 2014-03-11 02:07:04 +0530 | [diff] [blame] | 2575 | cpu_notifier_register_done(); |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2576 | goto err_device_create; |
Srivatsa S. Bhat | 82a8f13 | 2014-03-11 02:07:04 +0530 | [diff] [blame] | 2577 | } |
Andi Kleen | 91c6d40 | 2005-07-28 21:15:39 -0700 | [diff] [blame] | 2578 | } |
| 2579 | |
Srivatsa S. Bhat | 82a8f13 | 2014-03-11 02:07:04 +0530 | [diff] [blame] | 2580 | __register_hotcpu_notifier(&mce_cpu_notifier); |
| 2581 | cpu_notifier_register_done(); |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2582 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2583 | register_syscore_ops(&mce_syscore_ops); |
| 2584 | |
Hidetoshi Seto | 93b62c3 | 2011-06-08 11:00:45 +0900 | [diff] [blame] | 2585 | /* register character device /dev/mcelog */ |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2586 | err = misc_register(&mce_chrdev_device); |
| 2587 | if (err) |
| 2588 | goto err_register; |
| 2589 | |
| 2590 | return 0; |
| 2591 | |
| 2592 | err_register: |
| 2593 | unregister_syscore_ops(&mce_syscore_ops); |
| 2594 | |
Mathieu Souchaud | 9c15a24 | 2014-05-28 09:12:37 +0200 | [diff] [blame] | 2595 | err_device_create: |
| 2596 | /* |
| 2597 | * We didn't keep track of which devices were created above, but |
| 2598 | * even if we had, the set of online cpus might have changed. |
| 2599 | * Play safe and remove for every possible cpu, since |
| 2600 | * mce_device_remove() will do the right thing. |
| 2601 | */ |
| 2602 | for_each_possible_cpu(i) |
| 2603 | mce_device_remove(i); |
| 2604 | |
| 2605 | err_out_mem: |
| 2606 | free_cpumask_var(mce_device_initialized); |
| 2607 | |
| 2608 | err_out: |
| 2609 | pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err); |
Ingo Molnar | e9eee03 | 2009-04-08 12:31:17 +0200 | [diff] [blame] | 2610 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2611 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2612 | } |
Liu, Jinsong | cef12ee | 2012-06-07 19:56:51 +0800 | [diff] [blame] | 2613 | device_initcall_sync(mcheck_init_device); |
Ingo Molnar | a988d33 | 2009-04-08 12:31:25 +0200 | [diff] [blame] | 2614 | |
Andi Kleen | d7c3c9a | 2009-04-28 23:07:25 +0200 | [diff] [blame] | 2615 | /* |
| 2616 | * Old style boot options parsing. Only for compatibility. |
| 2617 | */ |
| 2618 | static int __init mcheck_disable(char *str) |
| 2619 | { |
Borislav Petkov | 1462594 | 2012-10-17 12:05:33 +0200 | [diff] [blame] | 2620 | mca_cfg.disabled = true; |
Andi Kleen | d7c3c9a | 2009-04-28 23:07:25 +0200 | [diff] [blame] | 2621 | return 1; |
| 2622 | } |
| 2623 | __setup("nomce", mcheck_disable); |
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 2624 | |
| 2625 | #ifdef CONFIG_DEBUG_FS |
| 2626 | struct dentry *mce_get_debugfs_dir(void) |
| 2627 | { |
| 2628 | static struct dentry *dmce; |
| 2629 | |
| 2630 | if (!dmce) |
| 2631 | dmce = debugfs_create_dir("mce", NULL); |
| 2632 | |
| 2633 | return dmce; |
| 2634 | } |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2635 | |
| 2636 | static void mce_reset(void) |
| 2637 | { |
| 2638 | cpu_missing = 0; |
Borislav Petkov | c7c9b39 | 2014-12-03 22:36:45 +0100 | [diff] [blame] | 2639 | atomic_set(&mce_fake_panicked, 0); |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2640 | atomic_set(&mce_executing, 0); |
| 2641 | atomic_set(&mce_callin, 0); |
| 2642 | atomic_set(&global_nwo, 0); |
| 2643 | } |
| 2644 | |
| 2645 | static int fake_panic_get(void *data, u64 *val) |
| 2646 | { |
| 2647 | *val = fake_panic; |
| 2648 | return 0; |
| 2649 | } |
| 2650 | |
| 2651 | static int fake_panic_set(void *data, u64 val) |
| 2652 | { |
| 2653 | mce_reset(); |
| 2654 | fake_panic = val; |
| 2655 | return 0; |
| 2656 | } |
| 2657 | |
| 2658 | DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, |
| 2659 | fake_panic_set, "%llu\n"); |
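| | /* |
| | * Illustrative use, assuming debugfs is mounted at /sys/kernel/debug |
| | * (root only, the file is created read-only): |
| | * |
| | *	echo 1 > /sys/kernel/debug/mce/fake_panic |
| | * |
| | * sets the fake_panic flag via fake_panic_set() above, so that a |
| | * subsequent MCE panic is only logged rather than taken for real. |
| | */ |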
| 2660 | |
Borislav Petkov | 5e09954 | 2009-10-16 12:31:32 +0200 | [diff] [blame] | 2661 | static int __init mcheck_debugfs_init(void) |
Huang Ying | bf783f9 | 2009-07-31 09:41:43 +0800 | [diff] [blame] | 2662 | { |
| 2663 | struct dentry *dmce, *ffake_panic; |
| 2664 | |
| 2665 | dmce = mce_get_debugfs_dir(); |
| 2666 | if (!dmce) |
| 2667 | return -ENOMEM; |
| 2668 | ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL, |
| 2669 | &fake_panic_fops); |
| 2670 | if (!ffake_panic) |
| 2671 | return -ENOMEM; |
| 2672 | |
| 2673 | return 0; |
| 2674 | } |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2675 | #else |
| 2676 | static int __init mcheck_debugfs_init(void) { return -EINVAL; } |
Huang Ying | 5be9ed2 | 2009-07-31 09:41:42 +0800 | [diff] [blame] | 2677 | #endif |
Chen, Gong | fd4cf79 | 2015-08-12 18:29:36 +0200 | [diff] [blame] | 2678 | |
| 2679 | static int __init mcheck_late_init(void) |
| 2680 | { |
| 2681 | mcheck_debugfs_init(); |
| 2682 | |
| 2683 | /* |
| 2684 | * Flush out everything that has been logged during early boot, now that |
| 2685 | * everything has been initialized (workqueues, decoders, ...). |
| 2686 | */ |
| 2687 | mce_schedule_work(); |
| 2688 | |
| 2689 | return 0; |
| 2690 | } |
| 2691 | late_initcall(mcheck_late_init); |