/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/smp_lock.h>
#include <linux/kobject.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#define MISC_MCELOG_MINOR	227

static int mce_dont_init;

/*
 * Tolerant levels:
 *  0: always panic on uncorrected errors, log corrected errors
 *  1: panic or SIGBUS on uncorrected errors, log corrected errors
 *  2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *  3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
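/*
 * Illustrative note (not part of the original source): tolerant is also
 * exposed read/write through sysfs via SYSDEV_INT_ATTR() further down in
 * this file, so it can be tuned at runtime. Assuming the "machinecheck"
 * sysdev class name used below, a sketch of the usage would be:
 *
 *	echo 2 > /sys/devices/system/machinecheck/machinecheck0/tolerant
 */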
static unsigned long notify_user;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = smp_processor_id();
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {

void mce_log(struct mce *mce)
	atomic_inc(&mce_events);

	entry = rcu_dereference(mcelog.next);
	/*
	 * When the buffer fills up, discard new entries.
	 * Assume that the earlier errors are the more
	 * interesting ones:
	 */
	if (entry >= MCE_LOG_LEN) {
		set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);

	/* Old left over entry. Skip: */
	if (mcelog.entry[entry].finished) {

	if (cmpxchg(&mcelog.next, entry, next) == entry)

	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));

	mcelog.entry[entry].finished = 1;

	set_bit(0, &notify_user);
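/*
 * Illustrative sketch (not in the original file): the expected logging
 * pattern, as used by the handlers below, is to initialize a struct mce
 * with mce_setup(), fill in the per-bank fields, and hand it off:
 *
 *	struct mce m;
 *
 *	mce_setup(&m);
 *	m.bank = i;			// bank index being reported
 *	rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
 *	mce_log(&m);
 *
 * mce_log() itself reserves a slot with cmpxchg() on mcelog.next, so
 * concurrent loggers never take a lock.
 */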
133 static void print_mce(struct mce *m)
135 printk(KERN_EMERG "\n"
136 KERN_EMERG "HARDWARE ERROR\n"
138 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
139 m->cpu, m->mcgstatus, m->bank, m->status);
141 printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
142 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
144 if (m->cs == __KERNEL_CS)
145 print_symbol("{%s}", m->ip);
148 printk(KERN_EMERG "TSC %llx ", m->tsc);
150 printk("ADDR %llx ", m->addr);
152 printk("MISC %llx ", m->misc);
154 printk(KERN_EMERG "This is not a software problem!\n");
155 printk(KERN_EMERG "Run through mcelog --ascii to decode "
156 "and contact your hardware vendor\n");
static void mce_panic(char *msg, struct mce *backup, unsigned long start)
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)

int mce_available(struct cpuinfo_x86 *c)
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	for (i = 0; i < banks; i++) {
		if (!bank[i] || !test_bit(i, *b))
			continue;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected events are handled by the exception handler
		 * when it is enabled. But when the exception is disabled, log
		 * everything else.
		 *
		 * TBD: do the same check for MCI_STATUS_EN here?
		 */
		if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
			continue;

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG)) {
			mce_log(&m);
			add_taint(TAINT_MACHINE_CHECK);

		/*
		 * Clear state for this bank.
		 */
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */
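/*
 * Illustrative usage (taken from the timer and init paths in this file):
 * the periodic poller calls
 *
 *	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
 *
 * while mce_init() drains boot-time leftovers over all banks with MCP_UC
 * (and MCP_DONTLOG when boot logging is disabled).
 */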
/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context, not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, panicm;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);

	atomic_inc(&mce_entry);

	if (notify_die(DIE_NMI, "machine check", regs, error_code,
		       18, SIGKILL) == NOTIFY_STOP)

	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);

	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non-uncorrected errors are handled by machine_check_poll.
		 */
		if ((m.status & MCI_STATUS_UC) == 0)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		__set_bit(i, toclear);

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble.  If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;

			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);

		/*
		 * Did this bank cause the exception?
		 *
		 * Assume that the bank with uncorrectable errors did it,
		 * and that there is only a single one:
		 */
		if ((m.status & MCI_STATUS_UC) &&
		    (m.status & MCI_STATUS_EN)) {

	/*
	 * If we didn't find an uncorrectable error, pick
	 * the last one (shouldn't happen, just being safe).
	 */

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done.  Try to kill as little as possible.  If we can kill just
	 * one task, do that.  If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS.  Otherwise, panic if tolerance is low.
		 *
		 * force_sig() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			force_sig(SIGBUS, current);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				  &panicm, mcestart);

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
	wrmsrl(MSR_IA32_MCG_STATUS, 0);

	atomic_dec(&mce_entry);
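/*
 * Summary note (added for clarity, not in the original): combining the
 * tolerant levels documented at the top of this file with the
 * no_way_out/kill_it flags computed above:
 *
 *	no_way_out && tolerant < 3	-> panic
 *	kill_it && tolerant < 3		-> SIGBUS if the error hit user
 *					   space, otherwise panic when
 *					   panic_on_oops || tolerant < 2
 *	tolerant == 3			-> log only, never panic or SIGBUS
 */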
#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status:	Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
	m.bank = MCE_THERMAL_BANK;
#endif /* CONFIG_X86_MCE_INTEL */
/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);
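/*
 * Worked example (added for clarity): with check_interval = 300s the
 * per-CPU interval starts at 300*HZ jiffies. Each mcheck_timer() run
 * that reports work via mce_notify_user() halves it (floored at HZ/100,
 * i.e. 10ms), and each quiet run doubles it again, capped at
 * round_jiffies_relative(check_interval*HZ):
 *
 *	300s -> 150s -> ... -> 10ms	(while errors keep arriving)
 *	10ms -> 20ms -> ... -> 300s	(once the machine is quiet)
 */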
static void mcheck_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);

	WARN_ON(smp_processor_id() != data);

	if (mce_available(&current_cpu_data)) {
		machine_check_poll(MCP_TIMESTAMP,
				   &__get_cpu_var(mce_poll_banks));

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(next_interval);
	if (mce_notify_user()) {
		*n = max(*n/2, HZ/100);
	} else {
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
	}

	t->expires = jiffies + *n;
static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_user(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &notify_user)) {
		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (trigger[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			printk(KERN_INFO "Machine check events logged\n");
/* see if the idle task needs to notify userspace: */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action,
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	idle_notifier_register(&mce_idle_notifier);

__initcall(periodic_mcheck_init);
/*
 * Initialize Machine Checks for a CPU.
 */
static int mce_cap_init(void)
	rdmsrl(MSR_IA32_MCG_CAP, cap);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);

	bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);

	memset(bank, 0xff, banks * sizeof(u64));

	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;
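/*
 * Background note (added for clarity): MSR_IA32_MCG_CAP encodes the bank
 * count in bits [7:0] (read into b above), MCG_CTL_P in bit 8, and the
 * extended-register info tested here: bit 9 is MCG_EXT_P and bits [23:16]
 * give the extended register count, which must be >= 9 for the saved EIP
 * register MSR_IA32_MCG_EIP to exist.
 */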
static void mce_init(void *dummy)
{
	mce_banks_t all_banks;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
/* Add per CPU specific workarounds here */
static void mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&bank[4]);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */

static void mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
static void mce_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(next_interval);

	*n = check_interval * HZ;

	setup_timer(t, mcheck_timer, smp_processor_id());
	t->expires = round_jiffies(jiffies + *n);

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	if (!mce_available(c))

	if (mce_cap_init() < 0) {
/*
 * Character device to read and clear the MCE log.
 */
static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);
	open_count--;
	open_exclu = 0;
	spin_unlock(&mce_state_lock);

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
static DEFINE_MUTEX(mce_read_mutex);

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);

	for (i = prev; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0,
				       sizeof(struct mce));

		err |= copy_to_user(buf, mcelog.entry + i,
				    sizeof(struct mce));
		buf += sizeof(struct mce);

	memset(mcelog.entry + prev, 0,
	       (next - prev) * sizeof(struct mce));

	next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);

	return err ? -EFAULT : buf - ubuf;
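/*
 * Illustrative userspace sketch (not part of this file): because only
 * full reads are supported, a consumer such as mcelog(8) would read the
 * whole buffer in one call, e.g.:
 *
 *	struct mce records[MCE_LOG_LEN];
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	ssize_t n = read(fd, records, sizeof(records));
 *	// n is a multiple of sizeof(struct mce); entries are cleared
 *	// from the kernel buffer as a side effect
 *
 * Record and buffer sizes can be queried with the MCE_GET_RECORD_LEN and
 * MCE_GET_LOG_LEN ioctls handled below.
 */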
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}

static const struct file_operations mce_chrdev_ops = {
	.open			= mce_open,
	.release		= mce_release,
	.read			= mce_read,
	.poll			= mce_poll,
	.unlocked_ioctl		= mce_ioctl,
};

static struct miscdevice mce_log_device = {
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)

__setup("nomce", mcheck_disable);

/*
 * mce=off		disables machine check
 * mce=TOLERANCELEVEL	(number, see above)
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = (str[0] == 'b');
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk(KERN_INFO "mce= argument %s ignored. Please use /sys\n",
		       str);

__setup("mce=", mcheck_enable);
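/*
 * Usage examples (added for clarity, matching the options parsed above):
 *
 *	mce=off		turn machine check handling off at boot
 *	mce=2		set tolerant level 2 (SIGBUS or log uncorrected
 *			errors if possible, log corrected errors)
 *	mce=nobootlog	suppress logging of boot-time leftovers
 *
 * These are legacy aliases; runtime configuration goes through /sys, as
 * the fallback printk above suggests.
 */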
/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable(void)
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
	return 0;
}

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
	return mce_disable();
}

static int mce_shutdown(struct sys_device *dev)
{
	return mce_disable();
}

/*
 * On resume, clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	mce_cpu_features(&current_cpu_data);
static void mce_cpu_restart(void *data)
{
	del_timer_sync(&__get_cpu_var(mce_timer));
	if (mce_available(&current_cpu_data))
		mce_init(NULL);

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

static struct sysdev_class mce_sysclass = {
	.suspend	= mce_suspend,
	.shutdown	= mce_shutdown,
	.resume		= mce_resume,
	.name		= "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s,		\
				     struct sysdev_attribute *attr,	\
				     char *buf) {			\
		return sprintf(buf, "%lx\n", (unsigned long)var);	\
	}								\
	static ssize_t set_ ## name(struct sys_device *s,		\
				    struct sysdev_attribute *attr,	\
				    const char *buf, size_t siz) {	\
		char *end;						\
		unsigned long new = simple_strtoul(buf, &end, 0);	\
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
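/*
 * Expansion note (added for clarity): ACCESSOR(check_interval,
 * check_interval, mce_restart()) below therefore generates
 * show_check_interval()/set_check_interval() plus a 0644
 * "check_interval" sysdev attribute, where the third macro argument is
 * the statement run after a successful store; here it re-initializes
 * machine checks on all CPUs via mce_restart().
 */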
static struct sysdev_attribute *bank_attrs;

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
			 char *buf)
{
	u64 b = bank[attr - bank_attrs];

	return sprintf(buf, "%llx\n", b);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
			const char *buf, size_t siz)
{
	char *end;
	u64 new = simple_strtoull(buf, &end, 0);

	bank[attr - bank_attrs] = new;

static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
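/*
 * Illustrative usage (not in the original file): the trigger attribute
 * stores the path of a helper program that mce_do_trigger() runs via
 * call_usermodehelper() whenever new events are logged. Assuming the
 * "machinecheck" sysdev class path, configuring it would look like:
 *
 *	echo /usr/sbin/mce-trigger > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * (the helper path here is a made-up example)
 */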
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);

ACCESSOR(check_interval, check_interval, mce_restart())

static struct sysdev_attribute *mce_attributes[] = {
	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
	NULL
};

static cpumask_var_t mce_device_initialized;
/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce, cpu).id = cpu;
	per_cpu(device_mce, cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce, cpu));

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce, cpu),
					 mce_attributes[i]);
	for (i = 0; i < banks; i++) {
		err = sysdev_create_file(&per_cpu(device_mce, cpu),
					 &bank_attrs[i]);
	cpumask_set_cpu(cpu, mce_device_initialized);

	sysdev_remove_file(&per_cpu(device_mce, cpu),
	sysdev_remove_file(&per_cpu(device_mce, cpu),
	sysdev_unregister(&per_cpu(device_mce, cpu));

static __cpuinit void mce_remove_device(unsigned int cpu)
	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	for (i = 0; i < banks; i++)
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   &bank_attrs[i]);
	sysdev_unregister(&per_cpu(device_mce, cpu));
	cpumask_clear_cpu(cpu, mce_device_initialized);
/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;

	if (!mce_available(&current_cpu_data))
		return;
	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
}

static void mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;

	if (!mce_available(&current_cpu_data))
		return;
	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
/* Get notified when a CPU comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		t->expires = round_jiffies(jiffies +
					   __get_cpu_var(next_interval));
		add_timer_on(t, cpu);
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
	case CPU_POST_DEAD:
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};
static __init int mce_init_banks(void)
	bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
			     GFP_KERNEL);

	for (i = 0; i < banks; i++) {
		struct sysdev_attribute *a = &bank_attrs[i];

		a->attr.name	= kasprintf(GFP_KERNEL, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;

		kfree(bank_attrs[i].attr.name);
static __init int mce_init_device(void)
	if (!mce_available(&boot_cpu_data))
		return -EIO;

	alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);

	err = mce_init_banks();
	if (err)
		return err;

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);

device_initcall(mce_init_device);
#else /* CONFIG_X86_32: */

int nr_mce_banks;
EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled == 1)
		return;

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		amd_mcheck_init(c);
		break;

	case X86_VENDOR_INTEL:
		if (c->x86 == 5)
			intel_p5_mcheck_init(c);
		if (c->x86 == 6)
			intel_p6_mcheck_init(c);
		if (c->x86 == 15)
			intel_p4_mcheck_init(c);
		break;

	case X86_VENDOR_CENTAUR:
		if (c->x86 == 5)
			winchip_mcheck_init(c);
		break;

static int __init mcheck_disable(char *str)

static int __init mcheck_enable(char *str)

__setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable);

#endif /* CONFIG_X86_32 */