/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This
 * allows the CPU to raise an interrupt when a corrected machine check
 * happened. Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts,
 * which could otherwise race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(1 * HZ)
#define CMCI_STORM_THRESHOLD	15

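/*
 * A storm is declared when more than CMCI_STORM_THRESHOLD interrupts
 * arrive within one CMCI_STORM_INTERVAL window. While a storm is
 * active, CMCI is disabled and the owned banks are polled every
 * CMCI_POLL_INTERVAL instead.
 */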
static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;

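/*
 * Check whether CMCI is available on this CPU; on success the number
 * of MCE banks is reported through *banks.
 */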
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mce_cmci_disabled || mce_ignore_ce)
		return 0;
	/*
	 * The vendor check is not strictly needed, but the initialization
	 * is vendor keyed and this makes sure none of the backdoors are
	 * entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

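/*
 * Poll the banks this CPU owns. Only does anything while a CMCI storm
 * is in progress; in normal operation the interrupt handler logs
 * events directly.
 */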
void mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
}

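/*
 * A CPU is going offline. If it was part of an active storm, drop it
 * from the global storm count and reset its state so the accounting
 * stays consistent across hotplug.
 */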
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

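/*
 * Called from the MCE poll timer to drive the storm state machine: an
 * ACTIVE CPU moves to SUBSIDED once the timer has backed off to
 * CMCI_POLL_INTERVAL, and once no CPUs remain ACTIVE each CPU
 * re-enables CMCI and returns to interrupt mode.
 */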
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
	int r;

	if (interval < CMCI_POLL_INTERVAL)
		return interval;

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the
		 * timer interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		r = atomic_sub_return(1, &cmci_storm_on_cpus);
		if (r == 0)
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */
	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When
		 * that happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}

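/*
 * Count interrupts per CMCI_STORM_INTERVAL window. When more than
 * CMCI_STORM_THRESHOLD of them arrive within one window, declare a
 * storm: disable CMCI on this CPU, mark it ACTIVE and kick the poll
 * timer. Returns true when a storm is or already was in progress.
 */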
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;
	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);
	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_clear();
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_POLL_INTERVAL);
	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_irq();
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;

		if (test_bit(i, owned))
			continue;
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}
		val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
		val |= MCI_CTL2_CMCI_EN | CMCI_THRESHOLD;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

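/*
 * For reference: in IA32_MCi_CTL2, MCI_CTL2_CMCI_EN is the CMCI enable
 * bit and MCI_CTL2_CMCI_THRESHOLD_MASK covers the corrected-error
 * count threshold field. The write-then-read-back above works because
 * the enable bit reads back as zero on banks that do not implement
 * CMCI.
 */
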
/*
 * Just in case we missed an event during initialization, check all
 * the CMCI-owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i, banks;
	u64 val;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		val &= ~(MCI_CTL2_CMCI_EN|MCI_CTL2_CMCI_THRESHOLD_MASK);
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

/*
 * After a CPU went down, cycle through all the others and rediscover.
 * Must run in process context.
 */
void cmci_rediscover(int dying)
{
	int banks, cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);
	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same number */
		if (cmci_supported(&banks))
			cmci_discover(banks);
	}
	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}

/* Re-enable CMCI on this CPU in case a CPU down failed. */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}

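/*
 * Set up CMCI on this CPU: install the threshold vector, claim the
 * banks we can own, and unmask the CMCI LVT entry in the local APIC.
 */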
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with the APIC still disabled, but that's
	 * ok because only the vector is set up here. We check the banks
	 * again later for CPU #0 to make sure no events are missed.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}