/*
 * SGI NMI support routines
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 * Copyright (c) Mike Travis
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each cpu and wait
 * until all cpus have arrived into the nmi handler.  If some cpus do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running, as these generate an enormous number of NMIs per
 * second (~4M/s for 1024 cpu threads).  Our secondary NMI handler is
 * very short, as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal mentioned above, and does not read the UV Hub's MMR.
 */
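
/*
 * Rough sketch of the flow implemented below:
 *   1) The BMC sets the hub NMI MMR pending bit and every cpu fields an NMI.
 *   2) uv_nmi_wait() gathers the cpus; the master sends IPI(NMI) pings to
 *      any cpus that have not arrived.
 *   3) The selected action runs ("kdb" by default; see uv_nmi_action below).
 *   4) The MMR pending bit is cleared once per hub and global state is reset.
 */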

static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);

static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;

/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2

/*
 * Default is all stack dumps go to the console and buffer.
 * Lower the level to send output to the log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);

/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)

static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
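
/*
 * Example of reading and clearing the counters above, assuming this file
 * is built in as uv_nmi.o (so the parameters appear under the "uv_nmi"
 * module directory in sysfs):
 *
 *   # cat /sys/module/uv_nmi/parameters/nmi_count
 *   # echo 0 > /sys/module/uv_nmi/parameters/nmi_count   (any write clears)
 */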

/*
 * The following values allow tuning for large systems under heavy loading.
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
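
/*
 * Units: initial_delay, slave_delay and loop_delay are passed to udelay()
 * and so are in microseconds; wait_count, retry_count and trigger_delay
 * are iteration counts.  With the defaults, a full gather in
 * uv_nmi_wait_cpus() below waits at most about
 *   initial_delay + retry_count * loop_delay = 100 + 500 * 100 us ~= 50 ms
 * (the loop delay is stretched 100x while only cpu 0 remains outstanding).
 */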
148 * "dump" - dump process stack for each cpu
149 * "ips" - dump IP info for each cpu
150 * "kdump" - do crash dump
151 * "kdb" - enter KDB (default)
152 * "kgdb" - enter KGDB
154 static char uv_nmi_action[8] = "kdb";
155 module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}
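
/*
 * Example of selecting the action, assuming a built-in uv_nmi object
 * (the parameter name follows from module_param_string() above):
 *
 *   uv_nmi.action=dump                                 (kernel command line)
 *   # echo dump > /sys/module/uv_nmi/parameters/action (at run time)
 */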

/* Set up which NMI support is present in the system */
static void uv_nmi_setup_mmrs(void)
{
	if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
		uv_write_local_mmr(UVH_NMI_MMRX_REQ,
				   1UL << UVH_NMI_MMRX_REQ_SHIFT);
		nmi_mmr = UVH_NMI_MMRX;
		nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
		pr_info("UV: NMI support: %s\n", UVH_NMI_MMRX_TYPE);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}

/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}

/*
 * If this is the first cpu in on this hub, set the hub_nmi "in_nmi" and
 * "owner" values and return true.  If it is the first cpu in on the
 * system, also set the global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}

/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;

	local64_inc(&uv_nmi_count);
	this_cpu_inc(uv_cpu_nmi.queries);

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			/* check hub MMR NMI flag */
			if (uv_nmi_test_mmr(hub_nmi)) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}
			/* MMR NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);
		} else {
			/* wait a moment for the hub nmi locker to set flag */
			cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/* check if this BMC missed setting the MMR NMI flag */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}
	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}

/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		uv_local_mmr_clear_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}

/* Ping non-responding cpus, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}

/* Clean up flags for cpus that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging = 0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}

/* Loop waiting as cpus enter the NMI handler */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new cpus coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* extend delay if waiting only for cpu 0 */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}

/* Wait until all slave cpus have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* indicate this cpu is in */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* if not the first cpu in (the master), then we are a slave cpu */
	if (!master)
		return;

	do {
		/* wait for all other cpus to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* if not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* if all cpus are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		 atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}

/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	pr_info("\nUV: %4s %6s %-32s %s   (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s ", cpu, current->pid, current->comm);
	printk_address(regs->ip);
}

/*
 * Dump this CPU's state.  If action was set to "kdump" and the crash_kexec
 * failed, then we provide "dump" as an alternate action.  Action "dump" now
 * also includes the "ips" (instruction pointers) action, whereas "ips" on
 * its own only displays instruction pointers for the non-idle CPUs.  This
 * is an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (cpu == 0)
		uv_nmi_dump_cpu_ip_hdr();

	if (current->pid != 0 || !uv_nmi_action_is("ips"))
		uv_nmi_dump_cpu_ip(cpu, regs);

	if (uv_nmi_action_is("dump")) {
		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}

	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}

/* Trigger a slave cpu to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}

/* Wait until all cpus are ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}

/* Walk through cpu list and dump state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			 uv_nmi_action_is("ips") ? "IPs" : "processes",
			 atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}
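
/*
 * Holding all cpus in an NMI handler for an extended period would
 * otherwise trip the soft-lockup and NMI watchdogs, the RCU stall
 * detector and the clocksource watchdog, so reset them all before
 * returning to normal operation.
 */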
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}

static atomic_t uv_nmi_kexec_failed;

#if defined(CONFIG_KEXEC_CORE)
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	/* Call crash to dump system state */
	if (master) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);

		pr_emerg("UV: crash_kexec unexpectedly returned, ");
		atomic_set(&uv_nmi_kexec_failed, 1);
		if (!kexec_crash_image) {
			pr_cont("crash kernel not loaded\n");
			return;
		}
		pr_cont("kexec busy, stalling cpus while waiting\n");
	}

	/* If crash_kexec fails the slaves should return, otherwise stall */
	while (atomic_read(&uv_nmi_kexec_failed) == 0)
		mdelay(10);
}

#else /* !CONFIG_KEXEC_CORE */
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
	if (master)
		pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
	atomic_set(&uv_nmi_kexec_failed, 1);
}
#endif /* !CONFIG_KEXEC_CORE */

#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action_is("kgdb"))
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */

/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb'
 * or 'kdb' has no effect on which is used.  See the KGDB documentation for
 * further information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;

		if (reason < 0)
			return;

		/* call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				     &uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */

/*
 * UV NMI handler
 */
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Indicate we are the first CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action_is("kdump")) {
		uv_nmi_kdump(cpu, master, regs);

		/* Unexpected return, revert action to "dump" */
		if (master)
			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
	}

	/* Pause as all cpus enter the NMI handler */
	uv_nmi_wait(master);

	/* Dump state of each cpu */
	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
		uv_nmi_dump_state(cpu, regs, master);

	/* Call KGDB/KDB if enabled */
	else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
		uv_call_kgdb_kdb(cpu, regs, master);

	/* Clear per_cpu "in nmi" flag */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (cpumask_weight(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
		atomic_set(&uv_nmi_kexec_failed, 0);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}

/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	this_cpu_inc(uv_cpu_nmi.queries);
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	this_cpu_inc(uv_cpu_nmi.pings);
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return ret;
}
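
/*
 * As described in the header comment, the main handler sits on the
 * NMI_UNKNOWN chain so that perf's NMI_LOCAL handlers run first, while
 * the lightweight ping handler sits on NMI_LOCAL so that pinged cpus
 * are pulled in even when perf events are claiming the NMI.
 */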
static void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

void uv_nmi_setup(void)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu, nid;

	/* Setup hub nmi info */
	uv_nmi_setup_mmrs();
	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
	uv_register_nmi_notifier();
}