#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>

#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
	},
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
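
/* Set while this CPU is executing the idle loop; see enter_idle()/__exit_idle(). */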
static DEFINE_PER_CPU(unsigned char, is_idle);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);

	dst->thread.vm86 = NULL;

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/* Careful, clear this in the TSS too: */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	fpu__drop(fpu);
}
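
/* Called on exec: reset hardware breakpoints, TLS slots and FPU state for the current task. */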
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	/* Must flip the CPU state synchronously with TIF_NOTSC in the current running context. */
	if (!test_and_set_thread_flag(TIF_NOTSC))
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	/* Must flip the CPU state synchronously with TIF_NOTSC in the current running context. */
	if (test_and_clear_thread_flag(TIF_NOTSC))
		hard_enable_TSC();
	preempt_enable();
}
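
/*
 * prctl(PR_GET_TSC/PR_SET_TSC) helpers: report or change whether RDTSC is
 * allowed (PR_TSC_ENABLE) or raises SIGSEGV (PR_TSC_SIGSEGV) for this task.
 */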
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
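
/*
 * Handle the less common context-switch work: block stepping
 * (DEBUGCTLMSR_BTF), the TIF_NOTSC CR4.TSD bit, the I/O permission bitmap
 * in the per-CPU TSS and user-return notifiers.
 */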
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
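
/* Power management idle function, if any. */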
static void (*x86_idle)(void);

static inline void play_dead(void)
{
	BUG();
}

void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	idle_notifier_call_chain(IDLE_START);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	idle_notifier_call_chain(IDLE_END);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
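
/* Hooks called from the generic idle loop. */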
void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
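
/* Install default_idle as the idle routine; returns true if one was already set. */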
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
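
/* Park this CPU: mark it offline, disable its local APIC, clear MCE state and halt forever. */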
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;)
		halt();
}
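
/* AMD Erratum 400 (C1E) detection state. */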
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI can not interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
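
/* Choose which routine x86_idle points to, based on CPU features and boot options. */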
void select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");

	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
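
/* Parse the "idle=" kernel command-line option. */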
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option idle=halt is added, halt is
		 * forced to be used for CPU idle. In that case CPU C2/C3
		 * states won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option "idle=nomwait" is added, it means
		 * that mwait will be disabled for CPU C2/C3 states.
		 * In that case it doesn't touch boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
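
/* Randomize the initial user stack pointer by up to 8 kB unless randomization is disabled. */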
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
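
/* Randomize the heap break within a 32 MB window above mm->brk. */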
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		return 0;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start + sizeof(thread_info)
	 * thread_info
	 * ----------- start
	 *
	 * The task's stack pointer points at the location where the
	 * frame pointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start + sizeof(struct thread_info);

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		return 0;

	fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
	do {
		if (fp < bottom || fp > top)
			return 0;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip))
			return ip;
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);
	return 0;
}