 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
extern unsigned long _get_SP(void);

struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void giveup_fpu_maybe_transactional(struct task_struct *tsk)
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

void giveup_altivec_maybe_transactional(struct task_struct *tsk)
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

#else
#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu_maybe_transactional(tsk);

EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */
void enable_kernel_fp(void)
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu_maybe_transactional(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
	giveup_fpu_maybe_transactional(last_task_used_math);
#endif /* CONFIG_SMP */

EXPORT_SYMBOL(enable_kernel_fp);
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec_maybe_transactional(current);
	else
		giveup_altivec_notask();
	giveup_altivec_maybe_transactional(last_task_used_altivec);
#endif /* CONFIG_SMP */

EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
	if (tsk->thread.regs) {
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec_maybe_transactional(tsk);

EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
void enable_kernel_vsx(void)
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */

EXPORT_SYMBOL(enable_kernel_vsx);

void giveup_vsx(struct task_struct *tsk)
	giveup_fpu_maybe_transactional(tsk);
	giveup_altivec_maybe_transactional(tsk);

EXPORT_SYMBOL(giveup_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
	if (tsk->thread.regs) {
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);

EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
void enable_kernel_spe(void)
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */

EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
	if (tsk->thread.regs) {
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);

#endif /* CONFIG_SPE */
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);

#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);

#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
static void prime_debug_regs(struct debug_reg *debug)
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
/*
 * Unless neither the old nor the new thread is using the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_debug);

EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);

#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x

#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
	return -EINVAL;
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
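/*
 * For example, assuming the usual HW_BRK_TYPE_* encodings (READ 0x1,
 * WRITE 0x2, TRANSLATE 0x4, USER 0x8), a user-mode write watchpoint has
 * brk->type == 0xe, giving dabr = address | 0x6 (the write and translate
 * bits) and dabrx = 0xe >> 3 = 0x1 (user privilege only).
 */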
static inline int set_dawr(struct arch_hw_breakpoint *brk)
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
		<< (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
		<< (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
		>> 3; /* privilege bits */
	/* The DAWR length is stored in field MRD, bits 48:53, as a range in
	 * doublewords (64 bits) biased by -1, e.g. 0b000000 = 1DW and
	 * 0b111111 = 64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);
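	/*
	 * Worked example of the length encoding: any len of 1..8 bytes
	 * rounds up to one doubleword, so mrd = ((len + 7) >> 3) - 1 = 0
	 * (0b000000 == 1 DW); len == 64 gives mrd = 7, i.e. eight
	 * doublewords.
	 */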
	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;

void __set_breakpoint(struct arch_hw_breakpoint *brk)
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);

void set_breakpoint(struct arch_hw_breakpoint *brk)
	__set_breakpoint(brk);

DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have already been saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}
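	/*
	 * Worked example: if the task entered the transaction with MSR_FP
	 * set and flush_fp_to_thread() has since cleared MSR_FP in
	 * thr->regs->msr, then msr_diff contains MSR_FP and fp_state holds
	 * the transactional FPRs that giveup_fpu() saved; the memcpy above
	 * parks them in transact_fp before tm_reclaim() overwrites fp_state
	 * with the checkpointed values.
	 */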
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;
	tm_reclaim(thr, thr->regs->msr, cause);

	/* Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
void tm_reclaim_current(uint8_t cause)
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
static inline void tm_reclaim_task(struct task_struct *tsk)
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
static inline void tm_recheckpoint_new_task(struct task_struct *new)
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs need to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
static inline void __switch_to_tm(struct task_struct *prev)
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_reclaim_task(prev);
/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
756 unsigned long msr_diff;
758 clear_thread_flag(TIF_RESTORE_TM);
759 if (!MSR_TM_ACTIVE(regs->msr))
762 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
763 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
764 if (msr_diff & MSR_FP) {
766 load_fp_state(¤t->thread.fp_state);
767 regs->msr |= current->thread.fpexc_mode;
769 if (msr_diff & MSR_VEC) {
771 load_vr_state(¤t->thread.vr_state);
773 regs->msr |= msr_diff;
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/* Back up the TAR and DSCR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
	 * these will change them.
	 */
	save_early_sprs(&prev->thread);

	__switch_to_tm(prev);
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))
		giveup_spe(prev);
#endif /* CONFIG_SPE */
#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
	/*
	 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
	 * schedule DABR/DAWR set/unset during context switch.
	 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	current_thread_info()->local_flags |= _TLF_LAZY_MMU;
	__flush_tlb_pending(batch);
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
#endif /* CONFIG_PPC_BOOK3S_64 */
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
				sizeof(int));
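	/*
	 * With the default instructions_to_print of 16, pc starts
	 * 16 * 3/4 = 12 instructions (48 bytes) before regs->nip, so the
	 * NIP lands three quarters of the way through the dump.
	 */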
941 printk("Instruction dump:");
943 for (i = 0; i < instructions_to_print; i++) {
949 #if !defined(CONFIG_BOOKE)
950 /* If executing with the IMMU off, adjust pc rather
951 * than print XXXXXXXX.
953 if (!(regs->msr & MSR_IR))
954 pc = (unsigned long)phys_to_virt(pc);
957 if (!__kernel_text_address(pc) ||
958 probe_kernel_address((unsigned int __user *)pc, instr)) {
959 printk(KERN_CONT "XXXXXXXX ");
962 printk(KERN_CONT "<%08x> ", instr);
964 printk(KERN_CONT "%08x ", instr);
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)

static void printbits(unsigned long val, struct regbit *bits)
	const char *sep = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
void exit_thread(void)
	discard_lazy_cpu_state();

void flush_thread(void)
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
void release_thread(struct task_struct *t)

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	clear_task_ebb(dst);
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = ppc_function_entry((void *)usp);
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
				childregs->gpr[2] = childregs->gpr[6];
		}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
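	/*
	 * Rough picture of the child stack built above (highest address
	 * first; a schematic sketch, not exact offsets):
	 *
	 *   task_stack_page(p) + THREAD_SIZE
	 *     struct pt_regs        childregs (user/kthread register state)
	 *     STACK_FRAME_OVERHEAD  frame whose back-chain word is zeroed
	 *     struct pt_regs        kregs (holds the NIP _switch returns to)
	 *     STACK_FRAME_OVERHEAD  frame popped by _switch  <- sp
	 */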
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;

	kregs->nip = ppc_function_entry(f);
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */

	regs->msr = MSR_USER;
#ifdef CONFIG_PPC64
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);
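			/*
			 * That is, an ELFv1 function descriptor is three
			 * doublewords -- { entry address, TOC pointer,
			 * environment } -- and only the first two matter
			 * here.
			 */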
			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {

		regs->msr = MSR_USER64;
	} else {
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
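	/*
	 * For reference: prctl's classic modes are PR_FP_EXC_DISABLED (0),
	 * PR_FP_EXC_NONRECOV (1), PR_FP_EXC_ASYNC (2) and PR_FP_EXC_PRECISE
	 * (3); __pack_fe01() packs that two-bit mode into the
	 * MSR_FE0/MSR_FE1 bit positions, so e.g. DISABLED clears both bits
	 * and PRECISE sets both.
	 */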
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;

	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
int set_endian(struct task_struct *tsk, unsigned int val)
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;

int get_endian(struct task_struct *tsk, unsigned long adr)
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
	tsk->thread.align_ctl = val;
	return 0;

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}

	return 0;

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);

	return 0;
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
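	/*
	 * With 4K pages, ~PAGE_MASK == 0xfff, so the expression above
	 * lowers the stack start by a random 0-4095 bytes (at most one
	 * page) when randomization is enabled.
	 */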
static inline unsigned long brk_rnd(void)
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
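/*
 * Worked example with 4K pages (PAGE_SHIFT == 12): a 32-bit task draws
 * rnd from [0, 2^11) pages, i.e. an offset below 2^23 = 8MB; a 64-bit
 * task draws from [0, 2^18) pages, i.e. below 2^30 = 1GB, matching the
 * comment above.
 */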
unsigned long arch_randomize_brk(struct mm_struct *mm)
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif
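	/*
	 * SID_SHIFT_1T is 40, so 1UL << SID_SHIFT_1T is the 1TB boundary;
	 * the max_t() above lifts the heap base over it whenever 1TB
	 * segments are in use.
	 */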
	ret = PAGE_ALIGN(base + brk_rnd());