/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>
extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
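
/*
 * Illustrative sketch (not part of this file): the flush-then-read
 * pattern a caller such as the ptrace register code uses before
 * copying FP state out of the thread_struct.  "child" and "fp_regs"
 * are hypothetical names for a stopped tracee and a local buffer:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(&fp_regs, &child->thread.fpr, sizeof(fp_regs));
 *
 * Without the flush, the live FP state could still be in the CPU
 * registers and child->thread.fpr would be stale.
 */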
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is using the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_break(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
			<< (63 - 58);	/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
			<< (63 - 59);	/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
			>> 3;		/* privilege bits */

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
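
/*
 * Note on the (63 - n) shifts in set_dawr(): the DAWRX fields are
 * architected using IBM bit numbering, where bit 0 is the most
 * significant bit of the 64-bit register.  Architected bit n is
 * therefore set with 1UL << (63 - n); for example, the read/write
 * enable field at bits 58:59 lands at shift (63 - 58) = 5.
 */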
int set_break(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}
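
/*
 * Usage sketch (hypothetical caller, not from this file): the ptrace
 * and hw_breakpoint paths install a watchpoint roughly like this:
 *
 *	struct arch_hw_breakpoint brk = {
 *		.address = addr,
 *		.type    = HW_BRK_TYPE_WRITE | HW_BRK_TYPE_TRANSLATE |
 *			   HW_BRK_TYPE_USER,
 *		.len     = 8,
 *	};
 *	set_break(&brk);
 *
 * set_break() then routes to the DAWR or DABR depending on whether
 * the CPU advertises CPU_FTR_DAWR.
 */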
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR(s) the next time around.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	/* Only reinstall the breakpoint if the new one differs */
	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_break(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = "|";
		}
	printk(">");
}
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void
release_thread(struct task_struct *t)
{
}
/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	*dst = *src;
	return 0;
}
/*
 * Copy a thread..
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
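
/*
 * These two helpers back the PR_SET_FPEXC/PR_GET_FPEXC prctl()s.  A
 * minimal userspace sketch (assumes <sys/prctl.h>) that requests
 * precise FP exception mode and reads the setting back:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	int mode;
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 */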
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
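
/*
 * Likewise, set_endian()/get_endian() back the PR_SET_ENDIAN and
 * PR_GET_ENDIAN prctl()s.  Userspace sketch (assumes <sys/prctl.h>;
 * true little-endian needs CPU_FTR_REAL_LE):
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *	int e;
 *	prctl(PR_GET_ENDIAN, (unsigned long)&e);
 */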
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
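
/*
 * These back PR_SET_UNALIGN/PR_GET_UNALIGN; for example,
 * "prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS)" asks for SIGBUS on an
 * unaligned access instead of the kernel's fixup path (sketch; the
 * value is consumed by the alignment exception handling code).
 */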
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
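
/*
 * Frame-walk sketch for get_wchan()/show_stack(): a PowerPC stack
 * frame begins with the back chain at offset 0, so the step
 * "sp = *(unsigned long *)sp" moves to the caller's frame, and that
 * frame's saved LR sits at index STACK_FRAME_LR_SAVE (2 on 64-bit,
 * 1 on 32-bit):
 *
 *	[sp + 0]                              back chain to caller
 *	[sp + STACK_FRAME_LR_SAVE * sizeof(long)]  saved LR (return addr)
 */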
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
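
/*
 * Worked example for the ranges above, assuming 4K pages
 * (PAGE_SHIFT = 12): a 64-bit task draws rnd from
 * [0, 1 << (30 - 12)) = [0, 262144) pages, so after
 * "rnd << PAGE_SHIFT" the heap offset lies anywhere in [0, 1GB);
 * a 32-bit task gets [0, 1 << 11) = [0, 2048) pages, i.e. up to 8MB.
 */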
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}