2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
21 #include <linux/smp.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/elf.h>
28 #include <linux/init.h>
29 #include <linux/prctl.h>
30 #include <linux/init_task.h>
31 #include <linux/export.h>
32 #include <linux/kallsyms.h>
33 #include <linux/mqueue.h>
34 #include <linux/hardirq.h>
35 #include <linux/utsname.h>
36 #include <linux/ftrace.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/personality.h>
39 #include <linux/random.h>
40 #include <linux/hw_breakpoint.h>
42 #include <asm/pgtable.h>
43 #include <asm/uaccess.h>
44 #include <asm/system.h>
46 #include <asm/processor.h>
49 #include <asm/machdep.h>
51 #include <asm/syscalls.h>
53 #include <asm/firmware.h>
55 #include <linux/kprobes.h>
56 #include <linux/kdebug.h>
58 extern unsigned long _get_SP(void);
61 struct task_struct *last_task_used_math = NULL;
62 struct task_struct *last_task_used_altivec = NULL;
63 struct task_struct *last_task_used_vsx = NULL;
64 struct task_struct *last_task_used_spe = NULL;
68 * Make sure the floating-point register state in
69 * the thread_struct is up to date for task tsk.
71 void flush_fp_to_thread(struct task_struct *tsk)
73 if (tsk->thread.regs) {
75 * We need to disable preemption here because if we didn't,
76 * another process could get scheduled after the regs->msr
77 * test but before we have finished saving the FP registers
78 * to the thread_struct. That process could take over the
79 * FPU, and then when we get scheduled again we would store
80 * bogus values for the remaining FP registers.
83 if (tsk->thread.regs->msr & MSR_FP) {
86 * This should only ever be called for current or
87 * for a stopped child process. Since we save away
88 * the FP register state on context switch on SMP,
89 * there is something wrong if a stopped child appears
90 * to still have its FP state in the CPU registers.
92 BUG_ON(tsk != current);
99 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
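/*
 * For illustration: anything that wants to read the saved FP image in the
 * thread_struct (ptrace, core dumps, signal frames) is expected to flush
 * first.  A minimal sketch of such a caller, with illustrative names only:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(buf, child->thread.fpr, sizeof(child->thread.fpr));
 */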
101 void enable_kernel_fp(void)
103 WARN_ON(preemptible());
106 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
109 giveup_fpu(NULL); /* just enables FP for kernel */
111 giveup_fpu(last_task_used_math);
112 #endif /* CONFIG_SMP */
114 EXPORT_SYMBOL(enable_kernel_fp);
116 #ifdef CONFIG_ALTIVEC
117 void enable_kernel_altivec(void)
119 WARN_ON(preemptible());
122 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
123 giveup_altivec(current);
125 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
127 giveup_altivec(last_task_used_altivec);
128 #endif /* CONFIG_SMP */
130 EXPORT_SYMBOL(enable_kernel_altivec);
133 * Make sure the VMX/Altivec register state in
134 * the thread_struct is up to date for task tsk.
136 void flush_altivec_to_thread(struct task_struct *tsk)
138 if (tsk->thread.regs) {
140 if (tsk->thread.regs->msr & MSR_VEC) {
142 BUG_ON(tsk != current);
149 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
150 #endif /* CONFIG_ALTIVEC */
154 /* not currently used, but some crazy RAID module might want to later */
155 void enable_kernel_vsx(void)
157 WARN_ON(preemptible());
160 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
163 giveup_vsx(NULL); /* just enable vsx for kernel - force */
165 giveup_vsx(last_task_used_vsx);
166 #endif /* CONFIG_SMP */
168 EXPORT_SYMBOL(enable_kernel_vsx);
171 void giveup_vsx(struct task_struct *tsk)
178 void flush_vsx_to_thread(struct task_struct *tsk)
180 if (tsk->thread.regs) {
182 if (tsk->thread.regs->msr & MSR_VSX) {
184 BUG_ON(tsk != current);
191 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
192 #endif /* CONFIG_VSX */
196 void enable_kernel_spe(void)
198 WARN_ON(preemptible());
201 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
204 giveup_spe(NULL); /* just enable SPE for kernel - force */
206 giveup_spe(last_task_used_spe);
207 #endif /* CONFIG_SMP */
209 EXPORT_SYMBOL(enable_kernel_spe);
211 void flush_spe_to_thread(struct task_struct *tsk)
213 if (tsk->thread.regs) {
215 if (tsk->thread.regs->msr & MSR_SPE) {
217 BUG_ON(tsk != current);
219 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
225 #endif /* CONFIG_SPE */
229 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
230 * and the current task has some state, discard it.
232 void discard_lazy_cpu_state(void)
235 if (last_task_used_math == current)
236 last_task_used_math = NULL;
237 #ifdef CONFIG_ALTIVEC
238 if (last_task_used_altivec == current)
239 last_task_used_altivec = NULL;
240 #endif /* CONFIG_ALTIVEC */
242 if (last_task_used_vsx == current)
243 last_task_used_vsx = NULL;
244 #endif /* CONFIG_VSX */
246 if (last_task_used_spe == current)
247 last_task_used_spe = NULL;
251 #endif /* CONFIG_SMP */
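/*
 * For example, the exec path (flush_thread()/start_thread() below) calls
 * discard_lazy_cpu_state() so that a stale last_task_used_* pointer no
 * longer refers to this task; the next FP/VMX/VSX/SPE access then takes
 * the "unavailable" exception and reloads clean state from the thread_struct.
 */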
253 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
254 void do_send_trap(struct pt_regs *regs, unsigned long address,
255 unsigned long error_code, int signal_code, int breakpt)
259 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
260 11, SIGSEGV) == NOTIFY_STOP)
263 /* Deliver the signal to userspace */
264 info.si_signo = SIGTRAP;
265 info.si_errno = breakpt; /* breakpoint or watchpoint id */
266 info.si_code = signal_code;
267 info.si_addr = (void __user *)address;
268 force_sig_info(SIGTRAP, &info, current);
270 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
271 void do_dabr(struct pt_regs *regs, unsigned long address,
272 unsigned long error_code)
276 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
277 11, SIGSEGV) == NOTIFY_STOP)
280 if (debugger_dabr_match(regs))
286 /* Deliver the signal to userspace */
287 info.si_signo = SIGTRAP;
289 info.si_code = TRAP_HWBKPT;
290 info.si_addr = (void __user *)address;
291 force_sig_info(SIGTRAP, &info, current);
293 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
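/*
 * Userspace observes either path as a SIGTRAP carrying the faulting data
 * address.  A ptrace-based debugger might, roughly (report_watchpoint_hit()
 * is an illustrative name):
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *	if (si.si_signo == SIGTRAP && si.si_code == TRAP_HWBKPT)
 *		report_watchpoint_hit(si.si_addr);
 */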
295 static DEFINE_PER_CPU(unsigned long, current_dabr);
297 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
299 * Set the debug registers back to their default "safe" values.
301 static void set_debug_reg_defaults(struct thread_struct *thread)
303 thread->iac1 = thread->iac2 = 0;
304 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
305 thread->iac3 = thread->iac4 = 0;
307 thread->dac1 = thread->dac2 = 0;
308 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
309 thread->dvc1 = thread->dvc2 = 0;
314 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
316 thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \
317 DBCR1_IAC3US | DBCR1_IAC4US;
319 * Force Data Address Compare User/Supervisor bits to be User-only
320 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
322 thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
328 static void prime_debug_regs(struct thread_struct *thread)
330 mtspr(SPRN_IAC1, thread->iac1);
331 mtspr(SPRN_IAC2, thread->iac2);
332 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
333 mtspr(SPRN_IAC3, thread->iac3);
334 mtspr(SPRN_IAC4, thread->iac4);
336 mtspr(SPRN_DAC1, thread->dac1);
337 mtspr(SPRN_DAC2, thread->dac2);
338 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
339 mtspr(SPRN_DVC1, thread->dvc1);
340 mtspr(SPRN_DVC2, thread->dvc2);
342 mtspr(SPRN_DBCR0, thread->dbcr0);
343 mtspr(SPRN_DBCR1, thread->dbcr1);
345 mtspr(SPRN_DBCR2, thread->dbcr2);
349 * If either the old or the new thread is making use of the
350 * debug registers, set the debug registers from the values
351 * stored in the new thread.
353 static void switch_booke_debug_regs(struct thread_struct *new_thread)
355 if ((current->thread.dbcr0 & DBCR0_IDM)
356 || (new_thread->dbcr0 & DBCR0_IDM))
357 prime_debug_regs(new_thread);
359 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
360 #ifndef CONFIG_HAVE_HW_BREAKPOINT
361 static void set_debug_reg_defaults(struct thread_struct *thread)
368 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
369 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
371 int set_dabr(unsigned long dabr)
373 __get_cpu_var(current_dabr) = dabr;
376 return ppc_md.set_dabr(dabr);
378 /* XXX should we have a CPU_FTR_HAS_DABR ? */
379 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
380 mtspr(SPRN_DAC1, dabr);
381 #ifdef CONFIG_PPC_47x
384 #elif defined(CONFIG_PPC_BOOK3S)
385 mtspr(SPRN_DABR, dabr);
393 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
396 struct task_struct *__switch_to(struct task_struct *prev,
397 struct task_struct *new)
399 struct thread_struct *new_thread, *old_thread;
401 struct task_struct *last;
402 #ifdef CONFIG_PPC_BOOK3S_64
403 struct ppc64_tlb_batch *batch;
407 /* Avoid the complexity of lazy save/restore of the FPU
408 * by just saving it every time we switch out if
409 * this task used the FPU during the last quantum.
411 * If it tries to use the fpu again, it'll trap and
412 * reload its fp regs. So we don't have to do a restore
413 * every switch, just a save.
416 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
418 #ifdef CONFIG_ALTIVEC
420 * If the previous thread used altivec in the last quantum
421 * (thus changing altivec regs) then save them.
422 * We used to check the VRSAVE register but not all apps
423 * set it, so we don't rely on it now (and in fact we need
424 * to save & restore VSCR even if VRSAVE == 0). -- paulus
426 * On SMP we always save/restore altivec regs just to avoid the
427 * complexity of changing processors.
430 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
431 giveup_altivec(prev);
432 #endif /* CONFIG_ALTIVEC */
434 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
435 /* VMX and FPU registers are already saved here */
437 #endif /* CONFIG_VSX */
440 * If the previous thread used spe in the last quantum
441 * (thus changing spe regs) then save them.
443 * On SMP we always save/restore spe regs just to avoid the
444 * complexity of changing processors.
446 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
448 #endif /* CONFIG_SPE */
450 #else /* CONFIG_SMP */
451 #ifdef CONFIG_ALTIVEC
452 /* Avoid the trap. On SMP this never happens since
453 * we don't set last_task_used_altivec -- Cort
455 if (new->thread.regs && last_task_used_altivec == new)
456 new->thread.regs->msr |= MSR_VEC;
457 #endif /* CONFIG_ALTIVEC */
459 if (new->thread.regs && last_task_used_vsx == new)
460 new->thread.regs->msr |= MSR_VSX;
461 #endif /* CONFIG_VSX */
463 /* Avoid the trap. On SMP this never happens since
464 * we don't set last_task_used_spe
466 if (new->thread.regs && last_task_used_spe == new)
467 new->thread.regs->msr |= MSR_SPE;
468 #endif /* CONFIG_SPE */
470 #endif /* CONFIG_SMP */
472 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
473 switch_booke_debug_regs(&new->thread);
476 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
479 #ifndef CONFIG_HAVE_HW_BREAKPOINT
480 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
481 set_dabr(new->thread.dabr);
482 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
486 new_thread = &new->thread;
487 old_thread = &current->thread;
491 * Collect processor utilization data per process
493 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
494 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
495 unsigned long start_tb, current_tb;
496 start_tb = old_thread->start_tb;
497 cu->current_tb = current_tb = mfspr(SPRN_PURR);
498 old_thread->accum_tb += (current_tb - start_tb);
499 new_thread->start_tb = current_tb;
501 #endif /* CONFIG_PPC64 */
503 #ifdef CONFIG_PPC_BOOK3S_64
504 batch = &__get_cpu_var(ppc64_tlb_batch);
506 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
508 __flush_tlb_pending(batch);
511 #endif /* CONFIG_PPC_BOOK3S_64 */
513 local_irq_save(flags);
515 account_system_vtime(current);
516 account_process_vtime(current);
519 * We can't take a PMU exception inside _switch() since there is a
520 * window where the kernel stack SLB and the kernel stack are out
521 * of sync. Hard disable here.
524 last = _switch(old_thread, new_thread);
526 #ifdef CONFIG_PPC_BOOK3S_64
527 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
528 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
529 batch = &__get_cpu_var(ppc64_tlb_batch);
532 #endif /* CONFIG_PPC_BOOK3S_64 */
534 local_irq_restore(flags);
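/*
 * Net effect of the lazy scheme above: a task that touched the FPU (or
 * VMX/VSX/SPE) during its quantum saves that state here on the way out;
 * nothing is restored eagerly.  When the task next runs and touches the
 * unit, the relevant MSR bit is clear, the "unavailable" exception fires
 * and the handler reloads the registers from the thread_struct.
 */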
539 static int instructions_to_print = 16;
541 static void show_instructions(struct pt_regs *regs)
544 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
547 printk("Instruction dump:");
549 for (i = 0; i < instructions_to_print; i++) {
555 #if !defined(CONFIG_BOOKE)
556 /* If executing with the IMMU off, adjust pc rather
557 * than print XXXXXXXX.
559 if (!(regs->msr & MSR_IR))
560 pc = (unsigned long)phys_to_virt(pc);
563 /* We use __get_user here *only* to avoid an OOPS on a
564 * bad address because the pc *should* only be a
567 if (!__kernel_text_address(pc) ||
568 __get_user(instr, (unsigned int __user *)pc)) {
569 printk(KERN_CONT "XXXXXXXX ");
572 printk(KERN_CONT "<%08x> ", instr);
574 printk(KERN_CONT "%08x ", instr);
583 static struct regbit {
587 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
616 static void printbits(unsigned long val, struct regbit *bits)
618 const char *sep = "";
621 for (; bits->bit; ++bits)
622 if (val & bits->bit) {
623 printk("%s%s", sep, bits->name);
631 #define REGS_PER_LINE 4
632 #define LAST_VOLATILE 13
635 #define REGS_PER_LINE 8
636 #define LAST_VOLATILE 12
639 void show_regs(struct pt_regs * regs)
643 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
644 regs->nip, regs->link, regs->ctr);
645 printk("REGS: %p TRAP: %04lx %s (%s)\n",
646 regs, regs->trap, print_tainted(), init_utsname()->release);
647 printk("MSR: "REG" ", regs->msr);
648 printbits(regs->msr, msr_bits);
649 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
651 printk("SOFTE: %ld\n", regs->softe);
654 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
655 printk("CFAR: "REG"\n", regs->orig_gpr3);
656 if (trap == 0x300 || trap == 0x600)
657 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
658 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
660 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
662 printk("TASK = %p[%d] '%s' THREAD: %p",
663 current, task_pid_nr(current), current->comm, task_thread_info(current));
666 printk(" CPU: %d", raw_smp_processor_id());
667 #endif /* CONFIG_SMP */
669 for (i = 0; i < 32; i++) {
670 if ((i % REGS_PER_LINE) == 0)
671 printk("\nGPR%02d: ", i);
672 printk(REG " ", regs->gpr[i]);
673 if (i == LAST_VOLATILE && !FULL_REGS(regs))
677 #ifdef CONFIG_KALLSYMS
679 * Look up the NIP late so we have the best chance of getting the
680 * above info out without failing
682 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
683 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
685 show_stack(current, (unsigned long *) regs->gpr[1]);
686 if (!user_mode(regs))
687 show_instructions(regs);
690 void exit_thread(void)
692 discard_lazy_cpu_state();
695 void flush_thread(void)
697 discard_lazy_cpu_state();
699 #ifdef CONFIG_HAVE_HW_BREAKPOINT
700 flush_ptrace_hw_breakpoint(current);
701 #else /* CONFIG_HAVE_HW_BREAKPOINT */
702 set_debug_reg_defaults(&current->thread);
703 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
707 release_thread(struct task_struct *t)
712 * This gets called before we allocate a new thread and copy
713 * the current task into it.
715 void prepare_to_copy(struct task_struct *tsk)
717 flush_fp_to_thread(current);
718 flush_altivec_to_thread(current);
719 flush_vsx_to_thread(current);
720 flush_spe_to_thread(current);
721 #ifdef CONFIG_HAVE_HW_BREAKPOINT
722 flush_ptrace_hw_breakpoint(tsk);
723 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
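/*
 * The flushes above run in the parent so that the FP/VMX/VSX/SPE images in
 * current->thread are up to date before the thread_struct is duplicated
 * into the child (the core fork code calls this shortly before that copy).
 */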
729 extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
731 int copy_thread(unsigned long clone_flags, unsigned long usp,
732 unsigned long unused, struct task_struct *p,
733 struct pt_regs *regs)
735 struct pt_regs *childregs, *kregs;
736 extern void ret_from_fork(void);
737 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
739 CHECK_FULL_REGS(regs);
741 sp -= sizeof(struct pt_regs);
742 childregs = (struct pt_regs *) sp;
744 if ((childregs->msr & MSR_PR) == 0) {
745 /* for kernel thread, set `current' and stackptr in new task */
746 childregs->gpr[1] = sp + sizeof(struct pt_regs);
748 childregs->gpr[2] = (unsigned long) p;
750 clear_tsk_thread_flag(p, TIF_32BIT);
752 p->thread.regs = NULL; /* no user register state */
754 childregs->gpr[1] = usp;
755 p->thread.regs = childregs;
756 if (clone_flags & CLONE_SETTLS) {
758 if (!is_32bit_task())
759 childregs->gpr[13] = childregs->gpr[6];
762 childregs->gpr[2] = childregs->gpr[6];
765 childregs->gpr[3] = 0; /* Result from fork() */
766 sp -= STACK_FRAME_OVERHEAD;
769 * The way this works is that at some point in the future
770 * some task will call _switch to switch to the new task.
771 * That will pop off the stack frame created below and start
772 * the new task running at ret_from_fork. The new task will
773 * do some housekeeping and then return from the fork or clone
774 * system call, using the stack frame created above.
776 sp -= sizeof(struct pt_regs);
777 kregs = (struct pt_regs *) sp;
778 sp -= STACK_FRAME_OVERHEAD;
780 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
781 _ALIGN_UP(sizeof(struct thread_info), 16);
783 #ifdef CONFIG_PPC_STD_MMU_64
784 if (mmu_has_feature(MMU_FTR_SLB)) {
785 unsigned long sp_vsid;
786 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
788 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
789 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
790 << SLB_VSID_SHIFT_1T;
792 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
794 sp_vsid |= SLB_VSID_KERNEL | llp;
795 p->thread.ksp_vsid = sp_vsid;
797 #endif /* CONFIG_PPC_STD_MMU_64 */
799 if (cpu_has_feature(CPU_FTR_DSCR)) {
800 if (current->thread.dscr_inherit) {
801 p->thread.dscr_inherit = 1;
802 p->thread.dscr = current->thread.dscr;
803 } else if (0 != dscr_default) {
804 p->thread.dscr_inherit = 1;
805 p->thread.dscr = dscr_default;
807 p->thread.dscr_inherit = 0;
814 * The PPC64 ABI makes use of a TOC to contain function
815 * pointers. The function (ret_from_fork) is actually a pointer
816 * to the TOC entry. The first entry is a pointer to the actual
820 kregs->nip = *((unsigned long *)ret_from_fork);
822 kregs->nip = (unsigned long)ret_from_fork;
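/*
 * Background for the two cases above: under the 64-bit ELF ABI a function
 * symbol such as ret_from_fork names a descriptor, roughly
 *
 *	struct { unsigned long entry; unsigned long toc; unsigned long env; };
 *
 * so dereferencing the symbol yields the real entry address, whereas on
 * 32-bit (no descriptors) the symbol already is the entry address.
 * (Field names above are illustrative.)
 */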
829 * Set up a thread for executing a new program
831 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
834 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
838 * If we exec out of a kernel thread then thread.regs will not be
841 if (!current->thread.regs) {
842 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
843 current->thread.regs = regs - 1;
846 memset(regs->gpr, 0, sizeof(regs->gpr));
854 * We have just cleared all the nonvolatile GPRs, so make
855 * FULL_REGS(regs) return true. This is necessary to allow
856 * ptrace to examine the thread immediately after exec.
863 regs->msr = MSR_USER;
865 if (!is_32bit_task()) {
866 unsigned long entry, toc;
868 /* start is a relocated pointer to the function descriptor for
869 * the elf _start routine. The first entry in the function
870 * descriptor is the entry address of _start and the second
871 * entry is the TOC value we need to use.
873 __get_user(entry, (unsigned long __user *)start);
874 __get_user(toc, (unsigned long __user *)start+1);
876 /* Check whether the e_entry function descriptor entries
877 * need to be relocated before we can use them.
879 if (load_addr != 0) {
885 regs->msr = MSR_USER64;
889 regs->msr = MSR_USER32;
893 discard_lazy_cpu_state();
895 current->thread.used_vsr = 0;
897 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
898 current->thread.fpscr.val = 0;
899 #ifdef CONFIG_ALTIVEC
900 memset(current->thread.vr, 0, sizeof(current->thread.vr));
901 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
902 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
903 current->thread.vrsave = 0;
904 current->thread.used_vr = 0;
905 #endif /* CONFIG_ALTIVEC */
907 memset(current->thread.evr, 0, sizeof(current->thread.evr));
908 current->thread.acc = 0;
909 current->thread.spefscr = 0;
910 current->thread.used_spe = 0;
911 #endif /* CONFIG_SPE */
914 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
915 | PR_FP_EXC_RES | PR_FP_EXC_INV)
917 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
919 struct pt_regs *regs = tsk->thread.regs;
921 /* This is a bit hairy. If we are an SPE-enabled processor
922 * (have embedded fp) we store the IEEE exception enable flags in
923 * fpexc_mode. fpexc_mode is also used for setting the FP exception
924 * mode (async, precise, disabled) for 'Classic' FP. */
925 if (val & PR_FP_EXC_SW_ENABLE) {
927 if (cpu_has_feature(CPU_FTR_SPE)) {
928 tsk->thread.fpexc_mode = val &
929 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
939 /* On a CONFIG_SPE CPU this does not hurt us. The bits that
940 * __pack_fe01 uses do not overlap with bits used for
941 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
942 * on CONFIG_SPE implementations are reserved so writing to
943 * them does not change anything */
944 if (val > PR_FP_EXC_PRECISE)
946 tsk->thread.fpexc_mode = __pack_fe01(val);
947 if (regs != NULL && (regs->msr & MSR_FP) != 0)
948 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
949 | tsk->thread.fpexc_mode;
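/*
 * For reference, __pack_fe01()/__unpack_fe01() map the "classic" modes to
 * the architected MSR[FE0,FE1] encodings, roughly:
 *
 *	PR_FP_EXC_DISABLED -> FE0=0 FE1=0  (FP exceptions ignored)
 *	PR_FP_EXC_NONRECOV -> FE0=0 FE1=1  (imprecise nonrecoverable)
 *	PR_FP_EXC_ASYNC    -> FE0=1 FE1=0  (imprecise recoverable)
 *	PR_FP_EXC_PRECISE  -> FE0=1 FE1=1  (precise)
 */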
953 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
957 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
959 if (cpu_has_feature(CPU_FTR_SPE))
960 val = tsk->thread.fpexc_mode;
967 val = __unpack_fe01(tsk->thread.fpexc_mode);
968 return put_user(val, (unsigned int __user *) adr);
971 int set_endian(struct task_struct *tsk, unsigned int val)
973 struct pt_regs *regs = tsk->thread.regs;
975 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
976 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
982 if (val == PR_ENDIAN_BIG)
983 regs->msr &= ~MSR_LE;
984 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
992 int get_endian(struct task_struct *tsk, unsigned long adr)
994 struct pt_regs *regs = tsk->thread.regs;
997 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
998 !cpu_has_feature(CPU_FTR_REAL_LE))
1004 if (regs->msr & MSR_LE) {
1005 if (cpu_has_feature(CPU_FTR_REAL_LE))
1006 val = PR_ENDIAN_LITTLE;
1008 val = PR_ENDIAN_PPC_LITTLE;
1010 val = PR_ENDIAN_BIG;
1012 return put_user(val, (unsigned int __user *)adr);
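/*
 * Both hooks are reached via prctl(); e.g. a process running on a CPU with
 * true little-endian support could do:
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *	...
 *	int e;
 *	prctl(PR_GET_ENDIAN, (unsigned long)&e);
 */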
1015 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1017 tsk->thread.align_ctl = val;
1021 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1023 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1026 #define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
1028 int sys_clone(unsigned long clone_flags, unsigned long usp,
1029 int __user *parent_tidp, void __user *child_threadptr,
1030 int __user *child_tidp, int p6,
1031 struct pt_regs *regs)
1033 CHECK_FULL_REGS(regs);
1035 usp = regs->gpr[1]; /* stack pointer for child */
1037 if (is_32bit_task()) {
1038 parent_tidp = TRUNC_PTR(parent_tidp);
1039 child_tidp = TRUNC_PTR(child_tidp);
1042 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
1045 int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
1046 unsigned long p4, unsigned long p5, unsigned long p6,
1047 struct pt_regs *regs)
1049 CHECK_FULL_REGS(regs);
1050 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
1053 int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
1054 unsigned long p4, unsigned long p5, unsigned long p6,
1055 struct pt_regs *regs)
1057 CHECK_FULL_REGS(regs);
1058 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
1059 regs, 0, NULL, NULL);
1062 int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
1063 unsigned long a3, unsigned long a4, unsigned long a5,
1064 struct pt_regs *regs)
1069 filename = getname((const char __user *) a0);
1070 error = PTR_ERR(filename);
1071 if (IS_ERR(filename))
1073 flush_fp_to_thread(current);
1074 flush_altivec_to_thread(current);
1075 flush_spe_to_thread(current);
1076 error = do_execve(filename,
1077 (const char __user *const __user *) a1,
1078 (const char __user *const __user *) a2, regs);
1084 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1085 unsigned long nbytes)
1087 unsigned long stack_page;
1088 unsigned long cpu = task_cpu(p);
1091 * Avoid crashing if the stack has overflowed and corrupted
1092 * task_cpu(p), which is in the thread_info struct.
1094 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1095 stack_page = (unsigned long) hardirq_ctx[cpu];
1096 if (sp >= stack_page + sizeof(struct thread_struct)
1097 && sp <= stack_page + THREAD_SIZE - nbytes)
1100 stack_page = (unsigned long) softirq_ctx[cpu];
1101 if (sp >= stack_page + sizeof(struct thread_struct)
1102 && sp <= stack_page + THREAD_SIZE - nbytes)
1108 int validate_sp(unsigned long sp, struct task_struct *p,
1109 unsigned long nbytes)
1111 unsigned long stack_page = (unsigned long)task_stack_page(p);
1113 if (sp >= stack_page + sizeof(struct thread_struct)
1114 && sp <= stack_page + THREAD_SIZE - nbytes)
1117 return valid_irq_stack(sp, p, nbytes);
1120 EXPORT_SYMBOL(validate_sp);
1122 unsigned long get_wchan(struct task_struct *p)
1124 unsigned long ip, sp;
1127 if (!p || p == current || p->state == TASK_RUNNING)
1131 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1135 sp = *(unsigned long *)sp;
1136 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1139 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1140 if (!in_sched_functions(ip))
1143 } while (count++ < 16);
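/*
 * The walk above relies on the PowerPC stack frame layout: the word at
 * offset 0 of each frame is the back chain (the previous frame's stack
 * pointer) and the saved LR sits a fixed STACK_FRAME_LR_SAVE longwords
 * into the frame, so each step is essentially
 *
 *	sp = *(unsigned long *)sp;
 *	ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
 */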
1147 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1149 void show_stack(struct task_struct *tsk, unsigned long *stack)
1151 unsigned long sp, ip, lr, newsp;
1154 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1155 int curr_frame = current->curr_ret_stack;
1156 extern void return_to_handler(void);
1157 unsigned long rth = (unsigned long)return_to_handler;
1158 unsigned long mrth = -1;
1160 extern void mod_return_to_handler(void);
1161 rth = *(unsigned long *)rth;
1162 mrth = (unsigned long)mod_return_to_handler;
1163 mrth = *(unsigned long *)mrth;
1167 sp = (unsigned long) stack;
1172 asm("mr %0,1" : "=r" (sp));
1174 sp = tsk->thread.ksp;
1178 printk("Call Trace:\n");
1180 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1183 stack = (unsigned long *) sp;
1185 ip = stack[STACK_FRAME_LR_SAVE];
1186 if (!firstframe || ip != lr) {
1187 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1188 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1189 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
1191 (void *)current->ret_stack[curr_frame].ret);
1196 printk(" (unreliable)");
1202 * See if this is an exception frame.
1203 * We look for the "regshere" marker in the current frame.
1205 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1206 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1207 struct pt_regs *regs = (struct pt_regs *)
1208 (sp + STACK_FRAME_OVERHEAD);
1210 printk("--- Exception: %lx at %pS\n LR = %pS\n",
1211 regs->trap, (void *)regs->nip, (void *)lr);
1216 } while (count++ < kstack_depth_to_print);
1219 void dump_stack(void)
1221 show_stack(current, NULL);
1223 EXPORT_SYMBOL(dump_stack);
1226 /* Called with hard IRQs off */
1227 void __ppc64_runlatch_on(void)
1229 struct thread_info *ti = current_thread_info();
1232 ctrl = mfspr(SPRN_CTRLF);
1233 ctrl |= CTRL_RUNLATCH;
1234 mtspr(SPRN_CTRLT, ctrl);
1236 ti->local_flags |= TLF_RUNLATCH;
1239 /* Called with hard IRQs off */
1240 void __ppc64_runlatch_off(void)
1242 struct thread_info *ti = current_thread_info();
1245 ti->local_flags &= ~TLF_RUNLATCH;
1247 ctrl = mfspr(SPRN_CTRLF);
1248 ctrl &= ~CTRL_RUNLATCH;
1249 mtspr(SPRN_CTRLT, ctrl);
1251 #endif /* CONFIG_PPC64 */
1253 #if THREAD_SHIFT < PAGE_SHIFT
1255 static struct kmem_cache *thread_info_cache;
1257 struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
1259 struct thread_info *ti;
1261 ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
1262 if (unlikely(ti == NULL))
1264 #ifdef CONFIG_DEBUG_STACK_USAGE
1265 memset(ti, 0, THREAD_SIZE);
1270 void free_thread_info(struct thread_info *ti)
1272 kmem_cache_free(thread_info_cache, ti);
1275 void thread_info_cache_init(void)
1277 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
1278 THREAD_SIZE, 0, NULL);
1279 BUG_ON(thread_info_cache == NULL);
1282 #endif /* THREAD_SHIFT < PAGE_SHIFT */
1284 unsigned long arch_align_stack(unsigned long sp)
1286 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1287 sp -= get_random_int() & ~PAGE_MASK;
1291 static inline unsigned long brk_rnd(void)
1293 unsigned long rnd = 0;
1295 /* 8MB for 32bit, 1GB for 64bit */
1296 if (is_32bit_task())
1297 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1299 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1301 return rnd << PAGE_SHIFT;
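/*
 * Worked example: rnd is a page count, so after the shift the range is
 * independent of the page size:
 *
 *	32-bit: rnd < 1 << (23 - PAGE_SHIFT)  =>  rnd << PAGE_SHIFT < 2^23 = 8MB
 *	64-bit: rnd < 1 << (30 - PAGE_SHIFT)  =>  rnd << PAGE_SHIFT < 2^30 = 1GB
 */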
1304 unsigned long arch_randomize_brk(struct mm_struct *mm)
1306 unsigned long base = mm->brk;
1309 #ifdef CONFIG_PPC_STD_MMU_64
1311 * If we are using 1TB segments and we are allowed to randomise
1312 * the heap, we can put it above 1TB so it is backed by a 1TB
1313 * segment. Otherwise the heap will be in the bottom 1TB
1314 * which always uses 256MB segments and this may result in a
1315 * performance penalty.
1317 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1318 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1321 ret = PAGE_ALIGN(base + brk_rnd());
1329 unsigned long randomize_et_dyn(unsigned long base)
1331 unsigned long ret = PAGE_ALIGN(base + brk_rnd());