arch/powerpc/kernel/process.c
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}

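/*
 * Illustrative example: ptrace and coredump paths are expected to call
 * this before reading a stopped child's FP state, along the lines of
 *
 *	flush_fp_to_thread(child);
 *	memcpy(data, child->thread.fpr, sizeof(child->thread.fpr));
 *
 * where "child" is assumed to be a stopped, traced task.
 */
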
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

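/*
 * Illustrative sketch of a caller (not compiled; fp_copy_loop() is a
 * hypothetical FP-using helper).  Kernel FP use must happen with
 * preemption disabled, since enable_kernel_fp() takes over per-CPU FPU
 * state and warns when called preemptibly.
 */
#if 0
static void example_kernel_fp_user(void *dst, const void *src, size_t n)
{
        preempt_disable();
        enable_kernel_fp();             /* take the FPU for kernel use */
        fp_copy_loop(dst, src, n);      /* hypothetical FP-using routine */
        preempt_enable();
}
#endif
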
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/AltiVec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
                    unsigned long error_code)
{
        siginfo_t info;

        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_dabr_match(regs))
                return;

        /* Clear the DABR */
        set_dabr(0);

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |   \
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11, MSR[PR]=1) and set all other bits in the DBCR2 register to 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        if (thread->dabr) {
                thread->dabr = 0;
                set_dabr(0);
        }
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr)
{
        __get_cpu_var(current_dabr) = dabr;

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr);

        /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
#endif

        return 0;
}

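/*
 * Illustrative sketch (not compiled): arming a write watchpoint through
 * set_dabr().  The DABR holds a doubleword-aligned address; the low
 * DABR_TRANSLATION / DABR_DATA_WRITE / DABR_DATA_READ bits select
 * translated-address matching and the access types to trap on.
 */
#if 0
static void example_watch_write(unsigned long addr)
{
        set_dabr((addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE);
}
#endif
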
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
                set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#if defined(CONFIG_PPC_BOOK3E_64)
        /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
         * we always hold the user values, so we set it now.
         *
         * However, we ensure the kernel MSR:DE is appropriately cleared too
         * to avoid spurious single step exceptions in the kernel.
         *
         * This will have to change to merge with the ppc32 code at some point,
         * but I don't like much what ppc32 is doing today so there's some
         * thinking needed there
         */
        if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
                u32 dbcr0;

                mtmsr(mfmsr() & ~MSR_DE);
                isync();
                dbcr0 = mfspr(SPRN_DBCR0);
                dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
                mtspr(SPRN_DBCR0, dbcr0);
        }
#endif /* CONFIG_PPC_BOOK3E_64 */

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                unsigned long start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        account_system_vtime(current);
        account_process_vtime(current);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync. Hard disable here.
         */
        hard_irq_disable();
        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                     __get_user(instr, (unsigned int __user *)pc)) {
                        printk("XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk("<%08x> ", instr);
                        else
                                printk("%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

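/*
 * Example of the resulting dump (illustrative instruction words):
 *
 *	Instruction dump:
 *	7c0802a6 f8010010 f821ff91 <e8010080> 7c7f1b78 ...
 *
 * The word at regs->nip is bracketed; words that cannot be read are
 * printed as XXXXXXXX.
 */
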
static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
        {MSR_ME,        "ME"},
        {MSR_CE,        "CE"},
        {MSR_DE,        "DE"},
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}

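/*
 * For example (illustrative), a typical user-mode MSR prints as
 * something like <EE,PR,FP,ME,IR,DR>: only the names of set bits
 * appear, comma-separated inside angle brackets.
 */
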
#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0;  i < 32;  i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Look up the NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_vsx_to_thread(current);
        flush_spe_to_thread(current);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(tsk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long unused, struct task_struct *p,
                struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
                childregs->gpr[2] = (unsigned long) p;
#else
                clear_tsk_thread_flag(p, TIF_32BIT);
#endif
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some housekeeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                if (current->thread.dscr_inherit) {
                        p->thread.dscr_inherit = 1;
                        p->thread.dscr = current->thread.dscr;
                } else if (dscr_default != 0) {
                        p->thread.dscr_inherit = 1;
                        p->thread.dscr = dscr_default;
                } else {
                        p->thread.dscr_inherit = 0;
                        p->thread.dscr = 0;
                }
        }
#endif

        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  ret_from_fork is actually a pointer to the
         * function descriptor, whose first entry is the address of
         * the actual function.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)ret_from_fork);
#else
        kregs->nip = (unsigned long)ret_from_fork;
#endif

        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc   += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* On CONFIG_SPE implementations this does not hurt us.  The bits
         * that __pack_fe01 uses do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

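/*
 * The two functions above back the PR_SET_FPEXC/PR_GET_FPEXC prctl()s.
 * Illustrative userspace usage (not part of this file):
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * which selects precise FP exception mode on classic FP; on SPE
 * processors, PR_FP_EXC_SW_ENABLE combined with the PR_FP_EXC_* bits
 * selects individual IEEE exception enables instead.
 */
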
int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

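/*
 * These back the PR_SET_ENDIAN/PR_GET_ENDIAN prctl()s.  Illustrative
 * userspace usage (not part of this file):
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *
 * which sets MSR_LE for the thread, provided the CPU advertises true
 * little-endian support (CPU_FTR_REAL_LE).
 */
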
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
#ifdef CONFIG_PPC64
        if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
#endif
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char *filename;

        filename = getname((const char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_spe_to_thread(current);
        error = do_execve(filename,
                          (const char __user *const __user *) a1,
                          (const char __user *const __user *) a2, regs);
        putname(filename);
out:
        return error;
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                       unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
        unsigned long ctrl;

        if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
                HMT_medium();

                ctrl = mfspr(SPRN_CTRLF);
                ctrl |= CTRL_RUNLATCH;
                mtspr(SPRN_CTRLT, ctrl);

                set_thread_flag(TIF_RUNLATCH);
        }
}

void __ppc64_runlatch_off(void)
{
        unsigned long ctrl;

        HMT_medium();

        clear_thread_flag(TIF_RUNLATCH);

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
        struct thread_info *ti;

        ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
        if (unlikely(ti == NULL))
                return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
        memset(ti, 0, THREAD_SIZE);
#endif
        return ti;
}

void free_thread_info(struct thread_info *ti)
{
        kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
        BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

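/*
 * For example, with 4K pages the stack top is shifted down by a random
 * 0-4095 bytes and then rounded down to a 16-byte boundary.
 */
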
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment. Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}