/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads.\n");
		no_idle_nap = 1;
	} else if (!strcmp(str, "halt"))
		no_idle_nap = 0;
	else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

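/*
 * Added usage note: this is consumed from the kernel command line at
 * early boot, e.g. "idle=poll" to spin in the idle loop, or
 * "idle=halt" to keep the default napping behavior.
 */
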
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	if (no_idle_nap) {
		while (1) {
			while (!need_resched())
				cpu_relax();
			schedule();
		}
	}

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched()) {
			if (cpu_is_offline(cpu))
				BUG();  /* no HOTPLUG_CPU */

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();

			if (!need_resched())
				_cpu_idle();
			else
				local_irq_enable();
			current_thread_info()->status |= TS_POLLING;
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

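/*
 * Added illustration (not from the original file): the smp_mb() in the
 * loop above pairs with the waker side, which sets NEED_RESCHED and
 * then checks TS_POLLING to decide whether a resched IPI is needed.
 * A simplified, hypothetical sketch of that waker-side test:
 */
#if 0	/* illustrative only */
static bool polling_idle_needs_ipi(struct task_struct *idle_task)
{
	set_tsk_need_resched(idle_task);	/* publish NEED_RESCHED */
	smp_mb();				/* pairs with the idle loop's barrier */
	/*
	 * A polling idler will notice NEED_RESCHED on its own; a
	 * napping one (TS_POLLING cleared) must be woken with an IPI.
	 */
	return !(task_thread_info(idle_task)->status & TS_POLLING);
}
#endif
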
/*
 * Release a thread_info structure
 */
void arch_release_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
	/*
	 * We free a thread_info from the context of the task that has
	 * been scheduled next, so the original task is already dead.
	 * Calling deactivate here just frees up the data structures.
	 * If the task we're freeing held the last reference to a
	 * hardwall fd, it would have been released prior to this point
	 * anyway via exit_files(), and "hardwall" would be NULL by now.
	 */
	if (info->task->thread.hardwall)
		hardwall_deactivate(info->task);
#endif

	if (step_state) {
		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;         /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = regs->regs[4];

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * This way when we start the interrupt-return sequence, the
	 * callee-save registers will be correctly in registers, which
	 * is how we assume the compiler leaves them as we start doing
	 * the normal return-from-interrupt path after calling C code.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	p->thread.hardwall = NULL;
#endif

	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

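/*
 * Added illustration (not from the original file): the child kernel
 * stack assembled above looks roughly like this, higher addresses
 * first:
 *
 *	task_pt_regs(p):  pt_regs copied from the parent
 *	                  C ABI save area, zeroed (interrupt entry)
 *	                  callee-saved registers copied from *regs
 *	p->thread.ksp:    C ABI save area, zeroed (__switch_to)
 *
 * so the first __switch_to() into the child pops the callee-saved
 * registers, and the return-from-interrupt path then restores pt_regs.
 */
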
/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

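/*
 * Added usage note (hypothetical example): fatal-error paths can call
 * validate_current() instead of using "current" directly, so a trashed
 * task pointer still yields a readable message rather than a recursive
 * fault:
 *
 *	struct task_struct *tsk = validate_current();
 *	pr_err("pid %d comm %s\n", tsk->pid, tsk->comm);
 */
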
/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

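/*
 * Added commentary (hedged interpretation): writing 1 to an
 * SPR_MPL_DMA_*_SET_<N> register routes the DMA interrupts to
 * protection level <N>.  "Granting" routes them one level below the
 * kernel's PL, where user-level DMA handling lives, while
 * "restricting" routes them back to the kernel's own PL; that is why
 * the SET_<N> choice shifts by one when CONFIG_KERNEL_PL == 2.
 */
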
/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif /* CHIP_HAS_TILE_DMA() */

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}

struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	if (prev->thread.hardwall != NULL) {
		if (next->thread.hardwall == NULL)
			restrict_network_mpls();
	} else if (next->thread.hardwall != NULL) {
		grant_network_mpls();
	}
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

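/*
 * Added note (hedged reconstruction from the code above): the overall
 * switch sequence is roughly
 *
 *	_prepare_arch_switch(next);    pause DMA engine / SN processor
 *	_switch_to(prev, next);        swap SPR, DMA, and MPL state
 *	  -> __switch_to(...)          asm: swap sp, pc, callee-saves
 *
 * all with interrupts disabled, as the DMA restart comment requires.
 */
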
/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	/* If we enter in kernel mode, do nothing and exit the caller loop. */
	if (!user_mode(regs))
		return 0;

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
	}
#endif
	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
		return 1;
	}
	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
		return 0;
	}
	panic("work_pending: bad flags %#x\n", thread_info_flags);
}

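/*
 * Added illustration: the calling convention described above, written
 * as C-style pseudocode for the interrupt-return assembly (the real
 * caller is asm; names here are illustrative):
 *
 *	for (;;) {
 *		local_irq_disable();
 *		flags = current_thread_info()->flags & _TIF_WORK_MASK;
 *		if (!flags)
 *			break;			// return to user space
 *		if (!do_work_pending(regs, flags))
 *			break;			// e.g. after TIF_SINGLESTEP
 *	}
 */
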
/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		void __user *, parent_tidptr, void __user *, child_tidptr,
		struct pt_regs *, regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}

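/*
 * Added usage note (illustrative, glibc-style wrapper call): the fifth
 * argument reaches the kernel in r4, which copy_thread() above loads
 * into the child's "tp" register:
 *
 *	clone(fn, stack, flags | CLONE_SETTLS, arg, &ptid, tls, &ctid);
 */
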
/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE4(execve, const char __user *, path,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		struct pt_regs *, regs)
{
	long error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	if (error == 0)
		single_step_execve();
out:
	return error;
}

#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
		       compat_uptr_t __user *argv,
		       compat_uptr_t __user *envp,
		       struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);
	if (error == 0)
		single_step_execve();
out:
	return error;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}

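/*
 * Added note: get_wchan() backs /proc/<pid>/wchan; it reports the
 * first PC outside the scheduler on a sleeping task's stack so tools
 * like ps can show where the task is blocked.
 */
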
/*
 * We pass in lr as zero (cleared in kernel_thread) and the caller
 * part of the backtrace ABI on the stack also zeroed (in copy_thread)
 * so that backtraces will stop with this function.
 * Note that we don't use r0, since copy_thread() clears it.
 */
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
	do_exit(fn(arg));
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0);  /* run at kernel PL, no ICS */
	regs.pc = (long) start_kernel_thread;
	regs.flags = PT_FLAGS_CALLER_SAVES;   /* need to restore r1 and r2 */
	regs.regs[1] = (long) fn;             /* function pointer */
	regs.regs[2] = (long) arg;            /* parameter register */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
		       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

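/*
 * Added usage sketch (hypothetical names): a caller would do, e.g.,
 *
 *	static int my_worker(void *arg)
 *	{
 *		pr_info("worker started\n");
 *		return 0;
 *	}
 *	...
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * though most code uses the kthread_create()/kthread_run() API rather
 * than calling kernel_thread() directly.
 */
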
/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Nothing */
}

void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = validate_current();
	int i;

	pr_err("\n");
	pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
	       tsk->pid, tsk->comm, smp_processor_id());
#ifdef __tilegx__
	for (i = 0; i < 51; i += 3)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2]);
	pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[51], regs->regs[52], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 52; i += 4)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
	pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

	dump_stack_regs(regs);
}