/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads.\n");
		cpu_idle_poll_ctrl(true);
		return 0;
	} else if (!strcmp(str, "halt")) {
		return 0;
	}
	return -1;
}
early_param("idle", idle_setup);

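/*
 * (Note: early_param() runs the handler above during early command-line
 * parsing, so booting with "idle=poll" takes effect before the first
 * entry to the idle loop.)
 */
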
void arch_cpu_idle(void)
{
	__get_cpu_var(irq_stat).idle_timestamp = jiffies;
	_cpu_idle();
}

/*
 * Release a thread_info structure
 */
void arch_release_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

	if (step_state) {

		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	unsigned long ksp;
	unsigned long *callee_regs;

	/*
	 * Set up the stack and stack pointer appropriately for the
	 * new child to find itself woken up in __switch_to().
	 * The callee-saved registers must be on the stack to be read;
	 * the new task will then jump to assembly support to handle
	 * calling schedule_tail(), etc., and (for userspace tasks)
	 * returning to the context set up in the pt_regs.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	callee_regs = (unsigned long *)ksp;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

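	/*
	 * Sketch of the child's kernel stack as laid out above (grows
	 * downward; exact sizes come from C_ABI_SAVE_AREA_SIZE and
	 * CALLEE_SAVED_REGS_COUNT):
	 *
	 *   childregs        -> pt_regs, i.e. task_pt_regs(p)
	 *                       interrupt-entry C ABI save area (zeroed)
	 *   callee_regs      -> block of callee-saved registers
	 *   p->thread.ksp    -> __switch_to() C ABI save area (zeroed)
	 */
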
	/* Record the pid of the task that created this one. */
	p->thread.creator_pid = current->pid;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		memset(&callee_regs[2], 0,
		       (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
		callee_regs[0] = sp;   /* r30 = function */
		callee_regs[1] = arg;  /* r31 = arg */
		childregs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	*childregs = *current_pt_regs();
	childregs->regs[0] = 0;         /* return value is zero */
	if (sp)
		childregs->sp = sp;  /* override with new user stack pointer */
	memcpy(callee_regs, &childregs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = childregs->sp;

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = childregs->regs[4];

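	/*
	 * (Illustration only: a thread library issuing the raw clone
	 * syscall would pass the new thread's TLS pointer as that fifth
	 * argument, and it arrives here in regs[4].)
	 */
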
#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	memset(&p->thread.hardwall[0], 0,
	       sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif

	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

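/*
 * (show_regs() below is a typical caller: using validate_current() lets a
 * panic path proceed safely even when "current" has been trashed.)
 */
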
/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

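/*
 * (The two SIM_CONTROL writes above report the creator's pid and then the
 * new task's pid, each shifted above the operator field, so the Tilera
 * simulator can match up fork events.)
 */
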
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

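/*
 * (The "SET" SPRs above adjust the minimum protection level of the DMA
 * interrupts; which level corresponds to user vs. kernel depends on
 * CONFIG_KERNEL_PL, hence the two variants.)
 */
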
/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

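/* (restore_arch_state() below must mirror the SPR list saved above.) */
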
static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}

struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	hardwall_switch_tasks(prev, next);
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

/*
 * This routine is called on return from interrupt if any of the
 * TIF_WORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event
 * that modified the thread_info flags.  If any flag is set, we
 * handle it and return, and the calling assembly code will
 * re-disable interrupts, reload the thread flags, and call back
 * if more flags need to be handled.
 *
 * We return whether we need to check the thread_info flags again
 * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
 * important that it be tested last, and then claim that we don't
 * need to recheck the flags.
 */
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
	/* If we enter in kernel mode, do nothing and exit the caller loop. */
	if (!user_mode(regs))
		return 0;

	/* Enable interrupts; they are disabled again on return to caller. */
	local_irq_enable();

	if (thread_info_flags & _TIF_NEED_RESCHED) {
		schedule();
		return 1;
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (thread_info_flags & _TIF_ASYNC_TLB) {
		do_async_page_fault(regs);
		return 1;
	}
#endif
	if (thread_info_flags & _TIF_SIGPENDING) {
		do_signal(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		return 1;
	}
	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
		return 0;
	}
	panic("work_pending: bad flags %#x\n", thread_info_flags);
}

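/*
 * (Caller contract, per the comment above do_work_pending(): a nonzero
 * return asks the assembly caller to recheck the flags; the TIF_SINGLESTEP
 * case returns zero because it must be the last flag handled.)
 */
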
unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}
	return 0;
}

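/*
 * (get_wchan() backs /proc/<pid>/wchan: it reports the first PC in the
 * sleeping task's backtrace that is outside the scheduler itself.)
 */
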
/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
#ifdef CONFIG_HARDWALL
	/*
	 * Remove the task from the list of tasks that are associated
	 * with any live hardwalls.  (If the task that is exiting held
	 * the last reference to a hardwall fd, it would already have
	 * been released and deactivated at this point.)
	 */
	hardwall_deactivate_all(current);
#endif
}

void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = validate_current();
	int i;

	show_regs_print_info(KERN_ERR);
#ifdef __tilegx__
	for (i = 0; i < 51; i += 3)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2]);
	pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[51], regs->regs[52], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 52; i += 4)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+1, regs->regs[i+1],
		       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
	pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

	dump_stack_regs(regs);
}