/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>
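
/*
 * Note: the __TBI*() calls below come from the Meta Thread Binary
 * Interface (TBX) support code; TBIRES and related types are declared
 * in <asm/tbx.h> (pulled in indirectly by the asm headers above).
 */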

/*
 * Wait for the next interrupt and enable local interrupts
 */
static inline void arch_idle(void)
{
	int tmp;

	/*
	 * Quickly jump straight into the interrupt entry point without actually
	 * triggering an interrupt. When TXSTATI gets read the processor will
	 * block until an interrupt is triggered.
	 */
	asm volatile (/* Switch into ISTAT mode */
		      "RTH\n\t"
		      /* Enable local interrupts */
		      "MOV	TXMASKI, %1\n\t"
		      /*
		       * We can't directly "SWAP PC, PCX", so we swap via a
		       * temporary. Essentially we do:
		       *  PCX_new = 1f (the place to continue execution)
		       *  PC = PCX_old
		       */
		      "ADD	%0, CPC0, #(1f-.)\n\t"
		      "SWAP	PCX, %0\n\t"
		      "MOV	PC, %0\n"
		      /* Continue execution here with interrupts enabled */
		      "1:"
		      : "=a" (tmp)
		      : "r" (get_trigger_mask()));
}
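
/*
 * The idle loop polls need_resched() (hence TIF_POLLING_NRFLAG) and
 * only enters arch_idle() once interrupts have been disabled, so a
 * wakeup cannot slip in between the final check and the wait.
 */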
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			/*
			 * We need to disable interrupts here to ensure we don't
			 * miss a wakeup call.
			 */
			local_irq_disable();

			if (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
				if (cpu_is_offline(smp_processor_id()))
					cpu_die();
#endif
				arch_idle();
			} else {
				local_irq_enable();
			}
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
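
/*
 * Machine restart/halt/power-off.  SoC code may install its own
 * handlers via the soc_restart/soc_halt/pm_power_off hooks below; in
 * every case we finish by halting the hardware thread.
 */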
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*soc_restart)(char *cmd);
void (*soc_halt)(void);

void machine_restart(char *cmd)
{
	if (soc_restart)
		soc_restart(cmd);
	hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
	if (soc_halt)
		soc_halt();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	smp_send_stop();
	hard_processor_halt(HALT_OK);
}
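
/* Condition flag bits within ctx.Flags, as decoded by show_regs(). */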
#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1

void show_regs(struct pt_regs *regs)
{
	int i;
	const char *AX0_names[] = {"A0StP", "A0FrP"};
	const char *AX1_names[] = {"A1GbP", "A1LbP"};

	const char *DX0_names[] = {
		"D0Re0",
		"D0Ar6",
		"D0Ar4",
		"D0Ar2",
		"D0FrT",
		"D0.5 ",
		"D0.6 ",
		"D0.7 "
	};

	const char *DX1_names[] = {
		"D1Re0",
		"D1Ar5",
		"D1Ar3",
		"D1Ar1",
		"D1RtP",
		"D1.5 ",
		"D1.6 ",
		"D1.7 "
	};

	pr_info(" pt_regs @ %p\n", regs);
	pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
	pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
		regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
		regs->ctx.Flags & FLAG_N ? 'N' : 'n',
		regs->ctx.Flags & FLAG_O ? 'O' : 'o',
		regs->ctx.Flags & FLAG_C ? 'C' : 'c');
	pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
	pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

	/* AX regs */
	for (i = 0; i < 2; i++) {
		pr_info(" %s = 0x%08x ",
			AX0_names[i],
			regs->ctx.AX[i].U0);
		printk(" %s = 0x%08x\n",
		       AX1_names[i],
		       regs->ctx.AX[i].U1);
	}

	if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
		pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

	/* Special place with AXx.2 */
	pr_info(" A0.2 = 0x%08x ",
		regs->ctx.Ext.AX2.U0);
	printk(" A1.2 = 0x%08x\n",
	       regs->ctx.Ext.AX2.U1);

	/* 'extended' AX regs (nominally, just AXx.3) */
	for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
		pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0);
		printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
	}

	for (i = 0; i < 8; i++) {
		pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0);
		printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
	}

	show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *tsk)
{
	struct pt_regs *childregs = task_pt_regs(tsk);
	void *kernel_context = ((void *) childregs +
				sizeof(struct pt_regs));
	unsigned long global_base;

	BUG_ON(((unsigned long)childregs) & 0x7);
	BUG_ON(((unsigned long)kernel_context) & 0x7);

	memset(&tsk->thread.kernel_context, 0,
	       sizeof(tsk->thread.kernel_context));

	tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
						     ret_from_fork,
						     0, 0);
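
	/*
	 * The child's kernel context sits immediately above its pt_regs
	 * on the kernel stack (the Meta stack grows upwards); it has
	 * been primed so that the first switch to the child resumes in
	 * ret_from_fork.
	 */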

	if (unlikely(tsk->flags & PF_KTHREAD)) {
		/*
		 * Make sure we don't leak any kernel data to child's regs
		 * if kernel thread becomes a userspace thread in the future
		 */
		memset(childregs, 0, sizeof(struct pt_regs));

		global_base = __core_reg_get(A1GbP);
		childregs->ctx.AX[0].U1 = (unsigned long) global_base;
		childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
		/* Set D1Ar1=arg and D1RtP=usp (fn) */
		childregs->ctx.DX[4].U1 = usp;
		childregs->ctx.DX[3].U1 = arg;
		tsk->thread.int_depth = 2;
		return 0;
	}
	/*
	 * Get a pointer to where the new child's register block should have
	 * been pushed.
	 * The Meta's stack grows upwards, and the context is the first
	 * thing to be pushed by TBX (phew)
	 */
	*childregs = *current_pt_regs();
	/* Set the correct stack for the clone mode */
	if (usp)
		childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
	tsk->thread.int_depth = 1;

	/* set return value for child process */
	childregs->ctx.DX[0].U0 = 0;

	/* The TLS pointer is passed as an argument to sys_clone. */
	if (clone_flags & CLONE_SETTLS)
		tsk->thread.tls_ptr =
			(__force void __user *)childregs->ctx.DX[1].U1;
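
	/*
	 * DX[1].U1 is the D1Ar5 slot (cf. DX1_names[] in show_regs()),
	 * i.e. the fifth syscall argument, which is where the new TLS
	 * pointer arrives.
	 */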

#ifdef CONFIG_METAG_FPU
	if (tsk->thread.fpu_context) {
		struct meta_fpu_context *ctx;

		ctx = kmemdup(tsk->thread.fpu_context,
			      sizeof(struct meta_fpu_context), GFP_ATOMIC);
		tsk->thread.fpu_context = ctx;
	}
#endif

#ifdef CONFIG_METAG_DSP
	if (tsk->thread.dsp_context) {
		struct meta_ext_context *ctx;
		int i;

		ctx = kmemdup(tsk->thread.dsp_context,
			      sizeof(struct meta_ext_context), GFP_ATOMIC);
		for (i = 0; i < 2; i++)
			ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
					      GFP_ATOMIC);
		tsk->thread.dsp_context = ctx;
	}
#endif

	return 0;
}
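
/*
 * FPU/DSP state is managed lazily: contexts are allocated on first
 * use and torn down by the clear_*() helpers below on exec/exit.
 */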
#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
	thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
				      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
	thread->user_flags &= ~TBICTX_FPAC_BIT;
	kfree(thread->fpu_context);
	thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
	if (thread->dsp_context) {
		kfree(thread->dsp_context->ram[0]);
		kfree(thread->dsp_context->ram[1]);

		kfree(thread->dsp_context);

		thread->dsp_context = NULL;
	}

	__core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
}
#endif
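
/*
 * Switch kernel context via TBX: __TBISwitch() saves the outgoing
 * thread's context and resumes the incoming one; the pPara field
 * carries the 'prev' task pointer across the switch.
 */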
struct task_struct *__sched __switch_to(struct task_struct *prev,
					struct task_struct *next)
{
	TBIRES from, to;

	to.Switch.pCtx = next->thread.kernel_context;
	to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
	if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
		struct pt_regs *regs = task_pt_regs(prev);
		TBIRES state;

		state.Sig.SaveMask = prev->thread.user_flags;
		state.Sig.pCtx = &regs->ctx;

		if (!prev->thread.fpu_context)
			alloc_fpu_context(&prev->thread);
		if (prev->thread.fpu_context)
			__TBICtxFPUSave(state, prev->thread.fpu_context);
	}
	/*
	 * Force a restore of the FPU context next time this process is
	 * scheduled.
	 */
	if (prev->thread.fpu_context)
		prev->thread.fpu_context->needs_restore = true;
#endif

	from = __TBISwitch(to, &prev->thread.kernel_context);

	/* Restore TLS pointer for this process. */
	set_gateway_tls(current->thread.tls_ptr);

	return (struct task_struct *) from.Switch.pPara;
}

void flush_thread(void)
{
	clear_fpu(&current->thread);
	clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	clear_fpu(&current->thread);
	clear_dsp(&current->thread);
}

/*
 * TODO: figure out how to unwind the kernel stack here to figure out
 * where we went to sleep.
 */
unsigned long get_wchan(struct task_struct *p)
{
	return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	/* Returning 0 indicates that the FPU state was not stored (as it was
	 * not in use) */
	return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
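
/*
 * For example, with 4 KiB pages:
 *   ELF_PAGESTART(0x10203456)  == 0x10203000
 *   ELF_PAGEOFFSET(0x10203456) == 0x00000456
 *   ELF_PAGEALIGN(0x456)       == 0x1000
 */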

unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
			      struct elf_phdr *eppnt, int prot, int type,
			      unsigned long total_size)
{
	unsigned long map_addr, size;
	unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long raw_size = eppnt->p_filesz + page_off;
	unsigned long off = eppnt->p_offset - page_off;
	unsigned int tcm_tag;

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(raw_size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	tcm_tag = tcm_lookup_tag(addr);

	if (tcm_tag != TCM_INVALID_TAG)
		type &= ~MAP_FIXED;
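
	/*
	 * A tagged address means this segment belongs in Tightly Coupled
	 * Memory.  MAP_FIXED is dropped above because the segment is
	 * first mapped at an mmap-chosen address and then copied into
	 * the TCM region further down.
	 */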

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image (since size < total_size).
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end (this unmap is needed for ELF images with holes).
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
		struct tcm_allocation *tcm;
		unsigned long tcm_addr;

		tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
		if (!tcm)
			return -ENOMEM;

		tcm_addr = tcm_alloc(tcm_tag, raw_size);
		if (tcm_addr != addr) {
			kfree(tcm);
			return -ENOMEM;
		}

		tcm->tag = tcm_tag;
		tcm->addr = tcm_addr;
		tcm->size = raw_size;

		list_add(&tcm->list, &current->mm->context.tcm);

		eppnt->p_vaddr = map_addr;
		if (copy_from_user((void *) addr, (void __user *) map_addr,
				   raw_size))
			return -EFAULT;
	}

	return map_addr;
}
#endif