2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
7 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2000 MIPS Technologies, Inc.
11 * written by Carsten Langgaard, carstenl@mips.com
14 #include <asm/cachectl.h>
15 #include <asm/fpregdef.h>
16 #include <asm/mipsregs.h>
17 #include <asm/asm-offsets.h>
18 #include <asm/pgtable-bits.h>
19 #include <asm/regdef.h>
20 #include <asm/stackframe.h>
21 #include <asm/thread_info.h>
23 #include <asm/asmmacro.h>
26 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.
29 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
32 * task_struct *resume(task_struct *prev, task_struct *next,
33 * struct thread_info *next_ti, s32 fp_save)
/*
 * NOTE(review): this chunk is missing interior lines (the LEAF entry,
 * .set directives and several instructions are not visible), so the
 * comments below describe only the instructions that ARE visible.
 * a0 = prev task, a1 = next task throughout.
 */
/* Store CP0 Status for prev (t1 presumably loaded off-view) into its thread struct. */
38 LONG_S t1, THREAD_STATUS(a0)
/* Save prev's callee-saved (non-scratch) register set. */
39 cpu_save_nonscratch a0
/* Save the return address so prev continues after resume() when rescheduled. */
40 LONG_S ra, THREAD_REG31(a0)
43 * Check whether we need to save any FP context. FP context is saved
44 * iff the process has used the context with the scalar FPU or the MSA
45 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
46 * _TIF_USEDMSA respectively. switch_to will have set fp_save
47 * accordingly to an FP_SAVE_ enum value.
52 * We do. Clear the saved CU1 bit for prev, such that next time it is
53 * scheduled it will start in userland with the FPU disabled. If the
54 * task uses the FPU then it will be enabled again via the do_cpu trap.
55 * This allows us to lazily restore the FP context.
/* t3 = prev's thread_info; used off-view to clear the saved CU1 bit. */
57 PTR_L t3, TASK_THREAD_INFO(a0)
63 /* Check whether we're saving scalar or vector context. */
66 /* Save 128b MSA vector context. */
70 1: /* Save 32b/64b scalar FP context. */
71 fpu_save_double a0 t0 t1 # c0_status passed in t0
/* With a global (non-per-task) canary and no SMP, propagate next's canary. */
75 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
76 PTR_LA t8, __stack_chk_guard
/* t9 = next's stack canary; the store to __stack_chk_guard is off-view. */
77 LONG_L t9, TASK_STACK_CANARY(a1)
82 * The order of restoring the registers takes care of the race
83 * updating $28, $29 and kernelsp without disabling ints.
/* Restore next's callee-saved registers (also sets up $28/$29 off-view). */
86 cpu_restore_nonscratch a1
/* Compute top-of-stack for next ($28 = new thread_info) and publish kernelsp. */
88 PTR_ADDU t0, $28, _THREAD_SIZE - 32
89 set_saved_sp t0, t1, t2
90 #ifdef CONFIG_MIPS_MT_SMTC
91 /* Read-modify-writes of Status must be atomic on a VPE */
/* Set IXMT (inhibit interrupts for this TC) while splicing Status bits. */
93 ori t1, t2, TCSTATUS_IXMT
/* Remember whether IXMT was already set so it can be restored below. */
95 andi t2, t2, TCSTATUS_IXMT
101 #endif /* CONFIG_MIPS_MT_SMTC */
102 mfc0 t1, CP0_STATUS /* Do we really need this? */
/* a2 = next's saved CP0 Status, merged into the live Status off-view. */
105 LONG_L a2, THREAD_STATUS(a1)
110 #ifdef CONFIG_MIPS_MT_SMTC
/* NOTE(review): VPECONTROL_TE test and surrounding branches are off-view. */
112 andi t0, t0, VPECONTROL_TE
/* Restore the TC's previous IXMT state now that Status is consistent. */
116 mfc0 t1, CP0_TCSTATUS
117 xori t1, t1, TCSTATUS_IXMT
119 mtc0 t1, CP0_TCSTATUS
121 #endif /* CONFIG_MIPS_MT_SMTC */
127 * Save a thread's fp context.
/*
 * NOTE(review): the LEAF entry, the #else (32-bit, even-register-only)
 * branch and the #endif are not visible in this chunk.
 * On 64-bit or MIPS32r2 all 32 FP registers exist, so save doubles directly.
 */
130 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
133 fpu_save_double a0 t0 t1 # clobbers t1
138 * Restore a thread's fp context.
/*
 * NOTE(review): mirror of _save_fp above; the LEAF entry, #else branch
 * and #endif are not visible in this chunk.
 */
141 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
144 fpu_restore_double a0 t0 t1 # clobbers t1
148 #ifdef CONFIG_CPU_HAS_MSA
/*
 * NOTE(review): only the header comments of the MSA save/restore helpers
 * survive in this chunk; their bodies (and the closing #endif) are not
 * visible here.
 */
151 * Save a thread's MSA vector context.
159 * Restore a thread's MSA vector context.
169 * Load the FPU with signalling NaNs.  The bit pattern we use has the
170 * property that no matter whether considered as single or as double
171 * precision it represents signalling NaNs.
173 * We initialize fcr31 to rounding to nearest, no exceptions.
/* fcr31 default: round-to-nearest, all enable/flag/cause bits clear. */
176 #define FPU_DEFAULT 0x00000000
179 #ifdef CONFIG_MIPS_MT_SMTC
180 /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
181 mfc0 t0, CP0_TCSTATUS
182 /* Bit position is the same for Status, TCStatus */
/* NOTE(review): the `li`/`or` setting the CU1 bit in t0 is off-view. */
185 mtc0 t0, CP0_TCSTATUS
186 #else /* Normal MIPS CU1 enable */
191 #endif /* CONFIG_MIPS_MT_SMTC */
/* NOTE(review): the preceding load/shift that puts the FR bit in t0's
 * sign position is off-view; bgez falls through only when FR is set. */
201 bgez t0, 1f # 16 / 32 register mode?
202 #ifdef CONFIG_CPU_MIPS32
203 #ifdef CONFIG_CPU_MIPS32_R2
/* Status.FR selects 32 x 64-bit FP registers; if clear, skip the
 * initialisation of the upper 32 bits of each register. */
259 sll t0, t0, 5 # is Status.FR set?
260 bgez t0, 1f # no: skip setting upper 32b
295 #endif /* CONFIG_CPU_MIPS32_R2 */