/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * On lazy-FPU systems, we can do a kernel_fpu_begin/end() pair *ONLY*
 * if that pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
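
/*
 * Usage sketch (illustrative only, not part of this file): kernel code
 * that wants to execute SIMD instructions brackets them with
 * kernel_fpu_begin()/kernel_fpu_end(), checking irq_fpu_usable() first
 * when it can run in interrupt context.  The function name below is
 * hypothetical:
 *
 *	static void example_simd_copy(void *dst, const void *src, size_t len)
 *	{
 *		if (irq_fpu_usable()) {
 *			kernel_fpu_begin();
 *			// SIMD registers may be clobbered freely here
 *			kernel_fpu_end();
 *		} else {
 *			memcpy(dst, src, len);	// scalar fallback
 *		}
 *	}
 */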
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);
void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
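
/*
 * Usage sketch for the TS helpers above (hypothetical caller): code
 * running in atomic context that could otherwise trip over CR0::TS
 * saves and restores it around the critical region:
 *
 *	int ts_state = irq_ts_save();
 *	// ... work that must not fault on CR0::TS ...
 *	irq_ts_restore(ts_state);
 */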
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;	/* all exceptions masked, 64-bit precision */
	fp->swd = 0xffff0000u;	/* status word clear */
	fp->twd = 0xffffffffu;	/* all registers tagged empty */
	fp->fos = 0xffff0000u;
}
void fpstate_init(union fpregs_state *state)
{
	if (!cpu_has_fpu) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, xstate_size);

	if (cpu_has_fxsr)
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In both the 'eager' and the 'lazy' case we save hardware registers
 * directly to the destination buffer.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}
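
/*
 * Call-site sketch (hypothetical, modeled on the fork path): the arch
 * task-copy code hands the parent's and the child's FPU contexts to
 * fpu__copy() while the parent is current:
 *
 *	int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 *	{
 *		...
 *		return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 */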
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);

			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}
/*
 * This function must be called before we read or write a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save and unlazy it.
 *
 * [ If this function is used for non-current child tasks, then
 *   after this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications.
 *
 *   This function can be used for the current task as well, but
 *   only for reading the fpstate. Modifications to the fpstate
 *   will be lost on eagerfpu systems. ]
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (fpu->fpstate_active) {
			/* Invalidate any lazy state: */
			fpu->last_cpu = -1;
		} else {
			fpstate_init(&fpu->state);

			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}
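
/*
 * Usage sketch (hypothetical, modeled on the ptrace/regset path): a
 * debugger reading a stopped child's FPU registers first unlazies the
 * state with the read variant, while a writer uses the write variant
 * so the modified context is what eventually gets restored:
 *
 *	fpu__activate_fpstate_read(&child->thread.fpu);
 *	// ... copy child->thread.fpu.state out to the tracer ...
 *
 *	fpu__activate_fpstate_write(&child->thread.fpu);
 *	// ... copy new register values into child->thread.fpu.state ...
 */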
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__clear(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
	} else {
		fpu->counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
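
/*
 * Call-site sketch (hypothetical, modeled on the #NM trap path): the
 * device-not-available handler runs with interrupts disabled and hands
 * the current task's FPU context to fpu__restore():
 *
 *	dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		fpu__restore(&current->thread.fpu); // interrupts stay disabled
 *	}
 */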
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}
/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}
/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr)
		return fpu->state.fxsave.cwd;

	return (unsigned short)fpu->state.fsave.cwd;
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr)
		return fpu->state.fxsave.swd;

	return (unsigned short)fpu->state.fsave.swd;
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm)
		return fpu->state.fxsave.mxcsr;

	return MXCSR_DEFAULT;
}
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);
		err = ~(mxcsr >> 7) & mxcsr;
	}
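
	/*
	 * Worked example of the MXCSR decode above (illustrative values):
	 * with the default MXCSR of 0x1f80 all mask bits are set, so
	 * ~(mxcsr >> 7) clears every status bit and err is 0 - a masked
	 * exception never gets this far.  If a program unmasks divide-by-zero
	 * (clearing mask bit 0x200) and then divides by zero, the ZE status
	 * bit is set: mxcsr == 0x1d84, mxcsr >> 7 == 0x3b, and
	 * ~0x3b & 0x1d84 leaves bit 0x004 set, which the chain below maps
	 * to FPE_FLTDIV.
	 */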
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}