/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>

#define MXCSR_DEFAULT		0x1f80

extern unsigned int mxcsr_feature_mask;

extern union fpregs_state init_fpstate;

extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;		/* default x87 control word */
	fx->mxcsr = MXCSR_DEFAULT;
}

extern int  dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_stopped(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(void);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);

extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, its register contents will still
 * be saved into its fpstate on the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}
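
/*
 * Returns true if this CPU still holds @fpu's most recent register state,
 * i.e. a lazy restore may leave the registers untouched instead of
 * reloading them from memory:
 */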
static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

#define X87_FSW_ES		(1 << 7)	/* Exception Summary */

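/*
 * These feature checks compile down to static branches that get patched
 * during boot; the '_safe' variants are usable even before alternatives
 * patching has run:
 */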
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
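
/*
 * Usage sketch (illustrative - mirrors the helpers below): both macros
 * expand to a statement expression that evaluates to 0 on success and
 * -1 if the instruction faulted, e.g.:
 *
 *	err = user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 *
 * user_insn() additionally brackets the instruction with STAC/CLAC so
 * that it may access user memory; check_insn() is for kernel buffers.
 *
 * The helpers below follow a copy_<source>_to_<destination> naming
 * scheme: 'fregs'/'fxregs'/'xregs' name the hardware register formats,
 * 'kernel' an in-kernel fpstate buffer, 'user' a user-space pointer.
 */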

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline int copy_kernel_to_fregs(struct fregs_state *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21):
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/*
 * This must be called with preemption disabled. It returns
 * 'true' if the FPU state is still intact and we can
 * keep the registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so the registers are essentially destroyed.
 * Modern FPU state can be kept in the registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state.fsave));

	return 0;
}
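
/*
 * The return value of copy_fpregs_to_fpstate() drives the lazy-restore
 * bookkeeping: switch_fpu_prepare() below keeps ->last_cpu valid only if
 * the register contents survived the save.
 */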

static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
{
	if (use_xsave())
		return copy_kernel_to_xregs(&fpu->state.xsave, -1);
	else if (use_fxsr())
		return copy_kernel_to_fxregs(&fpu->state.fxsave);
	else
		return copy_kernel_to_fregs(&fpu->state.fsave);
}

static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The [addr] operand just needs to reference some variable that is
	 * likely to be in L1.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpu->fpregs_active));
	}

	return __copy_fpstate_to_fpregs(fpu);
}

/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}

/* Must be paired with an 'stts' (__fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

/* Must be paired with a 'clts' (__fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software 'fpregs_active' flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}
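
/*
 * Typical pairing (see user_fpu_begin() further below for the real thing):
 *
 *	preempt_disable();
 *	if (!fpregs_active())
 *		fpregs_activate(fpu);
 *	preempt_enable();
 */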

/*
 * Definitions for the eXtended Control Register instructions
 */

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
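
/*
 * Illustrative use (a sketch - the actual enabling lives in the xstate
 * init code; cr4_set_bits() and xfeatures_mask are assumed here and are
 * not defined in this header):
 *
 *	cr4_set_bits(X86_CR4_OSXSAVE);
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
 *
 * XCR_XFEATURE_ENABLED_MASK selects XCR0, whose bits enable the xstate
 * components that XSAVE/XRSTOR will manage.
 */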

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU (xsave) processors, otherwise only if the past 5
	 * consecutive context switches used the FPU.
	 */
	fpu.preload = new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload) {
		if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
			fpu__clear(new_fpu);
	}
}
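
/*
 * Caller pattern sketch (assumed from the two-stage description above;
 * the real call sites live in the arch context-switch code):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	... switch stacks, segments, etc ...
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */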

/*
 * Signal frame handlers...
 */
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fx, int size);

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

#endif /* _ASM_X86_FPU_INTERNAL_H */