8 u32 cwd; /* FPU Control Word */
9 u32 swd; /* FPU Status Word */
10 u32 twd; /* FPU Tag Word */
11 u32 fip; /* FPU IP Offset */
12 u32 fcs; /* FPU IP Selector */
13 u32 foo; /* FPU Operand Pointer Offset */
14 u32 fos; /* FPU Operand Pointer Selector */
16 /* 8*10 bytes for each FP-reg = 80 bytes: */
/* Software status information [not touched by FSAVE]: */
24 u16 cwd; /* Control Word */
25 u16 swd; /* Status Word */
26 u16 twd; /* Tag Word */
27 u16 fop; /* Last Instruction Opcode */
30 u64 rip; /* Instruction Pointer */
31 u64 rdp; /* Data Pointer */
34 u32 fip; /* FPU IP Offset */
35 u32 fcs; /* FPU IP Selector */
36 u32 foo; /* FPU Operand Offset */
37 u32 fos; /* FPU Operand Selector */
40 u32 mxcsr; /* MXCSR Register State */
41 u32 mxcsr_mask; /* MXCSR Mask */
43 /* 8*16 bytes for each FP-reg = 128 bytes: */
46 /* 16*16 bytes for each XMM-reg = 256 bytes: */
56 } __attribute__((aligned(16)));
59 * Software based FPU emulation state:
69 /* 8*10 bytes for each FP-reg = 80 bytes: */
77 struct math_emu_info *info;
82 * List of XSAVE features Linux knows about:
/*
 * Per-feature mask values, built from the XSTATE_BIT_* bit numbers
 * (declared elsewhere in this header — see the "List of XSAVE features"
 * section above).
 *
 * Use an unsigned literal (1U) for the shifts so the resulting masks are
 * unsigned and can be OR-combined into wider unsigned feature masks
 * without sign-extension surprises.
 */
#define XSTATE_FP		(1U << XSTATE_BIT_FP)
#define XSTATE_SSE		(1U << XSTATE_BIT_SSE)
#define XSTATE_YMM		(1U << XSTATE_BIT_YMM)
#define XSTATE_BNDREGS		(1U << XSTATE_BIT_BNDREGS)
#define XSTATE_BNDCSR		(1U << XSTATE_BIT_BNDCSR)
#define XSTATE_OPMASK		(1U << XSTATE_BIT_OPMASK)
#define XSTATE_ZMM_Hi256	(1U << XSTATE_BIT_ZMM_Hi256)
#define XSTATE_Hi16_ZMM		(1U << XSTATE_BIT_Hi16_ZMM)

/* Legacy x87 FP plus SSE state: */
#define XSTATE_FPSSE		(XSTATE_FP | XSTATE_SSE)
/* The three components that together form the AVX-512 state: */
#define XSTATE_AVX512		(XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
110 * There are 16x 256-bit AVX registers named YMM0-YMM15.
111 * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
112 * and are stored in 'struct fxregs_state::xmm_space[]'.
114 * The high 128 bits are stored here:
115 * 16x 128 bits == 256 bytes.
121 /* We don't support LWP yet: */
126 /* Intel MPX support: */
137 struct xstate_header {
141 } __attribute__((packed));
144 struct fxregs_state i387;
145 struct xstate_header header;
146 struct ymmh_struct ymmh;
147 struct lwp_struct lwp;
148 struct bndreg bndreg[4];
149 struct bndcsr bndcsr;
150 /* New processor state extensions will go here. */
151 } __attribute__ ((packed, aligned (64)));
154 struct fregs_state fsave;
155 struct fxregs_state fxsave;
156 struct swregs_state soft;
157 struct xregs_state xsave;
164 * In-memory copy of all FPU registers that we save/restore
165 * over context switches. If the task is using the FPU then
166 * the registers in the FPU are more recent than this state
167 * copy. If the task context-switches away then they get
168 * saved here and represent the FPU state.
170 * After context switches there may be a (short) time period
171 * during which the in-FPU hardware registers are unchanged
172 * and still perfectly match this state, if the tasks
173 * scheduled afterwards are not using the FPU.
175 * This is the 'lazy restore' window of optimization, which
 * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
178 * We detect whether a subsequent task uses the FPU via setting
179 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
181 * During this window, if the task gets scheduled again, we
182 * might be able to skip having to do a restore from this
183 * memory buffer to the hardware registers - at the cost of
184 * incurring the overhead of #NM fault traps.
186 * Note that on modern CPUs that support the XSAVEOPT (or other
187 * optimized XSAVE instructions), we don't use #NM traps anymore,
188 * as the hardware can track whether FPU registers need saving
189 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
190 * logic, which unconditionally saves/restores all FPU state
 * across context switches (if FPU state exists).
193 union fpregs_state state;
198 * Records the last CPU on which this context was loaded into
199 * FPU registers. (In the lazy-restore case we might be
200 * able to reuse FPU registers across multiple context switches
201 * this way, if no intermediate task used the FPU.)
203 * A value of -1 is used to indicate that the FPU state in context
204 * memory is newer than the FPU state in registers, and that the
205 * FPU state should be reloaded next time the task is run.
207 unsigned int last_cpu;
212 * This flag indicates whether this context is active: if the task
213 * is not running then we can restore from this context, if the task
214 * is running then we should save into this context.
216 unsigned char fpstate_active;
221 * This flag determines whether a given context is actively
222 * loaded into the FPU's registers and that those registers
223 * represent the task's current FPU state.
225 * Note the interaction with fpstate_active:
227 * # task does not use the FPU:
228 * fpstate_active == 0
230 * # task uses the FPU and regs are active:
231 * fpstate_active == 1 && fpregs_active == 1
233 * # the regs are inactive but still match fpstate:
234 * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
236 * The third state is what we use for the lazy restore optimization
237 * on lazy-switching CPUs.
239 unsigned char fpregs_active;
244 * This counter contains the number of consecutive context switches
245 * during which the FPU stays used. If this is over a threshold, the
246 * lazy FPU restore logic becomes eager, to save the trap overhead.
247 * This is an unsigned char so that after 256 iterations the counter
248 * wraps and the context switch behavior turns lazy again; this is to
249 * deal with bursty apps that only use the FPU for a short time:
251 unsigned char counter;
254 #endif /* _ASM_X86_FPU_H */