static void kernel_fpu_disable(void)
{
- WARN_ON(this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
}
static void kernel_fpu_enable(void)
{
- WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, false);
}
{
struct fpu *fpu = &current->thread.fpu;
- WARN_ON_ONCE(!irq_fpu_usable());
+ WARN_ON_FPU(!irq_fpu_usable());
kernel_fpu_disable();
struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active) {
- if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
+ if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
fpu__clear(fpu);
} else {
__fpregs_deactivate_hw();
*/
void fpu__save(struct fpu *fpu)
{
- WARN_ON(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);
preempt_disable();
if (fpu->fpregs_active) {
*/
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- WARN_ON(src_fpu != &current->thread.fpu);
+ WARN_ON_FPU(src_fpu != &current->thread.fpu);
/*
* Don't let 'init optimized' areas of the XSAVE area
*/
void fpu__activate_curr(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);
if (!fpu->fpstate_active) {
fpstate_init(&fpu->state);
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
- * This function must be called before we modify a stopped child's
+ * This function must be called before we read a task's fpstate.
+ *
+ * If the task has not used the FPU before then initialize its
* fpstate.
*
- * If the child has not used the FPU before then initialize its
+ * If the task has used the FPU before then save it.
+ */
+void fpu__activate_fpstate_read(struct fpu *fpu)
+{
+ /*
+ * If fpregs are active (in the current CPU), then
+ * copy them to the fpstate:
+ */
+ if (fpu->fpregs_active) {
+ fpu__save(fpu);
+ } else {
+ if (!fpu->fpstate_active) {
+ fpstate_init(&fpu->state);
+
+ /* Safe to do for current and for stopped child tasks: */
+ fpu->fpstate_active = 1;
+ }
+ }
+}
+
+/*
+ * This function must be called before we read or write a task's fpstate.
+ *
+ * If the task has not used the FPU before then initialize its
* fpstate.
*
- * If the child has used the FPU before then unlazy it.
+ * If the task has used the FPU before then save and unlazy it.
*
- * [ After this function call, after registers in the fpstate are
+ * [ If this function is used for non-current child tasks, then
+ * after this function call, after registers in the fpstate are
* modified and the child task has woken up, the child task will
* restore the modified FPU state from the modified context. If we
* didn't clear its lazy status here then the lazy in-registers
* state pending on its former CPU could be restored, corrupting
- * the modifications. ]
+ * the modifications.
*
- * This function is also called before we read a stopped child's
- * FPU state - to make sure it's initialized if the child has
- * no active FPU state.
+ * This function can be used for the current task as well, but
+ * only for reading the fpstate. Modifications to the fpstate
+ * will be lost on eagerfpu systems. ]
*
* TODO: A future optimization would be to skip the unlazying in
* the read-only case, it's not strictly necessary for
* read-only access to the context.
*/
-void fpu__activate_stopped(struct fpu *child_fpu)
+void fpu__activate_fpstate_write(struct fpu *fpu)
{
- WARN_ON_ONCE(child_fpu == &current->thread.fpu);
-
- if (child_fpu->fpstate_active) {
- child_fpu->last_cpu = -1;
+ /*
+ * If fpregs are active (in the current CPU), then
+ * copy them to the fpstate:
+ */
+ if (fpu->fpregs_active) {
+ fpu__save(fpu);
} else {
- fpstate_init(&child_fpu->state);
-
- /* Safe to do for stopped child tasks: */
- child_fpu->fpstate_active = 1;
+ if (fpu->fpstate_active) {
+ /* Invalidate any lazy state: */
+ fpu->last_cpu = -1;
+ } else {
+ fpstate_init(&fpu->state);
+
+ /* Safe to do for current and for stopped child tasks: */
+ fpu->fpstate_active = 1;
+ }
}
}
* with local interrupts disabled, as it is in the case of
* do_device_not_available()).
*/
-void fpu__restore(void)
+void fpu__restore(struct fpu *fpu)
{
- struct task_struct *tsk = current;
- struct fpu *fpu = &tsk->thread.fpu;
-
fpu__activate_curr(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
fpregs_activate(fpu);
if (unlikely(copy_fpstate_to_fpregs(fpu))) {
fpu__clear(fpu);
- force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
+ force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
} else {
- tsk->thread.fpu.counter++;
+ fpu->counter++;
}
kernel_fpu_enable();
}
*/
void fpu__clear(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
if (!use_eager_fpu()) {
/* FPU state will be reallocated lazily at the first use. */