For non-SMP, use the new random canary value that is stored in the
task struct whenever a new task is forked. Based on the ARM version in
commit df0698be14c6683606d5df2d83e3ae40f85ed0d9 and subject to the same
limitation: the variable GCC expects, __stack_chk_guard, is global,
so this will not work on SMP.
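The fork-side half of this is already in place: every new task receives
its own random canary in dup_task_struct(). A minimal sketch of that
setup (not the verbatim kernel/fork.c source, but the shape of it at the
time of this patch):

    #ifdef CONFIG_CC_STACKPROTECTOR
            tsk->stack_canary = get_random_int();   /* fresh canary per task */
    #endif

This patch only teaches the MIPS context-switch code to copy that
per-task value into the single global slot GCC actually checks.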
Quoting Nicolas Pitre <nico@fluxnic.net>: "One way to overcome this
GCC limitation would be to locate the __stack_chk_guard variable into
a memory page of its own for each CPU, and then use TLB locking to
have each CPU see its own page at the same virtual address for each of
them."
Signed-off-by: Gregory Fong <gregory.0xf0@gmail.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5488/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_CC_STACKPROTECTOR)
+ OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
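TASK_STACK_CANARY exposes the byte offset of stack_canary within
struct task_struct to assembly code; resume() uses it below to reach the
canary through the next-task pointer. The OFFSET()/DEFINE() helpers come
from include/linux/kbuild.h and work roughly like this (sketch):

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

The "->" markers are scraped out of the compiler's assembly output to
generate include/generated/asm-offsets.h, which the .S files below pick
up via asm/asm-offsets.h.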
mtc0 t0, $11,7 /* CvmMemCtl */
#endif
3:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_L t8, __stack_chk_guard      # address of the global guard variable
+ LONG_L t9, TASK_STACK_CANARY(a1) # canary of the task being switched in
+ LONG_S t9, 0(t8)                 # publish it as the live guard value
+#endif
+
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
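In resume(), a1 holds the task_struct pointer of the task being switched
in, so the three added instructions are the assembly equivalent of a
single C assignment. As a sketch (next here is hypothetical shorthand
for the incoming task):

    __stack_chk_guard = next->stack_canary;

The same sequence is added to each of the MIPS switch implementations
this patch touches, as the remaining hunks show.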
fpu_save_single a0, t0 # clobbers t0
1:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_L t8, __stack_chk_guard      # address of the global guard variable
+ LONG_L t9, TASK_STACK_CANARY(a1) # canary of the task being switched in
+ LONG_S t9, 0(t8)                 # publish it as the live guard value
+#endif
+
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
# clobbers t1
1:
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_L t8, __stack_chk_guard      # address of the global guard variable
+ LONG_L t9, TASK_STACK_CANARY(a1) # canary of the task being switched in
+ LONG_S t9, 0(t8)                 # publish it as the live guard value
+#endif
+
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
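For context, this is the check the refreshed guard feeds. A conceptual
sketch (hypothetical function, not part of this patch) of what GCC emits
for a stack-protected function:

    extern unsigned long __stack_chk_guard;   /* the global resume() refreshes */
    void __stack_chk_fail(void);              /* kernel panics here on mismatch */

    void example(void)
    {
            unsigned long canary = __stack_chk_guard;   /* prologue: stash guard */
            char buf[64];
            /* ... body that might overflow buf ... */
            if (canary != __stack_chk_guard)            /* epilogue: verify */
                    __stack_chk_fail();
    }

Since resume() always installs the canary of the task being scheduled in,
the value a task's epilogues compare against is the same one its
prologues stored.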