From 0cccdda8d1512af4d3f6913044e8c8e58e15ef37 Mon Sep 17 00:00:00 2001
From: Heiko Carstens <heiko.carstens@de.ibm.com>
Date: Wed, 8 Oct 2014 10:03:08 +0200
Subject: [PATCH] s390/ftrace: simplify enabling/disabling of
 ftrace_graph_caller

We can simply patch the mask field of the branch relative on
condition instruction at the beginning of the ftrace_graph_caller
code block: a mask of zero turns it into a nop, a mask of all ones
into an unconditional branch.
This makes the logic even simpler and gets rid of the displacement
calculation.
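
For illustration only (not part of this patch): the "j" at
ftrace_graph_caller is a brc 15,... instruction in RI format, encoded
as the opcode 0xa7, the condition mask in the upper nibble of byte 1,
the opcode extension 0x4 in the lower nibble, and a 16-bit halfword
offset. A small standalone C sketch of the byte rewrite (the
set_brc_mask() helper and the dummy offset are made up for this
example):

  #include <stdint.h>
  #include <stdio.h>

  /* Replace the condition mask (upper nibble of byte 1) of a BRC
   * instruction; the lower nibble 0x4 is the opcode extension and is
   * left untouched.
   */
  static void set_brc_mask(uint8_t insn[4], uint8_t mask)
  {
          insn[1] = (mask << 4) | (insn[1] & 0x0f);
  }

  int main(void)
  {
          /* brc 15,<dummy offset>, i.e. an unconditional "j" */
          uint8_t insn[4] = { 0xa7, 0xf4, 0x00, 0x10 };

          set_brc_mask(insn, 0x0); /* mask 0: never taken -> nop */
          printf("enabled : a7 %02x\n", insn[1]); /* prints a7 04 */

          set_brc_mask(insn, 0xf); /* mask 15: always taken -> branch */
          printf("disabled: a7 %02x\n", insn[1]); /* prints a7 f4 */
          return 0;
  }

Only byte 1 ever changes, which is why a single one-byte
probe_kernel_write() is enough in the functions below.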

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
 arch/s390/kernel/ftrace.c | 22 +++++++++-------------
 arch/s390/kernel/mcount.S |  3 +--
 2 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index f0072125926c..51d14fe5eb9a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -138,28 +138,24 @@ out:
 
 /*
  * Patch the kernel code at ftrace_graph_caller location. The instruction
- * there is branch relative and save to prepare_ftrace_return. To disable
- * the call to prepare_ftrace_return we patch the bras offset to point
- * directly after the instructions. To enable the call we calculate
- * the original offset to prepare_ftrace_return and put it back.
+ * there is branch relative on condition. To enable the ftrace graph code
+ * block, we simply patch the mask field of the instruction to zero and
+ * turn the instruction into a nop.
+ * To disable the ftrace graph code the mask field will be patched to
+ * all ones, which turns the instruction into an unconditional branch.
  */
-
 int ftrace_enable_ftrace_graph_caller(void)
 {
-	static unsigned short offset = 0x0002;
+	u8 op = 0x04; /* set mask field to zero */
 
-	return probe_kernel_write((void *) ftrace_graph_caller + 2,
-				  &offset, sizeof(offset));
+	return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-	unsigned short offset;
+	u8 op = 0xf4; /* set mask field to all ones */
 
-	offset = ((void *) &ftrace_graph_caller_end -
-		  (void *) ftrace_graph_caller) / 2;
-	return probe_kernel_write((void *) ftrace_graph_caller + 2,
-				  &offset, sizeof(offset));
+	return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 07abe8d464d4..4300ea374826 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -49,8 +49,7 @@ ENTRY(ftrace_caller)
 	basr	%r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # The j instruction gets runtime patched to a nop instruction.
-# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-#	j	.+4
+# See ftrace_enable_ftrace_graph_caller.
 ENTRY(ftrace_graph_caller)
 	j	ftrace_graph_caller_end
 	lg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)
-- 
2.34.1