[S390] ftrace: add function graph tracer support
author     Heiko Carstens <heiko.carstens@de.ibm.com>
           Fri, 12 Jun 2009 08:26:46 +0000 (10:26 +0200)
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>
           Fri, 12 Jun 2009 08:27:39 +0000 (10:27 +0200)
Add function graph tracer support for s390: on entry of a traced function the
mcount/ftrace_caller glue passes the saved return address to
prepare_ftrace_return(), which pushes it onto the per-task return stack and
substitutes return_to_handler, so that function exits can be reported as well.
With dynamic ftrace, the branch at ftrace_graph_caller is patched at run time
to enable or disable this hook.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
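
The sketch below is a minimal user-space C model of just that entry/exit
bookkeeping, added here only as an illustration; struct shadow_ent,
trace_entry(), trace_exit() and the dummy addresses are made-up names, not
kernel APIs.

#include <stdio.h>

#define MAX_DEPTH 64

/* One entry per nested call, modelling the per-task return stack. */
struct shadow_ent {
        unsigned long func;     /* address of the traced function      */
        unsigned long parent;   /* original return address, saved here */
};

static struct shadow_ent shadow[MAX_DEPTH];
static int depth = -1;

/* Entry hook, modelling prepare_ftrace_return(): save the real return
 * address and hand back a trampoline address instead. */
static unsigned long trace_entry(unsigned long func, unsigned long parent)
{
        if (depth + 1 >= MAX_DEPTH)
                return parent;  /* models the -EBUSY case: stack left untouched */
        shadow[++depth] = (struct shadow_ent){ .func = func, .parent = parent };
        printf("%*scall %#lx\n", depth * 2, "", func);
        return 0xfee1dead;      /* stand-in for the address of return_to_handler */
}

/* Exit hook, modelling ftrace_return_to_handler(): report the exit and
 * restore the original return address. */
static unsigned long trace_exit(void)
{
        unsigned long parent = shadow[depth].parent;

        printf("%*sret  %#lx\n", depth * 2, "", shadow[depth].func);
        depth--;
        return parent;
}

int main(void)
{
        unsigned long outer = trace_entry(0x1000, 0x500);   /* outer function entered */
        unsigned long inner = trace_entry(0x2000, 0x1010);  /* nested call */

        trace_exit();   /* nested call returns, its original parent is restored */
        trace_exit();   /* outer function returns */
        (void)outer;
        (void)inner;
        return 0;
}

Files changed by this patch: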
arch/s390/Kconfig
arch/s390/include/asm/ftrace.h
arch/s390/kernel/Makefile
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount.S
arch/s390/kernel/s390_ext.c
arch/s390/kernel/time.c
arch/s390/kernel/vmlinux.lds.S
drivers/s390/cio/cio.c

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 480590f2157003746e70b625608c9a8166b3a20a..9023cc900bdae646a3dd2fdf21464ce39a94d183 100644
@@ -85,6 +85,7 @@ config S390
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
+       select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_DEFAULT_NO_SPIN_MUTEXES
        select HAVE_OPROFILE
        select HAVE_KPROBES
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index ba23d8f97d075bda7810ed3ac96519b67ff9fc37..96c14a9102b8beadc324ae8b50cf8c81806af420 100644
@@ -11,11 +11,13 @@ struct dyn_arch_ftrace { };
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE 24
-#define MCOUNT_OFFSET   14
+#define MCOUNT_OFFSET_RET 18
+#define MCOUNT_INSN_SIZE  24
+#define MCOUNT_OFFSET    14
 #else
-#define MCOUNT_INSN_SIZE 30
-#define MCOUNT_OFFSET    8
+#define MCOUNT_OFFSET_RET 26
+#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_OFFSET     8
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index ce172bfaab8ad9cc432c300544bf2ff5bf26df4d..c75ed43b1a181e250695312fae9844e64185f590 100644
@@ -3,11 +3,8 @@
 #
 
 ifdef CONFIG_FUNCTION_TRACER
-# Do not trace early boot code
+# Don't trace early setup code and tracing code
 CFLAGS_REMOVE_early.o = -pg
-endif
-
-ifdef CONFIG_DYNAMIC_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
@@ -46,6 +43,7 @@ obj-$(CONFIG_STACKTRACE)      += stacktrace.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 0b81a784e0394564584fa6ee67abfca42de222c5..c92a10953279fc16c0645a6849a9300c59934828 100644
@@ -7,13 +7,17 @@
  *
  */
 
+#include <linux/hardirq.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <asm/lowcore.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 void ftrace_disable_code(void);
+void ftrace_disable_return(void);
 void ftrace_call_code(void);
 void ftrace_nop_code(void);
 
@@ -28,6 +32,7 @@ asm(
        "       .word   0x0024\n"
        "       lg      %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
        "       basr    %r14,%r1\n"
+       "ftrace_disable_return:\n"
        "       lg      %r14,8(15)\n"
        "       lgr     %r0,%r0\n"
        "0:\n");
@@ -50,6 +55,7 @@ asm(
        "       j       0f\n"
        "       l       %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
        "       basr    %r14,%r1\n"
+       "ftrace_disable_return:\n"
        "       l       %r14,4(%r15)\n"
        "       j       0f\n"
        "       bcr     0,%r7\n"
@@ -130,3 +136,69 @@ int __init ftrace_dyn_arch_init(void *data)
        *(unsigned long *)data = 0;
        return 0;
 }
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location:
+ * The instruction there is branch relative on condition. The condition mask
+ * is either all ones (always branch aka disable ftrace_graph_caller) or all
+ * zeroes (nop aka enable ftrace_graph_caller).
+ * Instruction format for brc is a7m4xxxx where m is the condition mask.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       unsigned short opcode = 0xa704;
+
+       return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       unsigned short opcode = 0xa7f4;
+
+       return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+       return addr - (ftrace_disable_return - ftrace_disable_code);
+}
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+       return addr - MCOUNT_OFFSET_RET;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it onto the return address
+ * stack of the current task.
+ */
+unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+{
+       struct ftrace_graph_ent trace;
+
+       /* NMIs are currently unsupported. */
+       if (unlikely(in_nmi()))
+               goto out;
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               goto out;
+       if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+               goto out;
+       trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+       /* Only trace if the calling function expects to. */
+       if (!ftrace_graph_entry(&trace)) {
+               current->curr_ret_stack--;
+               goto out;
+       }
+       parent = (unsigned long)return_to_handler;
+out:
+       return parent;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
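
Aside (not part of the patch): the comment in ftrace.c above gives the brc
encoding as a7m4xxxx. The purely illustrative C snippet below decodes the
condition mask of the two halfwords the patch writes, just to make the
enable/disable values concrete:

#include <stdio.h>
#include <stdint.h>

/* brc is RI-format: opcode 0xa7, condition mask m in bits 8-11, opcode
 * extension 0x4 in bits 12-15, followed by a 16-bit relative offset.
 * Mask 0xf always branches (ftrace_graph_caller skipped, i.e. disabled);
 * mask 0x0 never branches and acts as a nop (ftrace_graph_caller enabled). */
static void decode_brc(uint16_t halfword)
{
        unsigned int mask = (halfword >> 4) & 0xf;

        printf("%#06x: mask %#x -> %s\n", (unsigned int)halfword, mask,
               mask == 0xf ? "always branch (graph caller disabled)" :
               mask == 0x0 ? "never branch, nop (graph caller enabled)" :
               "conditional");
}

int main(void)
{
        decode_brc(0xa7f4);     /* written by ftrace_disable_ftrace_graph_caller() */
        decode_brc(0xa704);     /* written by ftrace_enable_ftrace_graph_caller() */
        return 0;
}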
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 0aa85ec94d0821ea8ff43e990063d6ec9f423590..2a0a5e97ba8c2a6f954c2209a1836fe70d92ac0f 100644
@@ -34,6 +34,18 @@ ftrace_caller:
        larl    %r14,ftrace_dyn_func
        lg      %r14,0(%r14)
        basr    %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl  ftrace_graph_caller
+ftrace_graph_caller:
+       # This unconditional branch gets runtime patched. Change only if
       # you know what you are doing. See ftrace_enable_ftrace_graph_caller().
+       j       0f
+       lg      %r2,272(%r15)
+       lg      %r3,168(%r15)
+       brasl   %r14,prepare_ftrace_return
+       stg     %r2,168(%r15)
+0:
+#endif
        aghi    %r15,160
        lmg     %r2,%r5,32(%r15)
        lg      %r14,112(%r15)
@@ -62,6 +74,12 @@ _mcount:
        larl    %r14,ftrace_trace_function
        lg      %r14,0(%r14)
        basr    %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       lg      %r2,272(%r15)
+       lg      %r3,168(%r15)
+       brasl   %r14,prepare_ftrace_return
+       stg     %r2,168(%r15)
+#endif
        aghi    %r15,160
        lmg     %r2,%r5,32(%r15)
        lg      %r14,112(%r15)
@@ -69,6 +87,22 @@ _mcount:
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+       .globl  return_to_handler
+return_to_handler:
+       stmg    %r2,%r5,32(%r15)
+       lgr     %r1,%r15
+       aghi    %r15,-160
+       stg     %r1,__SF_BACKCHAIN(%r15)
+       brasl   %r14,ftrace_return_to_handler
+       aghi    %r15,160
+       lgr     %r14,%r2
+       lmg     %r2,%r5,32(%r15)
+       br      %r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #else /* CONFIG_64BIT */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -96,6 +130,21 @@ ftrace_caller:
        l       %r14,0b-0b(%r1)
        l       %r14,0(%r14)
        basr    %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl  ftrace_graph_caller
+ftrace_graph_caller:
+       # This unconditional branch gets runtime patched. Change only if
       # you know what you are doing. See ftrace_enable_ftrace_graph_caller().
+       j       1f
+       bras    %r1,0f
+       .long   prepare_ftrace_return
+0:     l       %r2,152(%r15)
+       l       %r4,0(%r1)
+       l       %r3,100(%r15)
+       basr    %r14,%r4
+       st      %r2,100(%r15)
+1:
+#endif
        ahi     %r15,96
        l       %r14,56(%r15)
 3:     lm      %r2,%r5,16(%r15)
@@ -128,10 +177,40 @@ _mcount:
        l       %r14,0b-0b(%r1)
        l       %r14,0(%r14)
        basr    %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       bras    %r1,0f
+       .long   prepare_ftrace_return
+0:     l       %r2,152(%r15)
+       l       %r4,0(%r1)
+       l       %r3,100(%r15)
+       basr    %r14,%r4
+       st      %r2,100(%r15)
+#endif
        ahi     %r15,96
        l       %r14,56(%r15)
 3:     lm      %r2,%r5,16(%r15)
        br      %r14
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+       .globl  return_to_handler
+return_to_handler:
+       stm     %r2,%r5,16(%r15)
+       st      %r14,56(%r15)
+       lr      %r0,%r15
+       ahi     %r15,-96
+       st      %r0,__SF_BACKCHAIN(%r15)
+       bras    %r1,0f
+       .long   ftrace_return_to_handler
+0:     l       %r2,0b-0b(%r1)
+       basr    %r14,%r2
+       lr      %r14,%r2
+       ahi     %r15,96
+       lm      %r2,%r5,16(%r15)
+       br      %r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 6b0686d78fc7b5975027a70447375853bc052c32..0de305b598cee30a1352f67d4ded1eda4f0d3be0 100644
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/ftrace.h>
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
@@ -112,7 +113,7 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
        return 0;
 }
 
-void do_extint(struct pt_regs *regs, unsigned short code)
+void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
 {
         ext_int_info_t *p;
         int index;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ad9a999aaa924bbb74f218d5d0690cfe8d186c07..215330a2c128dfce1bb341fed1bd138ef089c918 100644
@@ -70,7 +70,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long sched_clock(void)
+unsigned long long notrace sched_clock(void)
 {
        return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 89399b8756c2bae265601ba314bfb51dd9493f0f..a53db23ee092fcbd37f2fa2e16ca5fbc8a583cd7 100644
@@ -34,6 +34,7 @@ SECTIONS
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
+               IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
        } :text = 0x0700
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 9889f188c7c55503a80c8241bcdb9a06d8889371..5ec7789bd9d84a0ab171d75d66315f4bce8e4b9c 100644
@@ -12,6 +12,7 @@
 #define KMSG_COMPONENT "cio"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -626,8 +627,7 @@ out:
  *         handlers).
  *
  */
-void
-do_IRQ (struct pt_regs *regs)
+void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        struct tpi_info *tpi_info;
        struct subchannel *sch;