/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE-Gx specific ftrace support
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/sections.h>

#include <arch/opcode.h>

#ifdef CONFIG_DYNAMIC_FTRACE
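
/*
 * A TILE-Gx instruction bundle is 64 bits wide and encodes one operation
 * per pipeline slot.  The helper below builds the bundle used to disable
 * a call site: an fnop in the X0 slot paired with a nop in the X1 slot,
 * i.e. a single one-cycle no-op bundle.
 */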
static inline tilegx_bundle_bits NOP(void)
{
	return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
		create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
		create_Opcode_X0(RRR_0_OPCODE_X0) |
		create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
		create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
		create_Opcode_X1(RRR_0_OPCODE_X1);
}

static int machine_stopped __read_mostly;
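
/*
 * ftrace calls these hooks around a batch of code patching, which it
 * normally performs under stop_machine().  While machine_stopped is set,
 * ftrace_modify_code() can skip the per-call-site icache flush; the
 * post-process hook below then flushes the whole L1 icache once.
 */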
int ftrace_arch_code_modify_prepare(void)
{
	machine_stopped = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
	machine_stopped = 0;
	return 0;
}

/*
 * Put { move r10, lr; jal ftrace_caller } in a single bundle, so that the
 * dynamic tracer adds only one bundle (one cycle) of overhead to every
 * kernel function while tracing is disabled.
 */
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	tilegx_bundle_bits opcode_x0, opcode_x1;
	/* Jump offsets are PC-relative and counted in 8-byte bundles. */
	long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;

	if (link) {
		/* opcode: jal addr */
		opcode_x1 =
			create_Opcode_X1(JUMP_OPCODE_X1) |
			create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
			create_JumpOff_X1(pcrel_by_instr);
	} else {
		/* opcode: j addr */
		opcode_x1 =
			create_Opcode_X1(JUMP_OPCODE_X1) |
			create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
			create_JumpOff_X1(pcrel_by_instr);
	}

	/*
	 * Also put { move r10, lr; jal ftrace_stub } in a bundle, which
	 * is used to replace the bundle at address ftrace_call.
	 */
	if (addr == FTRACE_ADDR || addr == (unsigned long)ftrace_stub) {
		/* opcode: or r10, lr, zero */
		opcode_x0 =
			create_Dest_X0(10) |
			create_SrcA_X0(TREG_LR) |
			create_SrcB_X0(TREG_ZERO) |
			create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
			create_Opcode_X0(RRR_0_OPCODE_X0);
	} else {
		/* opcode: fnop */
		opcode_x0 =
			create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
			create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
			create_Opcode_X0(RRR_0_OPCODE_X0);
	}

	return opcode_x1 | opcode_x0;
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP();
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return ftrace_gen_branch(pc, addr, true);
}
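
/*
 * Patch one bundle of kernel text.  Kernel text is executed from the
 * read-only mapping at MEM_SV_START, so the write goes through the
 * corresponding writable alias at PAGE_OFFSET; the icache is then
 * flushed here, unless a stop_machine batch is in progress, in which
 * case ftrace_arch_code_modify_post_process() flushes it once at the end.
 */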
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new)
{
	unsigned long pc_wr;

	/* Check that the address is in kernel text space or module space. */
	if (!kernel_text_address(pc))
		return -EINVAL;

	/* Operate on the writable kernel text mapping. */
	pc_wr = pc - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	smp_wmb();

	if (!machine_stopped && num_online_cpus() > 1)
		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc, old;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, old, new);

	return ret;
}
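
/*
 * ftrace_make_call()/ftrace_make_nop() switch an mcount call site between
 * the single-bundle { move r10, lr; jal ftrace_caller } call sequence and
 * the plain nop bundle generated by NOP() above.
 */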
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
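/*
 * Called from the ftrace entry code with the location of the saved lr
 * (the caller's return address).  The saved value is replaced with
 * return_to_handler so the graph tracer regains control when the traced
 * function returns, and the real return address is pushed on the
 * per-task return stack.
 */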
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
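
/*
 * Enable or disable the function-graph entry hook by rewriting the
 * ftrace_graph_call site inside ftrace_caller: a plain (non-linking)
 * branch to ftrace_graph_caller when enabled, a nop bundle when not.
 */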
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
	unsigned long nop = NOP();
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */