/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>

#include "insn.h"

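/*
 * The "NOP" used for a disabled call site is not a literal no-op: the -pg
 * prologue (with the __gnu_mcount_nc calling sequence) pushes lr just
 * before the branch to the mcount stub, so popping lr here undoes that
 * push and leaves the function's state untouched.
 */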
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
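/*
 * CONFIG_OLD_MCOUNT covers call sites emitted by older toolchains that
 * use the original mcount ABI.  Those sites must be patched against the
 * old entry points and NOP encoding, so the helpers below select the
 * right variant based on rec->arch.old_mcount.
 */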
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

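/*
 * Map the generic MCOUNT_ADDR/FTRACE_ADDR targets onto their old-ABI
 * counterparts for call sites recorded as old-style mcount.
 */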
static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

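/*
 * Build the branch-and-link instruction that makes the call site at 'pc'
 * call 'addr' (generated by the arm_gen_branch_link() helper from insn.h).
 */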
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

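/*
 * Patch a single instruction at 'pc': convert 'old' and 'new' to the
 * in-memory form for the kernel's instruction set (ARM or Thumb-2),
 * optionally verify that the current contents match 'old', then write
 * 'new' and flush the icache for that range.
 */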
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

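/*
 * Redirect the call slot at ftrace_call (and, with CONFIG_OLD_MCOUNT, at
 * ftrace_call_old) to the new tracer function.  The previous contents of
 * the slot are not validated.
 */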
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

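/*
 * Enable a call site: replace its NOP with a branch-and-link to the
 * (possibly old-ABI adjusted) ftrace entry point, after verifying that
 * the expected NOP is still in place.
 */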
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

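/*
 * Disable a call site by restoring the appropriate NOP.  If validation
 * fails with -EINVAL while removing a call to MCOUNT_ADDR, retry on the
 * assumption that the site uses the old mcount ABI and record that in
 * rec->arch.old_mcount for later patching.
 */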
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}

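/*
 * No ARM-specific initialisation is required; just clear the word handed
 * in by the core ftrace code.
 */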
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
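/*
 * Hook the return path of a traced function: push the real return address
 * onto the return trace stack and replace *parent (which holds the
 * caller's return address) with return_to_handler.  The original address
 * is restored if the push fails or the entry handler declines the trace.
 */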
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

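/*
 * Flip the ftrace_graph_call site (or, with CONFIG_OLD_MCOUNT, the
 * ftrace_graph_call_old site) between a plain 'mov r0, r0' NOP and a
 * branch to the graph caller.
 */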
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */