// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
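
/*
 * Illustrative note (not from the original source): because the two macros
 * above are GNU statement expressions that evaluate to ___r, they can be
 * used directly inside a condition while still shutting ftrace down on
 * failure, e.g.:
 *
 *	if (FTRACE_WARN_ON_ONCE(!rec))
 *		return -EINVAL;
 */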

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash = &opsname.local_hash,	\
	.local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL = (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func = ftrace_stub,
	.flags = FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
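
/*
 * Note that ops are always inserted at the head of the list, so the most
 * recently registered ftrace_ops is the first one the list function
 * iterates over.
 */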

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
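
/*
 * Illustrative sketch (not from the original source): external users do not
 * call __register_ftrace_function() directly. They fill in an ftrace_ops and
 * go through the public register_ftrace_function() wrapper, roughly:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		...	(runs for every traced function)
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * my_callback and my_ops are hypothetical names used only for illustration.
 */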

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
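
/*
 * Worked example (assuming a typical 64-bit build with 4K pages; not from
 * the original source): records[] starts 16 bytes into the page (one pointer
 * plus one unsigned long), leaving 4080 bytes. With the graph tracer
 * configured in, sizeof(struct ftrace_profile) is 48 bytes, giving 85
 * profiles per page; without it, 32 bytes, giving 127 profiles per page.
 */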

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 *   s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}
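
	/*
	 * To see why the above matches the usual sample variance: expanding
	 * \Sum (x_i - avg)^2 = \Sum x_i^2 - 2 * avg * \Sum x_i + n * avg^2
	 * and substituting avg = \Sum x_i / n gives
	 * (n * \Sum x_i^2 - (\Sum x_i)^2) / n; dividing by (n - 1) yields
	 * the formula in the comment, computed from the two running sums
	 * kept in rec->time and rec->time_squared.
	 */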

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we know there are around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
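
/*
 * Rough sizing (under the same 64-bit/4K-page assumptions as above): with
 * the graph tracer built in, 20000 functions at 85 profiles per page needs
 * DIV_ROUND_UP(20000, 85) = 236 pages, i.e. a bit under 1MB per CPU.
 */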

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func = function_profile_call,
	.flags = FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu().
			 */
			unregister_ftrace_profiler();
		}
	}
out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}
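
/*
 * Usage sketch from userspace (paths assume the usual tracefs mount at
 * /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	# cat /sys/kernel/tracing/trace_stat/function0
 *
 * where function0 is the per-CPU stat file registered above for CPU 0.
 */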

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.local_hash.notrace_hash = EMPTY_HASH,
	.local_hash.filter_hash	= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags			= FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops - the ops to remove the filters from
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}


static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
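
/*
 * For example (not from the original source): with 1000 entries,
 * fls(1000 / 2) = fls(500) = 9, so the new hash gets 2^9 = 512 buckets,
 * roughly half the entry count and well under the FTRACE_HASH_MAX_BITS
 * cap of 12 (4096 buckets).
 */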

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
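
/*
 * Spelled out as a truth table (derived from the logic above):
 *
 *	filter_hash	notrace_hash	ip traced?
 *	-----------	------------	----------
 *	empty		empty		yes (trace everything)
 *	empty		has ip		no
 *	has ip		empty		yes
 *	has ip		has ip		no (notrace wins)
 *	lacks ip	-		no
 */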

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
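
/*
 * Typical usage pattern (matching how this macro pair is used throughout
 * this file): note the goto for early exit, since a bare 'break' would
 * only leave the inner loop:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (some_condition(rec))
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	...
 *
 * (some_condition is a placeholder for whatever test the caller needs.)
 */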


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long offset;
	unsigned long size;

	rec = lookup_rec(ip, ip);
	if (!rec) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			rec = lookup_rec(ip, ip + size - 1);
	}

	if (rec)
		return rec->ip;

out:
	return 0;
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range between @start and @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool skip_record(struct dyn_ftrace *rec)
{
	/*
	 * At boot up, weak functions are set to be disabled. Function tracing
	 * can be enabled before they are, and in that case they still need to
	 * be disabled now.
	 * If the record is disabled, still continue if it is marked as already
	 * enabled (this is needed to keep the accounting working).
	 */
	return rec->flags & FTRACE_FL_DISABLED &&
		!(rec->flags & FTRACE_FL_ENABLED);
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (skip_record(rec))
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * The TRAMP needs to be set only if rec count
			 * is decremented to one, and the ops that is
			 * left has a trampoline. As TRAMP can only be
			 * enabled if there is only a single ops attached
			 * to it.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * (or no update is needed), -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 *
 * A DIRECT ops does not have the IPMODIFY flag, but we still need to check it
1892 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1893 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
1894 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1895 * the return value to the caller and eventually to the owner of the DIRECT
1896 * ops.
1897 */
__ftrace_hash_update_ipmodify(struct ftrace_ops * ops,struct ftrace_hash * old_hash,struct ftrace_hash * new_hash)1898 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1899 struct ftrace_hash *old_hash,
1900 struct ftrace_hash *new_hash)
1901 {
1902 struct ftrace_page *pg;
1903 struct dyn_ftrace *rec, *end = NULL;
1904 int in_old, in_new;
1905 bool is_ipmodify, is_direct;
1906
1907 /* Only update if the ops has been registered */
1908 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1909 return 0;
1910
1911 is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
1912 is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
1913
1914 /* neither IPMODIFY nor DIRECT, skip */
1915 if (!is_ipmodify && !is_direct)
1916 return 0;
1917
1918 if (WARN_ON_ONCE(is_ipmodify && is_direct))
1919 return 0;
1920
1921 /*
1922 * Since the IPMODIFY and DIRECT are very address sensitive
1923 * actions, we do not allow ftrace_ops to set all functions to new
1924 * hash.
1925 */
1926 if (!new_hash || !old_hash)
1927 return -EINVAL;
1928
1929 /* Update rec->flags */
1930 do_for_each_ftrace_rec(pg, rec) {
1931
1932 if (rec->flags & FTRACE_FL_DISABLED)
1933 continue;
1934
1935 /* We need to update only differences of filter_hash */
1936 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1937 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1938 if (in_old == in_new)
1939 continue;
1940
1941 if (in_new) {
1942 if (rec->flags & FTRACE_FL_IPMODIFY) {
1943 int ret;
1944
1945 /* Cannot have two ipmodify on same rec */
1946 if (is_ipmodify)
1947 goto rollback;
1948
1949 FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
1950
1951 /*
1952 * Another ops with IPMODIFY is already
1953 * attached. We are now attaching a direct
1954 * ops. Run SHARE_IPMODIFY_SELF, to check
1955 * whether sharing is supported.
1956 */
1957 if (!ops->ops_func)
1958 return -EBUSY;
1959 ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
1960 if (ret)
1961 return ret;
1962 } else if (is_ipmodify) {
1963 rec->flags |= FTRACE_FL_IPMODIFY;
1964 }
1965 } else if (is_ipmodify) {
1966 rec->flags &= ~FTRACE_FL_IPMODIFY;
1967 }
1968 } while_for_each_ftrace_rec();
1969
1970 return 0;
1971
1972 rollback:
1973 end = rec;
1974
1975 /* Roll back what we did above */
1976 do_for_each_ftrace_rec(pg, rec) {
1977
1978 if (rec->flags & FTRACE_FL_DISABLED)
1979 continue;
1980
1981 if (rec == end)
1982 goto err_out;
1983
1984 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1985 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1986 if (in_old == in_new)
1987 continue;
1988
1989 if (in_new)
1990 rec->flags &= ~FTRACE_FL_IPMODIFY;
1991 else
1992 rec->flags |= FTRACE_FL_IPMODIFY;
1993 } while_for_each_ftrace_rec();
1994
1995 err_out:
1996 return -EBUSY;
1997 }
1998
static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2000 {
2001 struct ftrace_hash *hash = ops->func_hash->filter_hash;
2002
2003 if (ftrace_hash_empty(hash))
2004 hash = NULL;
2005
2006 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2007 }
2008
2009 /* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2011 {
2012 struct ftrace_hash *hash = ops->func_hash->filter_hash;
2013
2014 if (ftrace_hash_empty(hash))
2015 hash = NULL;
2016
2017 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2018 }
2019
static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
2022 {
2023 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2024
2025 if (ftrace_hash_empty(old_hash))
2026 old_hash = NULL;
2027
2028 if (ftrace_hash_empty(new_hash))
2029 new_hash = NULL;
2030
2031 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2032 }
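
/*
 * Note on the three wrappers above: an empty filter hash means "trace
 * every function", so it is normalized to NULL before the call, and
 * __ftrace_hash_update_ipmodify() then rejects it with -EINVAL, since
 * IPMODIFY and DIRECT ops must never attach to all functions at once.
 */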
2033
static void print_ip_ins(const char *fmt, const unsigned char *p)
2035 {
2036 char ins[MCOUNT_INSN_SIZE];
2037
2038 if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2039 printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2040 return;
2041 }
2042
2043 printk(KERN_CONT "%s", fmt);
2044 pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2045 }
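
/*
 * For reference, %*phC prints the MCOUNT_INSN_SIZE instruction bytes as
 * colon-separated hex. An illustrative example (not from this file): on
 * x86-64, where MCOUNT_INSN_SIZE is 5 and a disabled call site holds the
 * 5-byte NOP, a line would look like:
 *
 *	expected: 0f:1f:44:00:00
 */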
2046
2047 enum ftrace_bug_type ftrace_bug_type;
2048 const void *ftrace_expected;
2049
static void print_bug_type(void)
2051 {
2052 switch (ftrace_bug_type) {
2053 case FTRACE_BUG_UNKNOWN:
2054 break;
2055 case FTRACE_BUG_INIT:
2056 pr_info("Initializing ftrace call sites\n");
2057 break;
2058 case FTRACE_BUG_NOP:
2059 pr_info("Setting ftrace call site to NOP\n");
2060 break;
2061 case FTRACE_BUG_CALL:
2062 pr_info("Setting ftrace call site to call ftrace function\n");
2063 break;
2064 case FTRACE_BUG_UPDATE:
2065 pr_info("Updating ftrace call site to call a different ftrace function\n");
2066 break;
2067 }
2068 }
2069
2070 /**
2071 * ftrace_bug - report and shutdown function tracer
2072 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2073 * @rec: The record that failed
2074 *
 * The arch code that enables or disables function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of:
2078 * EFAULT - if the problem happens on reading the @ip address
2079 * EINVAL - if what is read at @ip is not what was expected
2080 * EPERM - if the problem happens on writing to the @ip address
2081 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
2083 {
2084 unsigned long ip = rec ? rec->ip : 0;
2085
2086 pr_info("------------[ ftrace bug ]------------\n");
2087
2088 switch (failed) {
2089 case -EFAULT:
2090 pr_info("ftrace faulted on modifying ");
2091 print_ip_sym(KERN_INFO, ip);
2092 break;
2093 case -EINVAL:
2094 pr_info("ftrace failed to modify ");
2095 print_ip_sym(KERN_INFO, ip);
2096 print_ip_ins(" actual: ", (unsigned char *)ip);
2097 pr_cont("\n");
2098 if (ftrace_expected) {
2099 print_ip_ins(" expected: ", ftrace_expected);
2100 pr_cont("\n");
2101 }
2102 break;
2103 case -EPERM:
2104 pr_info("ftrace faulted on writing ");
2105 print_ip_sym(KERN_INFO, ip);
2106 break;
2107 default:
2108 pr_info("ftrace faulted on unknown error ");
2109 print_ip_sym(KERN_INFO, ip);
2110 }
2111 print_bug_type();
2112 if (rec) {
2113 struct ftrace_ops *ops = NULL;
2114
2115 pr_info("ftrace record flags: %lx\n", rec->flags);
2116 pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2117 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2118 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2119 ops = ftrace_find_tramp_ops_any(rec);
2120 if (ops) {
2121 do {
2122 pr_cont("\ttramp: %pS (%pS)",
2123 (void *)ops->trampoline,
2124 (void *)ops->func);
2125 ops = ftrace_find_tramp_ops_next(rec, ops);
2126 } while (ops);
2127 } else
2128 pr_cont("\ttramp: ERROR!");
2129
2130 }
2131 ip = ftrace_get_addr_curr(rec);
2132 pr_cont("\n expected tramp: %lx\n", ip);
2133 }
2134
2135 FTRACE_WARN_ON_ONCE(1);
2136 }
2137
static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2139 {
2140 unsigned long flag = 0UL;
2141
2142 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2143
2144 if (skip_record(rec))
2145 return FTRACE_UPDATE_IGNORE;
2146
2147 /*
2148 * If we are updating calls:
2149 *
2150 * If the record has a ref count, then we need to enable it
2151 * because someone is using it.
2152 *
	 * Otherwise we make sure it's disabled.
2154 *
2155 * If we are disabling calls, then disable all records that
2156 * are enabled.
2157 */
2158 if (enable && ftrace_rec_count(rec))
2159 flag = FTRACE_FL_ENABLED;
2160
2161 /*
2162 * If enabling and the REGS flag does not match the REGS_EN, or
2163 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2164 * this record. Set flags to fail the compare against ENABLED.
2165 * Same for direct calls.
2166 */
2167 if (flag) {
2168 if (!(rec->flags & FTRACE_FL_REGS) !=
2169 !(rec->flags & FTRACE_FL_REGS_EN))
2170 flag |= FTRACE_FL_REGS;
2171
2172 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2173 !(rec->flags & FTRACE_FL_TRAMP_EN))
2174 flag |= FTRACE_FL_TRAMP;
2175
2176 /*
2177 * Direct calls are special, as count matters.
2178 * We must test the record for direct, if the
2179 * DIRECT and DIRECT_EN do not match, but only
2180 * if the count is 1. That's because, if the
2181 * count is something other than one, we do not
2182 * want the direct enabled (it will be done via the
2183 * direct helper). But if DIRECT_EN is set, and
2184 * the count is not one, we need to clear it.
2185 */
2186 if (ftrace_rec_count(rec) == 1) {
2187 if (!(rec->flags & FTRACE_FL_DIRECT) !=
2188 !(rec->flags & FTRACE_FL_DIRECT_EN))
2189 flag |= FTRACE_FL_DIRECT;
2190 } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2191 flag |= FTRACE_FL_DIRECT;
2192 }
2193 }
2194
2195 /* If the state of this record hasn't changed, then do nothing */
2196 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2197 return FTRACE_UPDATE_IGNORE;
2198
2199 if (flag) {
2200 /* Save off if rec is being enabled (for return value) */
2201 flag ^= rec->flags & FTRACE_FL_ENABLED;
2202
2203 if (update) {
2204 rec->flags |= FTRACE_FL_ENABLED;
2205 if (flag & FTRACE_FL_REGS) {
2206 if (rec->flags & FTRACE_FL_REGS)
2207 rec->flags |= FTRACE_FL_REGS_EN;
2208 else
2209 rec->flags &= ~FTRACE_FL_REGS_EN;
2210 }
2211 if (flag & FTRACE_FL_TRAMP) {
2212 if (rec->flags & FTRACE_FL_TRAMP)
2213 rec->flags |= FTRACE_FL_TRAMP_EN;
2214 else
2215 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2216 }
2217
2218 if (flag & FTRACE_FL_DIRECT) {
2219 /*
2220 * If there's only one user (direct_ops helper)
2221 * then we can call the direct function
2222 * directly (no ftrace trampoline).
2223 */
2224 if (ftrace_rec_count(rec) == 1) {
2225 if (rec->flags & FTRACE_FL_DIRECT)
2226 rec->flags |= FTRACE_FL_DIRECT_EN;
2227 else
2228 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2229 } else {
2230 /*
2231 * Can only call directly if there's
2232 * only one callback to the function.
2233 */
2234 rec->flags &= ~FTRACE_FL_DIRECT_EN;
2235 }
2236 }
2237 }
2238
2239 /*
2240 * If this record is being updated from a nop, then
2241 * return UPDATE_MAKE_CALL.
2242 * Otherwise,
2243 * return UPDATE_MODIFY_CALL to tell the caller to convert
2244 * from the save regs, to a non-save regs function or
2245 * vice versa, or from a trampoline call.
2246 */
2247 if (flag & FTRACE_FL_ENABLED) {
2248 ftrace_bug_type = FTRACE_BUG_CALL;
2249 return FTRACE_UPDATE_MAKE_CALL;
2250 }
2251
2252 ftrace_bug_type = FTRACE_BUG_UPDATE;
2253 return FTRACE_UPDATE_MODIFY_CALL;
2254 }
2255
2256 if (update) {
2257 /* If there's no more users, clear all flags */
2258 if (!ftrace_rec_count(rec))
2259 rec->flags &= FTRACE_FL_DISABLED;
2260 else
2261 /*
2262 * Just disable the record, but keep the ops TRAMP
2263 * and REGS states. The _EN flags must be disabled though.
2264 */
2265 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2266 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
2267 }
2268
2269 ftrace_bug_type = FTRACE_BUG_NOP;
2270 return FTRACE_UPDATE_MAKE_NOP;
2271 }
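
/*
 * Summary of the return values above (illustrative, with update == true):
 *
 *	current state	wanted state	return value
 *	-------------	------------	------------
 *	nop		call		FTRACE_UPDATE_MAKE_CALL
 *	call		nop		FTRACE_UPDATE_MAKE_NOP
 *	call A		call B		FTRACE_UPDATE_MODIFY_CALL
 *	no change	no change	FTRACE_UPDATE_IGNORE
 */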
2272
2273 /**
2274 * ftrace_update_record - set a record that now is tracing or not
2275 * @rec: the record to update
2276 * @enable: set to true if the record is tracing, false to force disable
2277 *
2278 * The records that represent all functions that can be traced need
2279 * to be updated when tracing has been enabled.
2280 */
int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2282 {
2283 return ftrace_check_record(rec, enable, true);
2284 }
2285
2286 /**
2287 * ftrace_test_record - check if the record has been enabled or not
2288 * @rec: the record to test
2289 * @enable: set to true to check if enabled, false if it is disabled
2290 *
2291 * The arch code may need to test if a record is already set to
2292 * tracing to determine how to modify the function code that it
2293 * represents.
2294 */
int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2296 {
2297 return ftrace_check_record(rec, enable, false);
2298 }
2299
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2302 {
2303 struct ftrace_ops *op;
2304 unsigned long ip = rec->ip;
2305
2306 do_for_each_ftrace_op(op, ftrace_ops_list) {
2307
2308 if (!op->trampoline)
2309 continue;
2310
2311 if (hash_contains_ip(ip, op->func_hash))
2312 return op;
2313 } while_for_each_ftrace_op(op);
2314
2315 return NULL;
2316 }
2317
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2320 {
2321 struct ftrace_ops *op;
2322 unsigned long ip = rec->ip;
2323
2324 do_for_each_ftrace_op(op, ftrace_ops_list) {
2325
2326 if (op == op_exclude || !op->trampoline)
2327 continue;
2328
2329 if (hash_contains_ip(ip, op->func_hash))
2330 return op;
2331 } while_for_each_ftrace_op(op);
2332
2333 return NULL;
2334 }
2335
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
2339 {
2340 unsigned long ip = rec->ip;
2341
2342 while_for_each_ftrace_op(op) {
2343
2344 if (!op->trampoline)
2345 continue;
2346
2347 if (hash_contains_ip(ip, op->func_hash))
2348 return op;
2349 }
2350
2351 return NULL;
2352 }
2353
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2356 {
2357 struct ftrace_ops *op;
2358 unsigned long ip = rec->ip;
2359
2360 /*
2361 * Need to check removed ops first.
2362 * If they are being removed, and this rec has a tramp,
2363 * and this rec is in the ops list, then it would be the
2364 * one with the tramp.
2365 */
2366 if (removed_ops) {
2367 if (hash_contains_ip(ip, &removed_ops->old_hash))
2368 return removed_ops;
2369 }
2370
2371 /*
2372 * Need to find the current trampoline for a rec.
2373 * Now, a trampoline is only attached to a rec if there
2374 * was a single 'ops' attached to it. But this can be called
2375 * when we are adding another op to the rec or removing the
2376 * current one. Thus, if the op is being added, we can
2377 * ignore it because it hasn't attached itself to the rec
2378 * yet.
2379 *
2380 * If an ops is being modified (hooking to different functions)
2381 * then we don't care about the new functions that are being
2382 * added, just the old ones (that are probably being removed).
2383 *
2384 * If we are adding an ops to a function that already is using
2385 * a trampoline, it needs to be removed (trampolines are only
2386 * for single ops connected), then an ops that is not being
2387 * modified also needs to be checked.
2388 */
2389 do_for_each_ftrace_op(op, ftrace_ops_list) {
2390
2391 if (!op->trampoline)
2392 continue;
2393
2394 /*
2395 * If the ops is being added, it hasn't gotten to
2396 * the point to be removed from this tree yet.
2397 */
2398 if (op->flags & FTRACE_OPS_FL_ADDING)
2399 continue;
2400
2401
2402 /*
2403 * If the ops is being modified and is in the old
2404 * hash, then it is probably being removed from this
2405 * function.
2406 */
2407 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2408 hash_contains_ip(ip, &op->old_hash))
2409 return op;
2410 /*
2411 * If the ops is not being added or modified, and it's
2412 * in its normal filter hash, then this must be the one
2413 * we want!
2414 */
2415 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2416 hash_contains_ip(ip, op->func_hash))
2417 return op;
2418
2419 } while_for_each_ftrace_op(op);
2420
2421 return NULL;
2422 }
2423
static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2426 {
2427 struct ftrace_ops *op;
2428 unsigned long ip = rec->ip;
2429
2430 do_for_each_ftrace_op(op, ftrace_ops_list) {
2431 /* pass rec in as regs to have non-NULL val */
2432 if (hash_contains_ip(ip, op->func_hash))
2433 return op;
2434 } while_for_each_ftrace_op(op);
2435
2436 return NULL;
2437 }
2438
2439 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2440 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2441 static struct ftrace_hash *direct_functions = EMPTY_HASH;
2442 static DEFINE_MUTEX(direct_mutex);
2443 int ftrace_direct_func_count;
2444
2445 /*
2446 * Search the direct_functions hash to see if the given instruction pointer
2447 * has a direct caller attached to it.
2448 */
unsigned long ftrace_find_rec_direct(unsigned long ip)
2450 {
2451 struct ftrace_func_entry *entry;
2452
2453 entry = __ftrace_lookup_ip(direct_functions, ip);
2454 if (!entry)
2455 return 0;
2456
2457 return entry->direct;
2458 }
2459
static struct ftrace_func_entry *
ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
		      struct ftrace_hash **free_hash)
2463 {
2464 struct ftrace_func_entry *entry;
2465
2466 if (ftrace_hash_empty(direct_functions) ||
2467 direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
2468 struct ftrace_hash *new_hash;
2469 int size = ftrace_hash_empty(direct_functions) ? 0 :
2470 direct_functions->count + 1;
2471
2472 if (size < 32)
2473 size = 32;
2474
2475 new_hash = dup_hash(direct_functions, size);
2476 if (!new_hash)
2477 return NULL;
2478
2479 *free_hash = direct_functions;
2480 direct_functions = new_hash;
2481 }
2482
2483 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2484 if (!entry)
2485 return NULL;
2486
2487 entry->ip = ip;
2488 entry->direct = addr;
2489 __add_hash_entry(direct_functions, entry);
2490 return entry;
2491 }
2492
static void call_direct_funcs(unsigned long ip, unsigned long pip,
			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
2495 {
2496 struct pt_regs *regs = ftrace_get_regs(fregs);
2497 unsigned long addr;
2498
2499 addr = ftrace_find_rec_direct(ip);
2500 if (!addr)
2501 return;
2502
2503 arch_ftrace_set_direct_caller(regs, addr);
2504 }
2505
2506 struct ftrace_ops direct_ops = {
2507 .func = call_direct_funcs,
2508 .flags = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2509 | FTRACE_OPS_FL_PERMANENT,
2510 /*
2511 * By declaring the main trampoline as this trampoline
2512 * it will never have one allocated for it. Allocated
2513 * trampolines should not call direct functions.
2514 * The direct_ops should only be called by the builtin
2515 * ftrace_regs_caller trampoline.
2516 */
2517 .trampoline = FTRACE_REGS_ADDR,
2518 };
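
/*
 * A minimal sketch (hypothetical caller, not part of this file) of how
 * the direct interface above is typically consumed, assuming the
 * register_ftrace_direct() API declared elsewhere:
 *
 *	// my_tramp must follow the arch's direct-call ABI
 *	ret = register_ftrace_direct((unsigned long)my_func,
 *				     (unsigned long)my_tramp);
 *
 * That call populates direct_functions, which is what
 * ftrace_find_rec_direct() consults from call_direct_funcs().
 */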
2519 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2520
2521 /**
2522 * ftrace_get_addr_new - Get the call address to set to
2523 * @rec: The ftrace record descriptor
2524 *
2525 * If the record has the FTRACE_FL_REGS set, that means that it
2526 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2527 * is not set, then it wants to convert to the normal callback.
2528 *
2529 * Returns the address of the trampoline to set to
2530 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2532 {
2533 struct ftrace_ops *ops;
2534 unsigned long addr;
2535
2536 if ((rec->flags & FTRACE_FL_DIRECT) &&
2537 (ftrace_rec_count(rec) == 1)) {
2538 addr = ftrace_find_rec_direct(rec->ip);
2539 if (addr)
2540 return addr;
2541 WARN_ON_ONCE(1);
2542 }
2543
2544 /* Trampolines take precedence over regs */
2545 if (rec->flags & FTRACE_FL_TRAMP) {
2546 ops = ftrace_find_tramp_ops_new(rec);
2547 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2548 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2549 (void *)rec->ip, (void *)rec->ip, rec->flags);
2550 /* Ftrace is shutting down, return anything */
2551 return (unsigned long)FTRACE_ADDR;
2552 }
2553 return ops->trampoline;
2554 }
2555
2556 if (rec->flags & FTRACE_FL_REGS)
2557 return (unsigned long)FTRACE_REGS_ADDR;
2558 else
2559 return (unsigned long)FTRACE_ADDR;
2560 }
2561
2562 /**
2563 * ftrace_get_addr_curr - Get the call address that is already there
2564 * @rec: The ftrace record descriptor
2565 *
2566 * The FTRACE_FL_REGS_EN is set when the record already points to
2567 * a function that saves all the regs. Basically the '_EN' version
2568 * represents the current state of the function.
2569 *
2570 * Returns the address of the trampoline that is currently being called
2571 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2573 {
2574 struct ftrace_ops *ops;
2575 unsigned long addr;
2576
2577 /* Direct calls take precedence over trampolines */
2578 if (rec->flags & FTRACE_FL_DIRECT_EN) {
2579 addr = ftrace_find_rec_direct(rec->ip);
2580 if (addr)
2581 return addr;
2582 WARN_ON_ONCE(1);
2583 }
2584
2585 /* Trampolines take precedence over regs */
2586 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2587 ops = ftrace_find_tramp_ops_curr(rec);
2588 if (FTRACE_WARN_ON(!ops)) {
2589 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2590 (void *)rec->ip, (void *)rec->ip);
2591 /* Ftrace is shutting down, return anything */
2592 return (unsigned long)FTRACE_ADDR;
2593 }
2594 return ops->trampoline;
2595 }
2596
2597 if (rec->flags & FTRACE_FL_REGS_EN)
2598 return (unsigned long)FTRACE_REGS_ADDR;
2599 else
2600 return (unsigned long)FTRACE_ADDR;
2601 }
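
/*
 * Taken together, ftrace_get_addr_new() and ftrace_get_addr_curr() encode
 * the same precedence: a single-user DIRECT call wins over an ops
 * trampoline, which wins over the regs-saving caller, with FTRACE_ADDR as
 * the fallback. The only difference is whether the plain flags or the _EN
 * ("currently enabled") flags are consulted.
 */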
2602
static int
__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2605 {
2606 unsigned long ftrace_old_addr;
2607 unsigned long ftrace_addr;
2608 int ret;
2609
2610 ftrace_addr = ftrace_get_addr_new(rec);
2611
2612 /* This needs to be done before we call ftrace_update_record */
2613 ftrace_old_addr = ftrace_get_addr_curr(rec);
2614
2615 ret = ftrace_update_record(rec, enable);
2616
2617 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2618
2619 switch (ret) {
2620 case FTRACE_UPDATE_IGNORE:
2621 return 0;
2622
2623 case FTRACE_UPDATE_MAKE_CALL:
2624 ftrace_bug_type = FTRACE_BUG_CALL;
2625 return ftrace_make_call(rec, ftrace_addr);
2626
2627 case FTRACE_UPDATE_MAKE_NOP:
2628 ftrace_bug_type = FTRACE_BUG_NOP;
2629 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2630
2631 case FTRACE_UPDATE_MODIFY_CALL:
2632 ftrace_bug_type = FTRACE_BUG_UPDATE;
2633 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2634 }
2635
2636 return -1; /* unknown ftrace bug */
2637 }
2638
void __weak ftrace_replace_code(int mod_flags)
2640 {
2641 struct dyn_ftrace *rec;
2642 struct ftrace_page *pg;
2643 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2644 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2645 int failed;
2646
2647 if (unlikely(ftrace_disabled))
2648 return;
2649
2650 do_for_each_ftrace_rec(pg, rec) {
2651
2652 if (skip_record(rec))
2653 continue;
2654
2655 failed = __ftrace_replace_code(rec, enable);
2656 if (failed) {
2657 ftrace_bug(failed, rec);
2658 /* Stop processing */
2659 return;
2660 }
2661 if (schedulable)
2662 cond_resched();
2663 } while_for_each_ftrace_rec();
2664 }
2665
2666 struct ftrace_rec_iter {
2667 struct ftrace_page *pg;
2668 int index;
2669 };
2670
2671 /**
2672 * ftrace_rec_iter_start - start up iterating over traced functions
2673 *
2674 * Returns an iterator handle that is used to iterate over all
2675 * the records that represent address locations where functions
2676 * are traced.
2677 *
2678 * May return NULL if no records are available.
2679 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2681 {
2682 /*
2683 * We only use a single iterator.
2684 * Protected by the ftrace_lock mutex.
2685 */
2686 static struct ftrace_rec_iter ftrace_rec_iter;
2687 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2688
2689 iter->pg = ftrace_pages_start;
2690 iter->index = 0;
2691
2692 /* Could have empty pages */
2693 while (iter->pg && !iter->pg->index)
2694 iter->pg = iter->pg->next;
2695
2696 if (!iter->pg)
2697 return NULL;
2698
2699 return iter;
2700 }
2701
2702 /**
2703 * ftrace_rec_iter_next - get the next record to process.
2704 * @iter: The handle to the iterator.
2705 *
2706 * Returns the next iterator after the given iterator @iter.
2707 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2709 {
2710 iter->index++;
2711
2712 if (iter->index >= iter->pg->index) {
2713 iter->pg = iter->pg->next;
2714 iter->index = 0;
2715
2716 /* Could have empty pages */
2717 while (iter->pg && !iter->pg->index)
2718 iter->pg = iter->pg->next;
2719 }
2720
2721 if (!iter->pg)
2722 return NULL;
2723
2724 return iter;
2725 }
2726
2727 /**
2728 * ftrace_rec_iter_record - get the record at the iterator location
2729 * @iter: The current iterator location
2730 *
2731 * Returns the record that the current @iter is at.
2732 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2734 {
2735 return &iter->pg->records[iter->index];
2736 }
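
/*
 * The three calls above are meant to be used together. A sketch of the
 * canonical loop (this is essentially what the for_ftrace_rec_iter()
 * helper in <linux/ftrace.h> expands to):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// arch code patches the site at rec->ip here
 *	}
 */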
2737
static int
ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2740 {
2741 int ret;
2742
2743 if (unlikely(ftrace_disabled))
2744 return 0;
2745
2746 ret = ftrace_init_nop(mod, rec);
2747 if (ret) {
2748 ftrace_bug_type = FTRACE_BUG_INIT;
2749 ftrace_bug(ret, rec);
2750 return 0;
2751 }
2752 return 1;
2753 }
2754
2755 /*
2756 * archs can override this function if they must do something
2757 * before the modifying code is performed.
2758 */
void __weak ftrace_arch_code_modify_prepare(void)
2760 {
2761 }
2762
2763 /*
2764 * archs can override this function if they must do something
2765 * after the modifying code is performed.
2766 */
void __weak ftrace_arch_code_modify_post_process(void)
2768 {
2769 }
2770
void ftrace_modify_all_code(int command)
2772 {
2773 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2774 int mod_flags = 0;
2775 int err = 0;
2776
2777 if (command & FTRACE_MAY_SLEEP)
2778 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2779
	/*
	 * If the ftrace_caller calls an ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls is set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are tracing the right functions.
	 */
2790 if (update) {
2791 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2792 if (FTRACE_WARN_ON(err))
2793 return;
2794 }
2795
2796 if (command & FTRACE_UPDATE_CALLS)
2797 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2798 else if (command & FTRACE_DISABLE_CALLS)
2799 ftrace_replace_code(mod_flags);
2800
2801 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2802 function_trace_op = set_function_trace_op;
2803 smp_wmb();
2804 /* If irqs are disabled, we are in stop machine */
2805 if (!irqs_disabled())
2806 smp_call_function(ftrace_sync_ipi, NULL, 1);
2807 err = ftrace_update_ftrace_func(ftrace_trace_function);
2808 if (FTRACE_WARN_ON(err))
2809 return;
2810 }
2811
2812 if (command & FTRACE_START_FUNC_RET)
2813 err = ftrace_enable_ftrace_graph_caller();
2814 else if (command & FTRACE_STOP_FUNC_RET)
2815 err = ftrace_disable_ftrace_graph_caller();
2816 FTRACE_WARN_ON(err);
2817 }
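
/*
 * Illustrative call (assumed shape, mirroring how the command bits are
 * tested above): enabling tracing while also switching the traced
 * function would be requested as
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */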
2818
static int __ftrace_modify_code(void *data)
2820 {
2821 int *command = data;
2822
2823 ftrace_modify_all_code(*command);
2824
2825 return 0;
2826 }
2827
2828 /**
2829 * ftrace_run_stop_machine - go back to the stop machine method
2830 * @command: The command to tell ftrace what to do
2831 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
2834 */
void ftrace_run_stop_machine(int command)
2836 {
2837 stop_machine(__ftrace_modify_code, &command, NULL);
2838 }
2839
2840 /**
2841 * arch_ftrace_update_code - modify the code to trace or not trace
2842 * @command: The command that needs to be done
2843 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
2846 */
void __weak arch_ftrace_update_code(int command)
2848 {
2849 ftrace_run_stop_machine(command);
2850 }
2851
static void ftrace_run_update_code(int command)
2853 {
2854 ftrace_arch_code_modify_prepare();
2855
	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
2862 arch_ftrace_update_code(command);
2863
2864 ftrace_arch_code_modify_post_process();
2865 }
2866
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
2869 {
2870 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2871 ops->old_hash.filter_hash = old_hash->filter_hash;
2872 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2873 ftrace_run_update_code(command);
2874 ops->old_hash.filter_hash = NULL;
2875 ops->old_hash.notrace_hash = NULL;
2876 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2877 }
2878
2879 static ftrace_func_t saved_ftrace_func;
2880 static int ftrace_start_up;
2881
void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2883 {
2884 }
2885
2886 /* List of trace_ops that have allocated trampolines */
2887 static LIST_HEAD(ftrace_ops_trampoline_list);
2888
static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2890 {
2891 lockdep_assert_held(&ftrace_lock);
2892 list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2893 }
2894
static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2896 {
2897 lockdep_assert_held(&ftrace_lock);
2898 list_del_rcu(&ops->list);
2899 synchronize_rcu();
2900 }
2901
2902 /*
2903 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2904 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2905 * not a module.
2906 */
2907 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2908 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2909
static void ftrace_trampoline_free(struct ftrace_ops *ops)
2911 {
2912 if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
2913 ops->trampoline) {
2914 /*
2915 * Record the text poke event before the ksymbol unregister
2916 * event.
2917 */
2918 perf_event_text_poke((void *)ops->trampoline,
2919 (void *)ops->trampoline,
2920 ops->trampoline_size, NULL, 0);
2921 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2922 ops->trampoline, ops->trampoline_size,
2923 true, FTRACE_TRAMPOLINE_SYM);
2924 /* Remove from kallsyms after the perf events */
2925 ftrace_remove_trampoline_from_kallsyms(ops);
2926 }
2927
2928 arch_ftrace_trampoline_free(ops);
2929 }
2930
static void ftrace_startup_enable(int command)
2932 {
2933 if (saved_ftrace_func != ftrace_trace_function) {
2934 saved_ftrace_func = ftrace_trace_function;
2935 command |= FTRACE_UPDATE_TRACE_FUNC;
2936 }
2937
2938 if (!command || !ftrace_enabled)
2939 return;
2940
2941 ftrace_run_update_code(command);
2942 }
2943
static void ftrace_startup_all(int command)
2945 {
2946 update_all_ops = true;
2947 ftrace_startup_enable(command);
2948 update_all_ops = false;
2949 }
2950
int ftrace_startup(struct ftrace_ops *ops, int command)
2952 {
2953 int ret;
2954
2955 if (unlikely(ftrace_disabled))
2956 return -ENODEV;
2957
2958 ret = __register_ftrace_function(ops);
2959 if (ret)
2960 return ret;
2961
2962 ftrace_start_up++;
2963
	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
2972 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2973
2974 ret = ftrace_hash_ipmodify_enable(ops);
2975 if (ret < 0) {
2976 /* Rollback registration process */
2977 __unregister_ftrace_function(ops);
2978 ftrace_start_up--;
2979 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2980 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2981 ftrace_trampoline_free(ops);
2982 return ret;
2983 }
2984
2985 if (ftrace_hash_rec_enable(ops, 1))
2986 command |= FTRACE_UPDATE_CALLS;
2987
2988 ftrace_startup_enable(command);
2989
	/*
	 * If ftrace is in an undefined state, we just remove the ops from the
	 * list to prevent a NULL pointer dereference, instead of totally
	 * rolling it back and freeing the trampoline, because those actions
	 * could cause further damage.
	 */
2995 if (unlikely(ftrace_disabled)) {
2996 __unregister_ftrace_function(ops);
2997 return -ENODEV;
2998 }
2999
3000 ops->flags &= ~FTRACE_OPS_FL_ADDING;
3001
3002 return 0;
3003 }
3004
int ftrace_shutdown(struct ftrace_ops *ops, int command)
3006 {
3007 int ret;
3008
3009 if (unlikely(ftrace_disabled))
3010 return -ENODEV;
3011
3012 ret = __unregister_ftrace_function(ops);
3013 if (ret)
3014 return ret;
3015
3016 ftrace_start_up--;
	/*
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
3022 WARN_ON_ONCE(ftrace_start_up < 0);
3023
3024 /* Disabling ipmodify never fails */
3025 ftrace_hash_ipmodify_disable(ops);
3026
3027 if (ftrace_hash_rec_disable(ops, 1))
3028 command |= FTRACE_UPDATE_CALLS;
3029
3030 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3031
3032 if (saved_ftrace_func != ftrace_trace_function) {
3033 saved_ftrace_func = ftrace_trace_function;
3034 command |= FTRACE_UPDATE_TRACE_FUNC;
3035 }
3036
3037 if (!command || !ftrace_enabled)
3038 goto out;
3039
3040 /*
3041 * If the ops uses a trampoline, then it needs to be
3042 * tested first on update.
3043 */
3044 ops->flags |= FTRACE_OPS_FL_REMOVING;
3045 removed_ops = ops;
3046
3047 /* The trampoline logic checks the old hashes */
3048 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3049 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3050
3051 ftrace_run_update_code(command);
3052
3053 /*
3054 * If there's no more ops registered with ftrace, run a
3055 * sanity check to make sure all rec flags are cleared.
3056 */
3057 if (rcu_dereference_protected(ftrace_ops_list,
3058 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3059 struct ftrace_page *pg;
3060 struct dyn_ftrace *rec;
3061
3062 do_for_each_ftrace_rec(pg, rec) {
3063 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
3064 pr_warn(" %pS flags:%lx\n",
3065 (void *)rec->ip, rec->flags);
3066 } while_for_each_ftrace_rec();
3067 }
3068
3069 ops->old_hash.filter_hash = NULL;
3070 ops->old_hash.notrace_hash = NULL;
3071
3072 removed_ops = NULL;
3073 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3074
3075 out:
3076 /*
3077 * Dynamic ops may be freed, we must make sure that all
3078 * callers are done before leaving this function.
3079 * The same goes for freeing the per_cpu data of the per_cpu
3080 * ops.
3081 */
3082 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3083 /*
3084 * We need to do a hard force of sched synchronization.
3085 * This is because we use preempt_disable() to do RCU, but
3086 * the function tracers can be called where RCU is not watching
3087 * (like before user_exit()). We can not rely on the RCU
3088 * infrastructure to do the synchronization, thus we must do it
3089 * ourselves.
3090 */
3091 synchronize_rcu_tasks_rude();
3092
3093 /*
3094 * When the kernel is preemptive, tasks can be preempted
3095 * while on a ftrace trampoline. Just scheduling a task on
3096 * a CPU is not good enough to flush them. Calling
3097 * synchronize_rcu_tasks() will wait for those tasks to
3098 * execute and either schedule voluntarily or enter user space.
3099 */
3100 if (IS_ENABLED(CONFIG_PREEMPTION))
3101 synchronize_rcu_tasks();
3102
3103 ftrace_trampoline_free(ops);
3104 }
3105
3106 return 0;
3107 }
3108
3109 static u64 ftrace_update_time;
3110 unsigned long ftrace_update_tot_cnt;
3111 unsigned long ftrace_number_of_pages;
3112 unsigned long ftrace_number_of_groups;
3113
static inline int ops_traces_mod(struct ftrace_ops *ops)
3115 {
	/*
	 * An empty filter_hash defaults to tracing the module.
	 * But a notrace hash requires testing individual module functions.
	 */
3120 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3121 ftrace_hash_empty(ops->func_hash->notrace_hash);
3122 }
3123
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3125 {
3126 bool init_nop = ftrace_need_init_nop();
3127 struct ftrace_page *pg;
3128 struct dyn_ftrace *p;
3129 u64 start, stop;
3130 unsigned long update_cnt = 0;
3131 unsigned long rec_flags = 0;
3132 int i;
3133
3134 start = ftrace_now(raw_smp_processor_id());
3135
3136 /*
3137 * When a module is loaded, this function is called to convert
3138 * the calls to mcount in its text to nops, and also to create
3139 * an entry in the ftrace data. Now, if ftrace is activated
3140 * after this call, but before the module sets its text to
3141 * read-only, the modification of enabling ftrace can fail if
3142 * the read-only is done while ftrace is converting the calls.
3143 * To prevent this, the module's records are set as disabled
3144 * and will be enabled after the call to set the module's text
3145 * to read-only.
3146 */
3147 if (mod)
3148 rec_flags |= FTRACE_FL_DISABLED;
3149
3150 for (pg = new_pgs; pg; pg = pg->next) {
3151
3152 for (i = 0; i < pg->index; i++) {
3153
3154 /* If something went wrong, bail without enabling anything */
3155 if (unlikely(ftrace_disabled))
3156 return -1;
3157
3158 p = &pg->records[i];
3159 p->flags = rec_flags;
3160
3161 /*
3162 * Do the initial record conversion from mcount jump
3163 * to the NOP instructions.
3164 */
3165 if (init_nop && !ftrace_nop_initialize(mod, p))
3166 break;
3167
3168 update_cnt++;
3169 }
3170 }
3171
3172 stop = ftrace_now(raw_smp_processor_id());
3173 ftrace_update_time = stop - start;
3174 ftrace_update_tot_cnt += update_cnt;
3175
3176 return 0;
3177 }
3178
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3180 {
3181 int order;
3182 int pages;
3183 int cnt;
3184
3185 if (WARN_ON(!count))
3186 return -EINVAL;
3187
3188 /* We want to fill as much as possible, with no empty pages */
3189 pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3190 order = fls(pages) - 1;
3191
3192 again:
3193 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3194
3195 if (!pg->records) {
3196 /* if we can't allocate this size, try something smaller */
3197 if (!order)
3198 return -ENOMEM;
3199 order--;
3200 goto again;
3201 }
3202
3203 ftrace_number_of_pages += 1 << order;
3204 ftrace_number_of_groups++;
3205
3206 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3207 pg->order = order;
3208
3209 if (cnt > count)
3210 cnt = count;
3211
3212 return cnt;
3213 }
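
/*
 * Worked example for the sizing above: a request for count equal to
 * three pages' worth of records gives pages = 3, order = fls(3) - 1 = 1,
 * i.e. a 2-page block. Since cnt (roughly two pages' worth of entries)
 * is then less than count, the caller, ftrace_allocate_pages(), loops
 * and places the remaining records in the next ftrace_page.
 */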
3214
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
3217 {
3218 struct ftrace_page *start_pg;
3219 struct ftrace_page *pg;
3220 int cnt;
3221
3222 if (!num_to_init)
3223 return NULL;
3224
3225 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3226 if (!pg)
3227 return NULL;
3228
	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
3234 for (;;) {
3235 cnt = ftrace_allocate_records(pg, num_to_init);
3236 if (cnt < 0)
3237 goto free_pages;
3238
3239 num_to_init -= cnt;
3240 if (!num_to_init)
3241 break;
3242
3243 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3244 if (!pg->next)
3245 goto free_pages;
3246
3247 pg = pg->next;
3248 }
3249
3250 return start_pg;
3251
3252 free_pages:
3253 pg = start_pg;
3254 while (pg) {
3255 if (pg->records) {
3256 free_pages((unsigned long)pg->records, pg->order);
3257 ftrace_number_of_pages -= 1 << pg->order;
3258 }
3259 start_pg = pg->next;
3260 kfree(pg);
3261 pg = start_pg;
3262 ftrace_number_of_groups--;
3263 }
3264 pr_info("ftrace: FAILED to allocate memory for functions\n");
3265 return NULL;
3266 }
3267
3268 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3269
3270 struct ftrace_iterator {
3271 loff_t pos;
3272 loff_t func_pos;
3273 loff_t mod_pos;
3274 struct ftrace_page *pg;
3275 struct dyn_ftrace *func;
3276 struct ftrace_func_probe *probe;
3277 struct ftrace_func_entry *probe_entry;
3278 struct trace_parser parser;
3279 struct ftrace_hash *hash;
3280 struct ftrace_ops *ops;
3281 struct trace_array *tr;
3282 struct list_head *mod_list;
3283 int pidx;
3284 int idx;
3285 unsigned flags;
3286 };
3287
static void *
t_probe_next(struct seq_file *m, loff_t *pos)
3290 {
3291 struct ftrace_iterator *iter = m->private;
3292 struct trace_array *tr = iter->ops->private;
3293 struct list_head *func_probes;
3294 struct ftrace_hash *hash;
3295 struct list_head *next;
3296 struct hlist_node *hnd = NULL;
3297 struct hlist_head *hhd;
3298 int size;
3299
3300 (*pos)++;
3301 iter->pos = *pos;
3302
3303 if (!tr)
3304 return NULL;
3305
3306 func_probes = &tr->func_probes;
3307 if (list_empty(func_probes))
3308 return NULL;
3309
3310 if (!iter->probe) {
3311 next = func_probes->next;
3312 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3313 }
3314
3315 if (iter->probe_entry)
3316 hnd = &iter->probe_entry->hlist;
3317
3318 hash = iter->probe->ops.func_hash->filter_hash;
3319
3320 /*
3321 * A probe being registered may temporarily have an empty hash
3322 * and it's at the end of the func_probes list.
3323 */
3324 if (!hash || hash == EMPTY_HASH)
3325 return NULL;
3326
3327 size = 1 << hash->size_bits;
3328
3329 retry:
3330 if (iter->pidx >= size) {
3331 if (iter->probe->list.next == func_probes)
3332 return NULL;
3333 next = iter->probe->list.next;
3334 iter->probe = list_entry(next, struct ftrace_func_probe, list);
3335 hash = iter->probe->ops.func_hash->filter_hash;
3336 size = 1 << hash->size_bits;
3337 iter->pidx = 0;
3338 }
3339
3340 hhd = &hash->buckets[iter->pidx];
3341
3342 if (hlist_empty(hhd)) {
3343 iter->pidx++;
3344 hnd = NULL;
3345 goto retry;
3346 }
3347
3348 if (!hnd)
3349 hnd = hhd->first;
3350 else {
3351 hnd = hnd->next;
3352 if (!hnd) {
3353 iter->pidx++;
3354 goto retry;
3355 }
3356 }
3357
3358 if (WARN_ON_ONCE(!hnd))
3359 return NULL;
3360
3361 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3362
3363 return iter;
3364 }
3365
static void *t_probe_start(struct seq_file *m, loff_t *pos)
3367 {
3368 struct ftrace_iterator *iter = m->private;
3369 void *p = NULL;
3370 loff_t l;
3371
3372 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3373 return NULL;
3374
3375 if (iter->mod_pos > *pos)
3376 return NULL;
3377
3378 iter->probe = NULL;
3379 iter->probe_entry = NULL;
3380 iter->pidx = 0;
3381 for (l = 0; l <= (*pos - iter->mod_pos); ) {
3382 p = t_probe_next(m, &l);
3383 if (!p)
3384 break;
3385 }
3386 if (!p)
3387 return NULL;
3388
3389 /* Only set this if we have an item */
3390 iter->flags |= FTRACE_ITER_PROBE;
3391
3392 return iter;
3393 }
3394
static int
t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3397 {
3398 struct ftrace_func_entry *probe_entry;
3399 struct ftrace_probe_ops *probe_ops;
3400 struct ftrace_func_probe *probe;
3401
3402 probe = iter->probe;
3403 probe_entry = iter->probe_entry;
3404
3405 if (WARN_ON_ONCE(!probe || !probe_entry))
3406 return -EIO;
3407
3408 probe_ops = probe->probe_ops;
3409
3410 if (probe_ops->print)
3411 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3412
3413 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3414 (void *)probe_ops->func);
3415
3416 return 0;
3417 }
3418
static void *
t_mod_next(struct seq_file *m, loff_t *pos)
3421 {
3422 struct ftrace_iterator *iter = m->private;
3423 struct trace_array *tr = iter->tr;
3424
3425 (*pos)++;
3426 iter->pos = *pos;
3427
3428 iter->mod_list = iter->mod_list->next;
3429
3430 if (iter->mod_list == &tr->mod_trace ||
3431 iter->mod_list == &tr->mod_notrace) {
3432 iter->flags &= ~FTRACE_ITER_MOD;
3433 return NULL;
3434 }
3435
3436 iter->mod_pos = *pos;
3437
3438 return iter;
3439 }
3440
static void *t_mod_start(struct seq_file *m, loff_t *pos)
3442 {
3443 struct ftrace_iterator *iter = m->private;
3444 void *p = NULL;
3445 loff_t l;
3446
3447 if (iter->func_pos > *pos)
3448 return NULL;
3449
3450 iter->mod_pos = iter->func_pos;
3451
3452 /* probes are only available if tr is set */
3453 if (!iter->tr)
3454 return NULL;
3455
3456 for (l = 0; l <= (*pos - iter->func_pos); ) {
3457 p = t_mod_next(m, &l);
3458 if (!p)
3459 break;
3460 }
3461 if (!p) {
3462 iter->flags &= ~FTRACE_ITER_MOD;
3463 return t_probe_start(m, pos);
3464 }
3465
3466 /* Only set this if we have an item */
3467 iter->flags |= FTRACE_ITER_MOD;
3468
3469 return iter;
3470 }
3471
static int
t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3474 {
3475 struct ftrace_mod_load *ftrace_mod;
3476 struct trace_array *tr = iter->tr;
3477
3478 if (WARN_ON_ONCE(!iter->mod_list) ||
3479 iter->mod_list == &tr->mod_trace ||
3480 iter->mod_list == &tr->mod_notrace)
3481 return -EIO;
3482
3483 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3484
3485 if (ftrace_mod->func)
3486 seq_printf(m, "%s", ftrace_mod->func);
3487 else
3488 seq_putc(m, '*');
3489
3490 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3491
3492 return 0;
3493 }
3494
static void *
t_func_next(struct seq_file *m, loff_t *pos)
3497 {
3498 struct ftrace_iterator *iter = m->private;
3499 struct dyn_ftrace *rec = NULL;
3500
3501 (*pos)++;
3502
3503 retry:
3504 if (iter->idx >= iter->pg->index) {
3505 if (iter->pg->next) {
3506 iter->pg = iter->pg->next;
3507 iter->idx = 0;
3508 goto retry;
3509 }
3510 } else {
3511 rec = &iter->pg->records[iter->idx++];
3512 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3513 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3514
3515 ((iter->flags & FTRACE_ITER_ENABLED) &&
3516 !(rec->flags & FTRACE_FL_ENABLED))) {
3517
3518 rec = NULL;
3519 goto retry;
3520 }
3521 }
3522
3523 if (!rec)
3524 return NULL;
3525
3526 iter->pos = iter->func_pos = *pos;
3527 iter->func = rec;
3528
3529 return iter;
3530 }
3531
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
3534 {
3535 struct ftrace_iterator *iter = m->private;
3536 loff_t l = *pos; /* t_probe_start() must use original pos */
3537 void *ret;
3538
3539 if (unlikely(ftrace_disabled))
3540 return NULL;
3541
3542 if (iter->flags & FTRACE_ITER_PROBE)
3543 return t_probe_next(m, pos);
3544
3545 if (iter->flags & FTRACE_ITER_MOD)
3546 return t_mod_next(m, pos);
3547
3548 if (iter->flags & FTRACE_ITER_PRINTALL) {
3549 /* next must increment pos, and t_probe_start does not */
3550 (*pos)++;
3551 return t_mod_start(m, &l);
3552 }
3553
3554 ret = t_func_next(m, pos);
3555
3556 if (!ret)
3557 return t_mod_start(m, &l);
3558
3559 return ret;
3560 }
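
/*
 * Resulting output order of the seq_file walk: function records first
 * (t_func_next), then the :mod: entries (t_mod_start/t_mod_next), then
 * the probe entries (t_probe_start/t_probe_next); that is why each stage
 * above falls through to the next one when it runs out of items.
 */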
3561
static void reset_iter_read(struct ftrace_iterator *iter)
3563 {
3564 iter->pos = 0;
3565 iter->func_pos = 0;
3566 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3567 }
3568
static void *t_start(struct seq_file *m, loff_t *pos)
3570 {
3571 struct ftrace_iterator *iter = m->private;
3572 void *p = NULL;
3573 loff_t l;
3574
3575 mutex_lock(&ftrace_lock);
3576
3577 if (unlikely(ftrace_disabled))
3578 return NULL;
3579
3580 /*
3581 * If an lseek was done, then reset and start from beginning.
3582 */
3583 if (*pos < iter->pos)
3584 reset_iter_read(iter);
3585
3586 /*
3587 * For set_ftrace_filter reading, if we have the filter
3588 * off, we can short cut and just print out that all
3589 * functions are enabled.
3590 */
3591 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3592 ftrace_hash_empty(iter->hash)) {
3593 iter->func_pos = 1; /* Account for the message */
3594 if (*pos > 0)
3595 return t_mod_start(m, pos);
3596 iter->flags |= FTRACE_ITER_PRINTALL;
3597 /* reset in case of seek/pread */
3598 iter->flags &= ~FTRACE_ITER_PROBE;
3599 return iter;
3600 }
3601
3602 if (iter->flags & FTRACE_ITER_MOD)
3603 return t_mod_start(m, pos);
3604
	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock mutex. This is because
	 * those pointers can change without the lock.
	 */
3610 iter->pg = ftrace_pages_start;
3611 iter->idx = 0;
3612 for (l = 0; l <= *pos; ) {
3613 p = t_func_next(m, &l);
3614 if (!p)
3615 break;
3616 }
3617
3618 if (!p)
3619 return t_mod_start(m, pos);
3620
3621 return iter;
3622 }
3623
static void t_stop(struct seq_file *m, void *p)
3625 {
3626 mutex_unlock(&ftrace_lock);
3627 }
3628
void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3631 {
3632 return NULL;
3633 }
3634
static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
3637 {
3638 void *ptr;
3639
3640 ptr = arch_ftrace_trampoline_func(ops, rec);
3641 if (ptr)
3642 seq_printf(m, " ->%pS", ptr);
3643 }
3644
3645 #ifdef FTRACE_MCOUNT_MAX_OFFSET
/*
 * Weak functions can still have an mcount/fentry that is saved in
 * the __mcount_loc section. These can be detected by having a
 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
 * symbol found by kallsyms is not the function that the mcount/fentry
 * is part of. The offset is much greater in these cases.
 *
 * Test the record to make sure that the ip points to a valid kallsyms
 * symbol, and if not, mark the record disabled.
 */
static int test_for_valid_rec(struct dyn_ftrace *rec)
3657 {
3658 char str[KSYM_SYMBOL_LEN];
3659 unsigned long offset;
3660 const char *ret;
3661
3662 ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3663
3664 /* Weak functions can cause invalid addresses */
3665 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3666 rec->flags |= FTRACE_FL_DISABLED;
3667 return 0;
3668 }
3669 return 1;
3670 }
3671
3672 static struct workqueue_struct *ftrace_check_wq __initdata;
3673 static struct work_struct ftrace_check_work __initdata;
3674
3675 /*
3676 * Scan all the mcount/fentry entries to make sure they are valid.
3677 */
static __init void ftrace_check_work_func(struct work_struct *work)
3679 {
3680 struct ftrace_page *pg;
3681 struct dyn_ftrace *rec;
3682
3683 mutex_lock(&ftrace_lock);
3684 do_for_each_ftrace_rec(pg, rec) {
3685 test_for_valid_rec(rec);
3686 } while_for_each_ftrace_rec();
3687 mutex_unlock(&ftrace_lock);
3688 }
3689
static int __init ftrace_check_for_weak_functions(void)
3691 {
3692 INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
3693
3694 ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
3695
3696 queue_work(ftrace_check_wq, &ftrace_check_work);
3697 return 0;
3698 }
3699
static int __init ftrace_check_sync(void)
3701 {
3702 /* Make sure the ftrace_check updates are finished */
3703 if (ftrace_check_wq)
3704 destroy_workqueue(ftrace_check_wq);
3705 return 0;
3706 }
3707
3708 late_initcall_sync(ftrace_check_sync);
3709 subsys_initcall(ftrace_check_for_weak_functions);
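
/*
 * Ordering note: the scan is queued at subsys_initcall time and only
 * flushed (via destroy_workqueue()) at late_initcall_sync time, so the
 * weak-function check can run in parallel with most of the rest of boot.
 */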
3710
static int print_rec(struct seq_file *m, unsigned long ip)
3712 {
3713 unsigned long offset;
3714 char str[KSYM_SYMBOL_LEN];
3715 char *modname;
3716 const char *ret;
3717
3718 ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
3719 /* Weak functions can cause invalid addresses */
3720 if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
3721 snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
3722 FTRACE_INVALID_FUNCTION, offset);
3723 ret = NULL;
3724 }
3725
3726 seq_puts(m, str);
3727 if (modname)
3728 seq_printf(m, " [%s]", modname);
3729 return ret == NULL ? -1 : 0;
3730 }
3731 #else
static inline int test_for_valid_rec(struct dyn_ftrace *rec)
3733 {
3734 return 1;
3735 }
3736
static inline int print_rec(struct seq_file *m, unsigned long ip)
3738 {
3739 seq_printf(m, "%ps", (void *)ip);
3740 return 0;
3741 }
3742 #endif
3743
static int t_show(struct seq_file *m, void *v)
3745 {
3746 struct ftrace_iterator *iter = m->private;
3747 struct dyn_ftrace *rec;
3748
3749 if (iter->flags & FTRACE_ITER_PROBE)
3750 return t_probe_show(m, iter);
3751
3752 if (iter->flags & FTRACE_ITER_MOD)
3753 return t_mod_show(m, iter);
3754
3755 if (iter->flags & FTRACE_ITER_PRINTALL) {
3756 if (iter->flags & FTRACE_ITER_NOTRACE)
3757 seq_puts(m, "#### no functions disabled ####\n");
3758 else
3759 seq_puts(m, "#### all functions enabled ####\n");
3760 return 0;
3761 }
3762
3763 rec = iter->func;
3764
3765 if (!rec)
3766 return 0;
3767
3768 if (print_rec(m, rec->ip)) {
3769 /* This should only happen when a rec is disabled */
3770 WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
3771 seq_putc(m, '\n');
3772 return 0;
3773 }
3774
3775 if (iter->flags & FTRACE_ITER_ENABLED) {
3776 struct ftrace_ops *ops;
3777
3778 seq_printf(m, " (%ld)%s%s%s",
3779 ftrace_rec_count(rec),
3780 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3781 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
3782 rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
3783 if (rec->flags & FTRACE_FL_TRAMP_EN) {
3784 ops = ftrace_find_tramp_ops_any(rec);
3785 if (ops) {
3786 do {
3787 seq_printf(m, "\ttramp: %pS (%pS)",
3788 (void *)ops->trampoline,
3789 (void *)ops->func);
3790 add_trampoline_func(m, ops, rec);
3791 ops = ftrace_find_tramp_ops_next(rec, ops);
3792 } while (ops);
3793 } else
3794 seq_puts(m, "\ttramp: ERROR!");
3795 } else {
3796 add_trampoline_func(m, NULL, rec);
3797 }
3798 if (rec->flags & FTRACE_FL_DIRECT) {
3799 unsigned long direct;
3800
3801 direct = ftrace_find_rec_direct(rec->ip);
3802 if (direct)
3803 seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3804 }
3805 }
3806
3807 seq_putc(m, '\n');
3808
3809 return 0;
3810 }
3811
3812 static const struct seq_operations show_ftrace_seq_ops = {
3813 .start = t_start,
3814 .next = t_next,
3815 .stop = t_stop,
3816 .show = t_show,
3817 };
3818
static int
ftrace_avail_open(struct inode *inode, struct file *file)
3821 {
3822 struct ftrace_iterator *iter;
3823 int ret;
3824
3825 ret = security_locked_down(LOCKDOWN_TRACEFS);
3826 if (ret)
3827 return ret;
3828
3829 if (unlikely(ftrace_disabled))
3830 return -ENODEV;
3831
3832 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3833 if (!iter)
3834 return -ENOMEM;
3835
3836 iter->pg = ftrace_pages_start;
3837 iter->ops = &global_ops;
3838
3839 return 0;
3840 }
3841
static int
ftrace_enabled_open(struct inode *inode, struct file *file)
3844 {
3845 struct ftrace_iterator *iter;
3846
	/*
	 * This shows us what functions are currently being
	 * traced and by what. Not sure if we want lockdown
	 * to hide such critical information from an admin.
	 * Although, perhaps it can show information we don't
	 * want people to see, but if something is tracing
	 * something, we probably want to know about it.
	 */
3855
3856 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3857 if (!iter)
3858 return -ENOMEM;
3859
3860 iter->pg = ftrace_pages_start;
3861 iter->flags = FTRACE_ITER_ENABLED;
3862 iter->ops = &global_ops;
3863
3864 return 0;
3865 }
3866
3867 /**
3868 * ftrace_regex_open - initialize function tracer filter files
3869 * @ops: The ftrace_ops that hold the hash filters
3870 * @flag: The type of filter to process
3871 * @inode: The inode, usually passed in to your open routine
3872 * @file: The file, usually passed in to your open routine
3873 *
3874 * ftrace_regex_open() initializes the filter files for the
3875 * @ops. Depending on @flag it may process the filter hash or
3876 * the notrace hash of @ops. With this called from the open
3877 * routine, you can use ftrace_filter_write() for the write
3878 * routine if @flag has FTRACE_ITER_FILTER set, or
3879 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3880 * tracing_lseek() should be used as the lseek routine, and
3881 * release must call ftrace_regex_release().
3882 */
3883 int
3884 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3885 struct inode *inode, struct file *file)
3886 {
3887 struct ftrace_iterator *iter;
3888 struct ftrace_hash *hash;
3889 struct list_head *mod_head;
3890 struct trace_array *tr = ops->private;
3891 int ret = -ENOMEM;
3892
3893 ftrace_ops_init(ops);
3894
3895 if (unlikely(ftrace_disabled))
3896 return -ENODEV;
3897
3898 if (tracing_check_open_get_tr(tr))
3899 return -ENODEV;
3900
3901 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3902 if (!iter)
3903 goto out;
3904
3905 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3906 goto out;
3907
3908 iter->ops = ops;
3909 iter->flags = flag;
3910 iter->tr = tr;
3911
3912 mutex_lock(&ops->func_hash->regex_lock);
3913
3914 if (flag & FTRACE_ITER_NOTRACE) {
3915 hash = ops->func_hash->notrace_hash;
3916 mod_head = tr ? &tr->mod_notrace : NULL;
3917 } else {
3918 hash = ops->func_hash->filter_hash;
3919 mod_head = tr ? &tr->mod_trace : NULL;
3920 }
3921
3922 iter->mod_list = mod_head;
3923
3924 if (file->f_mode & FMODE_WRITE) {
3925 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3926
3927 if (file->f_flags & O_TRUNC) {
3928 iter->hash = alloc_ftrace_hash(size_bits);
3929 clear_ftrace_mod_list(mod_head);
3930 } else {
3931 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3932 }
3933
3934 if (!iter->hash) {
3935 trace_parser_put(&iter->parser);
3936 goto out_unlock;
3937 }
3938 } else
3939 iter->hash = hash;
3940
3941 ret = 0;
3942
3943 if (file->f_mode & FMODE_READ) {
3944 iter->pg = ftrace_pages_start;
3945
3946 ret = seq_open(file, &show_ftrace_seq_ops);
3947 if (!ret) {
3948 struct seq_file *m = file->private_data;
3949 m->private = iter;
3950 } else {
3951 /* Failed */
3952 free_ftrace_hash(iter->hash);
3953 trace_parser_put(&iter->parser);
3954 }
3955 } else
3956 file->private_data = iter;
3957
3958 out_unlock:
3959 mutex_unlock(&ops->func_hash->regex_lock);
3960
3961 out:
3962 if (ret) {
3963 kfree(iter);
3964 if (tr)
3965 trace_array_put(tr);
3966 }
3967
3968 return ret;
3969 }
3970
3971 static int
ftrace_filter_open(struct inode * inode,struct file * file)3972 ftrace_filter_open(struct inode *inode, struct file *file)
3973 {
3974 struct ftrace_ops *ops = inode->i_private;
3975
3976 /* Checks for tracefs lockdown */
3977 return ftrace_regex_open(ops,
3978 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3979 inode, file);
3980 }
3981
3982 static int
ftrace_notrace_open(struct inode * inode,struct file * file)3983 ftrace_notrace_open(struct inode *inode, struct file *file)
3984 {
3985 struct ftrace_ops *ops = inode->i_private;
3986
3987 /* Checks for tracefs lockdown */
3988 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3989 inode, file);
3990 }
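
/*
 * A minimal sketch of the file_operations that would sit on top of the
 * open/write helpers above (compare the real ftrace_filter_fops defined
 * later in this file; "my_filter_fops" is only an illustrative name):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */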
3991
3992 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3993 struct ftrace_glob {
3994 char *search;
3995 unsigned len;
3996 int type;
3997 };
3998
3999 /*
4000 * If symbols in an architecture don't correspond exactly to the user-visible
4001 * name of what they represent, it is possible to define this function to
4002 * perform the necessary adjustments.
4003 */
4004 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4005 {
4006 return str;
4007 }
4008
4009 static int ftrace_match(char *str, struct ftrace_glob *g)
4010 {
4011 int matched = 0;
4012 int slen;
4013
4014 str = arch_ftrace_match_adjust(str, g->search);
4015
4016 switch (g->type) {
4017 case MATCH_FULL:
4018 if (strcmp(str, g->search) == 0)
4019 matched = 1;
4020 break;
4021 case MATCH_FRONT_ONLY:
4022 if (strncmp(str, g->search, g->len) == 0)
4023 matched = 1;
4024 break;
4025 case MATCH_MIDDLE_ONLY:
4026 if (strstr(str, g->search))
4027 matched = 1;
4028 break;
4029 case MATCH_END_ONLY:
4030 slen = strlen(str);
4031 if (slen >= g->len &&
4032 memcmp(str + slen - g->len, g->search, g->len) == 0)
4033 matched = 1;
4034 break;
4035 case MATCH_GLOB:
4036 if (glob_match(g->search, str))
4037 matched = 1;
4038 break;
4039 }
4040
4041 return matched;
4042 }
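
/*
 * Illustrative examples of how a parsed glob maps to the cases above,
 * assuming filter_parse_regex() has already split the pattern and set
 * g->type and g->search:
 *
 *	"sys_open" -> MATCH_FULL:        strcmp(str, "sys_open") == 0
 *	"sys_*"    -> MATCH_FRONT_ONLY:  strncmp(str, "sys_", 4) == 0
 *	"*_open"   -> MATCH_END_ONLY:    compare the last 5 bytes to "_open"
 *	"*open*"   -> MATCH_MIDDLE_ONLY: strstr(str, "open") != NULL
 *	"sys_?pen" -> MATCH_GLOB:        glob_match("sys_?pen", str)
 */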
4043
4044 static int
4045 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4046 {
4047 struct ftrace_func_entry *entry;
4048 int ret = 0;
4049
4050 entry = ftrace_lookup_ip(hash, rec->ip);
4051 if (clear_filter) {
4052 /* Do nothing if it doesn't exist */
4053 if (!entry)
4054 return 0;
4055
4056 free_hash_entry(hash, entry);
4057 } else {
4058 /* Do nothing if it exists */
4059 if (entry)
4060 return 0;
4061
4062 ret = add_hash_entry(hash, rec->ip);
4063 }
4064 return ret;
4065 }
4066
4067 static int
4068 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4069 int clear_filter)
4070 {
4071 long index = simple_strtoul(func_g->search, NULL, 0);
4072 struct ftrace_page *pg;
4073 struct dyn_ftrace *rec;
4074
4075 /* The index starts at 1 */
4076 if (--index < 0)
4077 return 0;
4078
4079 do_for_each_ftrace_rec(pg, rec) {
4080 if (pg->index <= index) {
4081 index -= pg->index;
4082 /* this is a double loop, break goes to the next page */
4083 break;
4084 }
4085 rec = &pg->records[index];
4086 enter_record(hash, rec, clear_filter);
4087 return 1;
4088 } while_for_each_ftrace_rec();
4089 return 0;
4090 }
4091
4092 #ifdef FTRACE_MCOUNT_MAX_OFFSET
4093 static int lookup_ip(unsigned long ip, char **modname, char *str)
4094 {
4095 unsigned long offset;
4096
4097 kallsyms_lookup(ip, NULL, &offset, modname, str);
4098 if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4099 return -1;
4100 return 0;
4101 }
4102 #else
4103 static int lookup_ip(unsigned long ip, char **modname, char *str)
4104 {
4105 kallsyms_lookup(ip, NULL, NULL, modname, str);
4106 return 0;
4107 }
4108 #endif
4109
4110 static int
4111 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4112 struct ftrace_glob *mod_g, int exclude_mod)
4113 {
4114 char str[KSYM_SYMBOL_LEN];
4115 char *modname;
4116
4117 if (lookup_ip(rec->ip, &modname, str)) {
4118 /* This should only happen when a rec is disabled */
4119 WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4120 !(rec->flags & FTRACE_FL_DISABLED));
4121 return 0;
4122 }
4123
4124 if (mod_g) {
4125 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4126
4127 /* blank module name to match all modules */
4128 if (!mod_g->len) {
4129 /* blank module globbing: modname xor exclude_mod */
4130 if (!exclude_mod != !modname)
4131 goto func_match;
4132 return 0;
4133 }
4134
4135 /*
4136 * exclude_mod is set to trace everything but the given
4137 * module. If it is set and the module matches, then
4138 * return 0. If it is not set and the module doesn't match,
4139 * also return 0. Otherwise, check the function to see if
4140 * that matches.
4141 */
4142 if (!mod_matches == !exclude_mod)
4143 return 0;
4144 func_match:
4145 /* blank search means to match all funcs in the mod */
4146 if (!func_g->len)
4147 return 1;
4148 }
4149
4150 return ftrace_match(str, func_g);
4151 }
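
/*
 * For reference, the mod matching above is what backs filter strings
 * such as these (as written to set_ftrace_filter):
 *
 *	"write*:mod:ext4"  - match write* functions in the ext4 module
 *	"write*:mod:!ext4" - match write* functions everywhere but ext4
 *	"*:mod:snd*"       - match every function in modules matching snd*
 */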
4152
4153 static int
4154 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4155 {
4156 struct ftrace_page *pg;
4157 struct dyn_ftrace *rec;
4158 struct ftrace_glob func_g = { .type = MATCH_FULL };
4159 struct ftrace_glob mod_g = { .type = MATCH_FULL };
4160 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4161 int exclude_mod = 0;
4162 int found = 0;
4163 int ret;
4164 int clear_filter = 0;
4165
4166 if (func) {
4167 func_g.type = filter_parse_regex(func, len, &func_g.search,
4168 &clear_filter);
4169 func_g.len = strlen(func_g.search);
4170 }
4171
4172 if (mod) {
4173 mod_g.type = filter_parse_regex(mod, strlen(mod),
4174 &mod_g.search, &exclude_mod);
4175 mod_g.len = strlen(mod_g.search);
4176 }
4177
4178 mutex_lock(&ftrace_lock);
4179
4180 if (unlikely(ftrace_disabled))
4181 goto out_unlock;
4182
4183 if (func_g.type == MATCH_INDEX) {
4184 found = add_rec_by_index(hash, &func_g, clear_filter);
4185 goto out_unlock;
4186 }
4187
4188 do_for_each_ftrace_rec(pg, rec) {
4189
4190 if (rec->flags & FTRACE_FL_DISABLED)
4191 continue;
4192
4193 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4194 ret = enter_record(hash, rec, clear_filter);
4195 if (ret < 0) {
4196 found = ret;
4197 goto out_unlock;
4198 }
4199 found = 1;
4200 }
4201 } while_for_each_ftrace_rec();
4202 out_unlock:
4203 mutex_unlock(&ftrace_lock);
4204
4205 return found;
4206 }
4207
4208 static int
4209 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4210 {
4211 return match_records(hash, buff, len, NULL);
4212 }
4213
4214 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4215 struct ftrace_ops_hash *old_hash)
4216 {
4217 struct ftrace_ops *op;
4218
4219 if (!ftrace_enabled)
4220 return;
4221
4222 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4223 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4224 return;
4225 }
4226
4227 /*
4228 * If this is the shared global_ops filter, then we need to
4229 * check if there is another ops that shares it and is enabled.
4230 * If so, we still need to run the modify code.
4231 */
4232 if (ops->func_hash != &global_ops.local_hash)
4233 return;
4234
4235 do_for_each_ftrace_op(op, ftrace_ops_list) {
4236 if (op->func_hash == &global_ops.local_hash &&
4237 op->flags & FTRACE_OPS_FL_ENABLED) {
4238 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4239 /* Only need to do this once */
4240 return;
4241 }
4242 } while_for_each_ftrace_op(op);
4243 }
4244
4245 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4246 struct ftrace_hash **orig_hash,
4247 struct ftrace_hash *hash,
4248 int enable)
4249 {
4250 struct ftrace_ops_hash old_hash_ops;
4251 struct ftrace_hash *old_hash;
4252 int ret;
4253
4254 old_hash = *orig_hash;
4255 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4256 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4257 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4258 if (!ret) {
4259 ftrace_ops_update_code(ops, &old_hash_ops);
4260 free_ftrace_hash_rcu(old_hash);
4261 }
4262 return ret;
4263 }
4264
4265 static bool module_exists(const char *module)
4266 {
4267 /* All modules have the symbol __this_module */
4268 static const char this_mod[] = "__this_module";
4269 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4270 unsigned long val;
4271 int n;
4272
4273 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4274
4275 if (n > sizeof(modname) - 1)
4276 return false;
4277
4278 val = module_kallsyms_lookup_name(modname);
4279 return val != 0;
4280 }
4281
4282 static int cache_mod(struct trace_array *tr,
4283 const char *func, char *module, int enable)
4284 {
4285 struct ftrace_mod_load *ftrace_mod, *n;
4286 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4287 int ret;
4288
4289 mutex_lock(&ftrace_lock);
4290
4291 /* We do not cache inverse filters */
4292 if (func[0] == '!') {
4293 func++;
4294 ret = -EINVAL;
4295
4296 /* Look to remove this hash */
4297 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4298 if (strcmp(ftrace_mod->module, module) != 0)
4299 continue;
4300
4301 /* no func matches all */
4302 if (strcmp(func, "*") == 0 ||
4303 (ftrace_mod->func &&
4304 strcmp(ftrace_mod->func, func) == 0)) {
4305 ret = 0;
4306 free_ftrace_mod(ftrace_mod);
4307 continue;
4308 }
4309 }
4310 goto out;
4311 }
4312
4313 ret = -EINVAL;
4314 /* We only care about modules that have not been loaded yet */
4315 if (module_exists(module))
4316 goto out;
4317
4318 /* Save this string off, and execute it when the module is loaded */
4319 ret = ftrace_add_mod(tr, func, module, enable);
4320 out:
4321 mutex_unlock(&ftrace_lock);
4322
4323 return ret;
4324 }
4325
4326 static int
4327 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4328 int reset, int enable);
4329
4330 #ifdef CONFIG_MODULES
4331 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4332 char *mod, bool enable)
4333 {
4334 struct ftrace_mod_load *ftrace_mod, *n;
4335 struct ftrace_hash **orig_hash, *new_hash;
4336 LIST_HEAD(process_mods);
4337 char *func;
4338
4339 mutex_lock(&ops->func_hash->regex_lock);
4340
4341 if (enable)
4342 orig_hash = &ops->func_hash->filter_hash;
4343 else
4344 orig_hash = &ops->func_hash->notrace_hash;
4345
4346 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4347 *orig_hash);
4348 if (!new_hash)
4349 goto out; /* warn? */
4350
4351 mutex_lock(&ftrace_lock);
4352
4353 list_for_each_entry_safe(ftrace_mod, n, head, list) {
4354
4355 if (strcmp(ftrace_mod->module, mod) != 0)
4356 continue;
4357
4358 if (ftrace_mod->func)
4359 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4360 else
4361 func = kstrdup("*", GFP_KERNEL);
4362
4363 if (!func) /* warn? */
4364 continue;
4365
4366 list_move(&ftrace_mod->list, &process_mods);
4367
4368 /* Use the newly allocated func, as it may be "*" */
4369 kfree(ftrace_mod->func);
4370 ftrace_mod->func = func;
4371 }
4372
4373 mutex_unlock(&ftrace_lock);
4374
4375 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4376
4377 func = ftrace_mod->func;
4378
4379 /* Grabs ftrace_lock, which is why we have this extra step */
4380 match_records(new_hash, func, strlen(func), mod);
4381 free_ftrace_mod(ftrace_mod);
4382 }
4383
4384 if (enable && list_empty(head))
4385 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4386
4387 mutex_lock(&ftrace_lock);
4388
4389 ftrace_hash_move_and_update_ops(ops, orig_hash,
4390 new_hash, enable);
4391 mutex_unlock(&ftrace_lock);
4392
4393 out:
4394 mutex_unlock(&ops->func_hash->regex_lock);
4395
4396 free_ftrace_hash(new_hash);
4397 }
4398
4399 static void process_cached_mods(const char *mod_name)
4400 {
4401 struct trace_array *tr;
4402 char *mod;
4403
4404 mod = kstrdup(mod_name, GFP_KERNEL);
4405 if (!mod)
4406 return;
4407
4408 mutex_lock(&trace_types_lock);
4409 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4410 if (!list_empty(&tr->mod_trace))
4411 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4412 if (!list_empty(&tr->mod_notrace))
4413 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4414 }
4415 mutex_unlock(&trace_types_lock);
4416
4417 kfree(mod);
4418 }
4419 #endif
4420
4421 /*
4422 * We register the module command as a template to show others how
4423 * to register a command as well.
4424 */
4425
4426 static int
4427 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4428 char *func_orig, char *cmd, char *module, int enable)
4429 {
4430 char *func;
4431 int ret;
4432
4433 /* match_records() modifies func, and we need the original */
4434 func = kstrdup(func_orig, GFP_KERNEL);
4435 if (!func)
4436 return -ENOMEM;
4437
4438 /*
4439 * cmd == 'mod' because we only registered this func
4440 * for the 'mod' ftrace_func_command.
4441 * But if you register one func with multiple commands,
4442 * you can tell which command was used by the cmd
4443 * parameter.
4444 */
4445 ret = match_records(hash, func, strlen(func), module);
4446 kfree(func);
4447
4448 if (!ret)
4449 return cache_mod(tr, func_orig, module, enable);
4450 if (ret < 0)
4451 return ret;
4452 return 0;
4453 }
4454
4455 static struct ftrace_func_command ftrace_mod_cmd = {
4456 .name = "mod",
4457 .func = ftrace_mod_callback,
4458 };
4459
4460 static int __init ftrace_mod_cmd_init(void)
4461 {
4462 return register_ftrace_command(&ftrace_mod_cmd);
4463 }
4464 core_initcall(ftrace_mod_cmd_init);
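
/*
 * A sketch of how another command could be registered, following the
 * "mod" template above (the "foo" name and its callback are hypothetical;
 * the callback would run for "echo '<func>:foo:<param>' > set_ftrace_filter"):
 *
 *	static int ftrace_foo_callback(struct trace_array *tr,
 *				       struct ftrace_hash *hash, char *func,
 *				       char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command ftrace_foo_cmd = {
 *		.name = "foo",
 *		.func = ftrace_foo_callback,
 *	};
 *
 *	static int __init ftrace_foo_cmd_init(void)
 *	{
 *		return register_ftrace_command(&ftrace_foo_cmd);
 *	}
 *	core_initcall(ftrace_foo_cmd_init);
 */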
4465
4466 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4467 struct ftrace_ops *op, struct ftrace_regs *fregs)
4468 {
4469 struct ftrace_probe_ops *probe_ops;
4470 struct ftrace_func_probe *probe;
4471
4472 probe = container_of(op, struct ftrace_func_probe, ops);
4473 probe_ops = probe->probe_ops;
4474
4475 /*
4476 * Disable preemption for these calls to prevent an RCU grace
4477 * period. This syncs the hash iteration and freeing of items
4478 * on the hash. rcu_read_lock is too dangerous here.
4479 */
4480 preempt_disable_notrace();
4481 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4482 preempt_enable_notrace();
4483 }
4484
4485 struct ftrace_func_map {
4486 struct ftrace_func_entry entry;
4487 void *data;
4488 };
4489
4490 struct ftrace_func_mapper {
4491 struct ftrace_hash hash;
4492 };
4493
4494 /**
4495 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4496 *
4497 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4498 */
4499 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4500 {
4501 struct ftrace_hash *hash;
4502
4503 /*
4504 * The mapper is simply a ftrace_hash, but since the entries
4505 * in the hash are not ftrace_func_entry type, we define it
4506 * as a separate structure.
4507 */
4508 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4509 return (struct ftrace_func_mapper *)hash;
4510 }
4511
4512 /**
4513 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4514 * @mapper: The mapper that has the ip maps
4515 * @ip: the instruction pointer to find the data for
4516 *
4517 * Returns the data mapped to @ip if found, otherwise NULL. The return
4518 * is actually the address of the mapper data pointer. The address is
4519 * returned for use cases where the data is no bigger than a long, and
4520 * the user can use the data pointer as its data instead of having to
4521 * allocate more memory for the reference.
4522 */
4523 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4524 unsigned long ip)
4525 {
4526 struct ftrace_func_entry *entry;
4527 struct ftrace_func_map *map;
4528
4529 entry = ftrace_lookup_ip(&mapper->hash, ip);
4530 if (!entry)
4531 return NULL;
4532
4533 map = (struct ftrace_func_map *)entry;
4534 return &map->data;
4535 }
4536
4537 /**
4538 * ftrace_func_mapper_add_ip - Map some data to an ip
4539 * @mapper: The mapper that has the ip maps
4540 * @ip: The instruction pointer address to map @data to
4541 * @data: The data to map to @ip
4542 *
4543 * Returns 0 on success otherwise an error.
4544 */
4545 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4546 unsigned long ip, void *data)
4547 {
4548 struct ftrace_func_entry *entry;
4549 struct ftrace_func_map *map;
4550
4551 entry = ftrace_lookup_ip(&mapper->hash, ip);
4552 if (entry)
4553 return -EBUSY;
4554
4555 map = kmalloc(sizeof(*map), GFP_KERNEL);
4556 if (!map)
4557 return -ENOMEM;
4558
4559 map->entry.ip = ip;
4560 map->data = data;
4561
4562 __add_hash_entry(&mapper->hash, &map->entry);
4563
4564 return 0;
4565 }
4566
4567 /**
4568 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4569 * @mapper: The mapper that has the ip maps
4570 * @ip: The instruction pointer address to remove the data from
4571 *
4572 * Returns the data if it is found, otherwise NULL.
4573 * Note, if the data pointer is used as the data itself (see
4574 * ftrace_func_mapper_find_ip()), then the return value may be
4575 * meaningless if the data pointer was set to zero.
4576 */
4577 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4578 unsigned long ip)
4579 {
4580 struct ftrace_func_entry *entry;
4581 struct ftrace_func_map *map;
4582 void *data;
4583
4584 entry = ftrace_lookup_ip(&mapper->hash, ip);
4585 if (!entry)
4586 return NULL;
4587
4588 map = (struct ftrace_func_map *)entry;
4589 data = map->data;
4590
4591 remove_hash_entry(&mapper->hash, entry);
4592 kfree(entry);
4593
4594 return data;
4595 }
4596
4597 /**
4598 * free_ftrace_func_mapper - free a mapping of ips and data
4599 * @mapper: The mapper that has the ip maps
4600 * @free_func: A function to be called on each data item.
4601 *
4602 * This is used to free the function mapper. The @free_func is optional
4603 * and can be used if the data needs to be freed as well.
4604 */
4605 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4606 ftrace_mapper_func free_func)
4607 {
4608 struct ftrace_func_entry *entry;
4609 struct ftrace_func_map *map;
4610 struct hlist_head *hhd;
4611 int size, i;
4612
4613 if (!mapper)
4614 return;
4615
4616 if (free_func && mapper->hash.count) {
4617 size = 1 << mapper->hash.size_bits;
4618 for (i = 0; i < size; i++) {
4619 hhd = &mapper->hash.buckets[i];
4620 hlist_for_each_entry(entry, hhd, hlist) {
4621 map = (struct ftrace_func_map *)entry;
4622 free_func(map);
4623 }
4624 }
4625 }
4626 free_ftrace_hash(&mapper->hash);
4627 }
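
/*
 * Typical mapper usage, as a sketch (my_data and my_free_func are
 * hypothetical; probe implementations normally do this from their
 * init/free callbacks):
 *
 *	struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
 *	void **data;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		my_data = *data;
 *	free_ftrace_func_mapper(mapper, my_free_func);
 */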
4628
4629 static void release_probe(struct ftrace_func_probe *probe)
4630 {
4631 struct ftrace_probe_ops *probe_ops;
4632
4633 mutex_lock(&ftrace_lock);
4634
4635 WARN_ON(probe->ref <= 0);
4636
4637 /* Subtract the ref that was used to protect this instance */
4638 probe->ref--;
4639
4640 if (!probe->ref) {
4641 probe_ops = probe->probe_ops;
4642 /*
4643 * Sending zero as ip tells probe_ops to free
4644 * the probe->data itself
4645 */
4646 if (probe_ops->free)
4647 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4648 list_del(&probe->list);
4649 kfree(probe);
4650 }
4651 mutex_unlock(&ftrace_lock);
4652 }
4653
4654 static void acquire_probe_locked(struct ftrace_func_probe *probe)
4655 {
4656 /*
4657 * Add one ref to keep it from being freed when releasing the
4658 * ftrace_lock mutex.
4659 */
4660 probe->ref++;
4661 }
4662
4663 int
4664 register_ftrace_function_probe(char *glob, struct trace_array *tr,
4665 struct ftrace_probe_ops *probe_ops,
4666 void *data)
4667 {
4668 struct ftrace_func_probe *probe = NULL, *iter;
4669 struct ftrace_func_entry *entry;
4670 struct ftrace_hash **orig_hash;
4671 struct ftrace_hash *old_hash;
4672 struct ftrace_hash *hash;
4673 int count = 0;
4674 int size;
4675 int ret;
4676 int i;
4677
4678 if (WARN_ON(!tr))
4679 return -EINVAL;
4680
4681 /* We do not support '!' for function probes */
4682 if (WARN_ON(glob[0] == '!'))
4683 return -EINVAL;
4684
4685
4686 mutex_lock(&ftrace_lock);
4687 /* Check if the probe_ops is already registered */
4688 list_for_each_entry(iter, &tr->func_probes, list) {
4689 if (iter->probe_ops == probe_ops) {
4690 probe = iter;
4691 break;
4692 }
4693 }
4694 if (!probe) {
4695 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4696 if (!probe) {
4697 mutex_unlock(&ftrace_lock);
4698 return -ENOMEM;
4699 }
4700 probe->probe_ops = probe_ops;
4701 probe->ops.func = function_trace_probe_call;
4702 probe->tr = tr;
4703 ftrace_ops_init(&probe->ops);
4704 list_add(&probe->list, &tr->func_probes);
4705 }
4706
4707 acquire_probe_locked(probe);
4708
4709 mutex_unlock(&ftrace_lock);
4710
4711 /*
4712 * Note, there's a small window here where the func_hash->filter_hash
4713 * may be NULL or empty. Need to be careful when reading the loop.
4714 */
4715 mutex_lock(&probe->ops.func_hash->regex_lock);
4716
4717 orig_hash = &probe->ops.func_hash->filter_hash;
4718 old_hash = *orig_hash;
4719 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4720
4721 if (!hash) {
4722 ret = -ENOMEM;
4723 goto out;
4724 }
4725
4726 ret = ftrace_match_records(hash, glob, strlen(glob));
4727
4728 /* Nothing found? */
4729 if (!ret)
4730 ret = -EINVAL;
4731
4732 if (ret < 0)
4733 goto out;
4734
4735 size = 1 << hash->size_bits;
4736 for (i = 0; i < size; i++) {
4737 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4738 if (ftrace_lookup_ip(old_hash, entry->ip))
4739 continue;
4740 /*
4741 * The caller might want to do something special
4742 * for each function we find. We call the callback
4743 * to give the caller an opportunity to do so.
4744 */
4745 if (probe_ops->init) {
4746 ret = probe_ops->init(probe_ops, tr,
4747 entry->ip, data,
4748 &probe->data);
4749 if (ret < 0) {
4750 if (probe_ops->free && count)
4751 probe_ops->free(probe_ops, tr,
4752 0, probe->data);
4753 probe->data = NULL;
4754 goto out;
4755 }
4756 }
4757 count++;
4758 }
4759 }
4760
4761 mutex_lock(&ftrace_lock);
4762
4763 if (!count) {
4764 /* Nothing was added? */
4765 ret = -EINVAL;
4766 goto out_unlock;
4767 }
4768
4769 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4770 hash, 1);
4771 if (ret < 0)
4772 goto err_unlock;
4773
4774 /* One ref for each new function traced */
4775 probe->ref += count;
4776
4777 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4778 ret = ftrace_startup(&probe->ops, 0);
4779
4780 out_unlock:
4781 mutex_unlock(&ftrace_lock);
4782
4783 if (!ret)
4784 ret = count;
4785 out:
4786 mutex_unlock(&probe->ops.func_hash->regex_lock);
4787 free_ftrace_hash(hash);
4788
4789 release_probe(probe);
4790
4791 return ret;
4792
4793 err_unlock:
4794 if (!probe_ops->free || !count)
4795 goto out_unlock;
4796
4797 /* Failed to do the move, need to call the free functions */
4798 for (i = 0; i < size; i++) {
4799 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4800 if (ftrace_lookup_ip(old_hash, entry->ip))
4801 continue;
4802 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4803 }
4804 }
4805 goto out_unlock;
4806 }
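
/*
 * A minimal (hypothetical) probe, to show the shape of the API; this is
 * how commands like "<func>:traceon" are implemented elsewhere:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		... called each time a matched function is hit ...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("sched_*", tr, &my_probe_ops, NULL);
 */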
4807
4808 int
4809 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4810 struct ftrace_probe_ops *probe_ops)
4811 {
4812 struct ftrace_func_probe *probe = NULL, *iter;
4813 struct ftrace_ops_hash old_hash_ops;
4814 struct ftrace_func_entry *entry;
4815 struct ftrace_glob func_g;
4816 struct ftrace_hash **orig_hash;
4817 struct ftrace_hash *old_hash;
4818 struct ftrace_hash *hash = NULL;
4819 struct hlist_node *tmp;
4820 struct hlist_head hhd;
4821 char str[KSYM_SYMBOL_LEN];
4822 int count = 0;
4823 int i, ret = -ENODEV;
4824 int size;
4825
4826 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4827 func_g.search = NULL;
4828 else {
4829 int not;
4830
4831 func_g.type = filter_parse_regex(glob, strlen(glob),
4832 &func_g.search, &not);
4833 func_g.len = strlen(func_g.search);
4834
4835 /* we do not support '!' for function probes */
4836 if (WARN_ON(not))
4837 return -EINVAL;
4838 }
4839
4840 mutex_lock(&ftrace_lock);
4841 /* Check if the probe_ops is already registered */
4842 list_for_each_entry(iter, &tr->func_probes, list) {
4843 if (iter->probe_ops == probe_ops) {
4844 probe = iter;
4845 break;
4846 }
4847 }
4848 if (!probe)
4849 goto err_unlock_ftrace;
4850
4851 ret = -EINVAL;
4852 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4853 goto err_unlock_ftrace;
4854
4855 acquire_probe_locked(probe);
4856
4857 mutex_unlock(&ftrace_lock);
4858
4859 mutex_lock(&probe->ops.func_hash->regex_lock);
4860
4861 orig_hash = &probe->ops.func_hash->filter_hash;
4862 old_hash = *orig_hash;
4863
4864 if (ftrace_hash_empty(old_hash))
4865 goto out_unlock;
4866
4867 old_hash_ops.filter_hash = old_hash;
4868 /* Probes only have filters */
4869 old_hash_ops.notrace_hash = NULL;
4870
4871 ret = -ENOMEM;
4872 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4873 if (!hash)
4874 goto out_unlock;
4875
4876 INIT_HLIST_HEAD(&hhd);
4877
4878 size = 1 << hash->size_bits;
4879 for (i = 0; i < size; i++) {
4880 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4881
4882 if (func_g.search) {
4883 kallsyms_lookup(entry->ip, NULL, NULL,
4884 NULL, str);
4885 if (!ftrace_match(str, &func_g))
4886 continue;
4887 }
4888 count++;
4889 remove_hash_entry(hash, entry);
4890 hlist_add_head(&entry->hlist, &hhd);
4891 }
4892 }
4893
4894 /* Nothing found? */
4895 if (!count) {
4896 ret = -EINVAL;
4897 goto out_unlock;
4898 }
4899
4900 mutex_lock(&ftrace_lock);
4901
4902 WARN_ON(probe->ref < count);
4903
4904 probe->ref -= count;
4905
4906 if (ftrace_hash_empty(hash))
4907 ftrace_shutdown(&probe->ops, 0);
4908
4909 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4910 hash, 1);
4911
4912 /* still need to update the function call sites */
4913 if (ftrace_enabled && !ftrace_hash_empty(hash))
4914 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4915 &old_hash_ops);
4916 synchronize_rcu();
4917
4918 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4919 hlist_del(&entry->hlist);
4920 if (probe_ops->free)
4921 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4922 kfree(entry);
4923 }
4924 mutex_unlock(&ftrace_lock);
4925
4926 out_unlock:
4927 mutex_unlock(&probe->ops.func_hash->regex_lock);
4928 free_ftrace_hash(hash);
4929
4930 release_probe(probe);
4931
4932 return ret;
4933
4934 err_unlock_ftrace:
4935 mutex_unlock(&ftrace_lock);
4936 return ret;
4937 }
4938
4939 void clear_ftrace_function_probes(struct trace_array *tr)
4940 {
4941 struct ftrace_func_probe *probe, *n;
4942
4943 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4944 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4945 }
4946
4947 static LIST_HEAD(ftrace_commands);
4948 static DEFINE_MUTEX(ftrace_cmd_mutex);
4949
4950 /*
4951 * Currently we only register ftrace commands from __init, so mark this
4952 * __init too.
4953 */
4954 __init int register_ftrace_command(struct ftrace_func_command *cmd)
4955 {
4956 struct ftrace_func_command *p;
4957 int ret = 0;
4958
4959 mutex_lock(&ftrace_cmd_mutex);
4960 list_for_each_entry(p, &ftrace_commands, list) {
4961 if (strcmp(cmd->name, p->name) == 0) {
4962 ret = -EBUSY;
4963 goto out_unlock;
4964 }
4965 }
4966 list_add(&cmd->list, &ftrace_commands);
4967 out_unlock:
4968 mutex_unlock(&ftrace_cmd_mutex);
4969
4970 return ret;
4971 }
4972
4973 /*
4974 * Currently we only unregister ftrace commands from __init, so mark
4975 * this __init too.
4976 */
4977 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4978 {
4979 struct ftrace_func_command *p, *n;
4980 int ret = -ENODEV;
4981
4982 mutex_lock(&ftrace_cmd_mutex);
4983 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4984 if (strcmp(cmd->name, p->name) == 0) {
4985 ret = 0;
4986 list_del_init(&p->list);
4987 goto out_unlock;
4988 }
4989 }
4990 out_unlock:
4991 mutex_unlock(&ftrace_cmd_mutex);
4992
4993 return ret;
4994 }
4995
4996 static int ftrace_process_regex(struct ftrace_iterator *iter,
4997 char *buff, int len, int enable)
4998 {
4999 struct ftrace_hash *hash = iter->hash;
5000 struct trace_array *tr = iter->ops->private;
5001 char *func, *command, *next = buff;
5002 struct ftrace_func_command *p;
5003 int ret = -EINVAL;
5004
5005 func = strsep(&next, ":");
5006
5007 if (!next) {
5008 ret = ftrace_match_records(hash, func, len);
5009 if (!ret)
5010 ret = -EINVAL;
5011 if (ret < 0)
5012 return ret;
5013 return 0;
5014 }
5015
5016 /* command found */
5017
5018 command = strsep(&next, ":");
5019
5020 mutex_lock(&ftrace_cmd_mutex);
5021 list_for_each_entry(p, &ftrace_commands, list) {
5022 if (strcmp(p->name, command) == 0) {
5023 ret = p->func(tr, hash, func, command, next, enable);
5024 goto out_unlock;
5025 }
5026 }
5027 out_unlock:
5028 mutex_unlock(&ftrace_cmd_mutex);
5029
5030 return ret;
5031 }
5032
5033 static ssize_t
5034 ftrace_regex_write(struct file *file, const char __user *ubuf,
5035 size_t cnt, loff_t *ppos, int enable)
5036 {
5037 struct ftrace_iterator *iter;
5038 struct trace_parser *parser;
5039 ssize_t ret, read;
5040
5041 if (!cnt)
5042 return 0;
5043
5044 if (file->f_mode & FMODE_READ) {
5045 struct seq_file *m = file->private_data;
5046 iter = m->private;
5047 } else
5048 iter = file->private_data;
5049
5050 if (unlikely(ftrace_disabled))
5051 return -ENODEV;
5052
5053 /* iter->hash is a local copy, so we don't need regex_lock */
5054
5055 parser = &iter->parser;
5056 read = trace_get_user(parser, ubuf, cnt, ppos);
5057
5058 if (read >= 0 && trace_parser_loaded(parser) &&
5059 !trace_parser_cont(parser)) {
5060 ret = ftrace_process_regex(iter, parser->buffer,
5061 parser->idx, enable);
5062 trace_parser_clear(parser);
5063 if (ret < 0)
5064 goto out;
5065 }
5066
5067 ret = read;
5068 out:
5069 return ret;
5070 }
5071
5072 ssize_t
5073 ftrace_filter_write(struct file *file, const char __user *ubuf,
5074 size_t cnt, loff_t *ppos)
5075 {
5076 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5077 }
5078
5079 ssize_t
5080 ftrace_notrace_write(struct file *file, const char __user *ubuf,
5081 size_t cnt, loff_t *ppos)
5082 {
5083 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5084 }
5085
5086 static int
5087 __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5088 {
5089 struct ftrace_func_entry *entry;
5090
5091 ip = ftrace_location(ip);
5092 if (!ip)
5093 return -EINVAL;
5094
5095 if (remove) {
5096 entry = ftrace_lookup_ip(hash, ip);
5097 if (!entry)
5098 return -ENOENT;
5099 free_hash_entry(hash, entry);
5100 return 0;
5101 }
5102
5103 return add_hash_entry(hash, ip);
5104 }
5105
5106 static int
5107 ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5108 unsigned int cnt, int remove)
5109 {
5110 unsigned int i;
5111 int err;
5112
5113 for (i = 0; i < cnt; i++) {
5114 err = __ftrace_match_addr(hash, ips[i], remove);
5115 if (err) {
5116 /*
5117 * This expects the @hash is a temporary hash and if this
5118 * fails the caller must free the @hash.
5119 */
5120 return err;
5121 }
5122 }
5123 return 0;
5124 }
5125
5126 static int
5127 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5128 unsigned long *ips, unsigned int cnt,
5129 int remove, int reset, int enable)
5130 {
5131 struct ftrace_hash **orig_hash;
5132 struct ftrace_hash *hash;
5133 int ret;
5134
5135 if (unlikely(ftrace_disabled))
5136 return -ENODEV;
5137
5138 mutex_lock(&ops->func_hash->regex_lock);
5139
5140 if (enable)
5141 orig_hash = &ops->func_hash->filter_hash;
5142 else
5143 orig_hash = &ops->func_hash->notrace_hash;
5144
5145 if (reset)
5146 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5147 else
5148 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5149
5150 if (!hash) {
5151 ret = -ENOMEM;
5152 goto out_regex_unlock;
5153 }
5154
5155 if (buf && !ftrace_match_records(hash, buf, len)) {
5156 ret = -EINVAL;
5157 goto out_regex_unlock;
5158 }
5159 if (ips) {
5160 ret = ftrace_match_addr(hash, ips, cnt, remove);
5161 if (ret < 0)
5162 goto out_regex_unlock;
5163 }
5164
5165 mutex_lock(&ftrace_lock);
5166 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5167 mutex_unlock(&ftrace_lock);
5168
5169 out_regex_unlock:
5170 mutex_unlock(&ops->func_hash->regex_lock);
5171
5172 free_ftrace_hash(hash);
5173 return ret;
5174 }
5175
5176 static int
5177 ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5178 int remove, int reset, int enable)
5179 {
5180 return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5181 }
5182
5183 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5184
5185 struct ftrace_direct_func {
5186 struct list_head next;
5187 unsigned long addr;
5188 int count;
5189 };
5190
5191 static LIST_HEAD(ftrace_direct_funcs);
5192
5193 /**
5194 * ftrace_find_direct_func - check if an address is a registered direct caller
5195 * @addr: The address of a registered direct caller
5196 *
5197 * This searches to see if a ftrace direct caller has been registered
5198 * at a specific address, and if so, it returns a descriptor for it.
5199 *
5200 * This can be used by architecture code to see if an address is
5201 * a direct caller (trampoline) attached to a fentry/mcount location.
5202 * This is useful for the function_graph tracer, as it may need to
5203 * do adjustments if it traced a location that also has a direct
5204 * trampoline attached to it.
5205 */
5206 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
5207 {
5208 struct ftrace_direct_func *entry;
5209 bool found = false;
5210
5211 /* May be called by fgraph trampoline (protected by rcu tasks) */
5212 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
5213 if (entry->addr == addr) {
5214 found = true;
5215 break;
5216 }
5217 }
5218 if (found)
5219 return entry;
5220
5221 return NULL;
5222 }
5223
5224 static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
5225 {
5226 struct ftrace_direct_func *direct;
5227
5228 direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5229 if (!direct)
5230 return NULL;
5231 direct->addr = addr;
5232 direct->count = 0;
5233 list_add_rcu(&direct->next, &ftrace_direct_funcs);
5234 ftrace_direct_func_count++;
5235 return direct;
5236 }
5237
5238 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5239
5240 /**
5241 * register_ftrace_direct - Call a custom trampoline directly
5242 * @ip: The address of the nop at the beginning of a function
5243 * @addr: The address of the trampoline to call at @ip
5244 *
5245 * This is used to connect a direct call from the nop location (@ip)
5246 * at the start of ftrace traced functions. The location that it calls
5247 * (@addr) must be able to handle a direct call, and save the parameters
5248 * of the function being traced, and restore them (or inject new ones
5249 * if needed), before returning.
5250 *
5251 * Returns:
5252 * 0 on success
5253 * -EBUSY - Another direct function is already attached (there can be only one)
5254 * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5255 * -ENOMEM - There was an allocation failure.
5256 */
5257 int register_ftrace_direct(unsigned long ip, unsigned long addr)
5258 {
5259 struct ftrace_direct_func *direct;
5260 struct ftrace_func_entry *entry;
5261 struct ftrace_hash *free_hash = NULL;
5262 struct dyn_ftrace *rec;
5263 int ret = -ENODEV;
5264
5265 mutex_lock(&direct_mutex);
5266
5267 ip = ftrace_location(ip);
5268 if (!ip)
5269 goto out_unlock;
5270
5271 /* See if there's a direct function at @ip already */
5272 ret = -EBUSY;
5273 if (ftrace_find_rec_direct(ip))
5274 goto out_unlock;
5275
5276 ret = -ENODEV;
5277 rec = lookup_rec(ip, ip);
5278 if (!rec)
5279 goto out_unlock;
5280
5281 /*
5282 * Check if the rec says it has a direct call but we didn't
5283 * find one earlier?
5284 */
5285 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5286 goto out_unlock;
5287
5288 /* Make sure the ip points to the exact record */
5289 if (ip != rec->ip) {
5290 ip = rec->ip;
5291 /* Need to check this ip for a direct. */
5292 if (ftrace_find_rec_direct(ip))
5293 goto out_unlock;
5294 }
5295
5296 ret = -ENOMEM;
5297 direct = ftrace_find_direct_func(addr);
5298 if (!direct) {
5299 direct = ftrace_alloc_direct_func(addr);
5300 if (!direct)
5301 goto out_unlock;
5302 }
5303
5304 entry = ftrace_add_rec_direct(ip, addr, &free_hash);
5305 if (!entry)
5306 goto out_unlock;
5307
5308 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5309
5310 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5311 ret = register_ftrace_function_nolock(&direct_ops);
5312 if (ret)
5313 ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5314 }
5315
5316 if (ret) {
5317 remove_hash_entry(direct_functions, entry);
5318 kfree(entry);
5319 if (!direct->count) {
5320 list_del_rcu(&direct->next);
5321 synchronize_rcu_tasks();
5322 kfree(direct);
5323 if (free_hash)
5324 free_ftrace_hash(free_hash);
5325 free_hash = NULL;
5326 ftrace_direct_func_count--;
5327 }
5328 } else {
5329 direct->count++;
5330 }
5331 out_unlock:
5332 mutex_unlock(&direct_mutex);
5333
5334 if (free_hash) {
5335 synchronize_rcu_tasks();
5336 free_ftrace_hash(free_hash);
5337 }
5338
5339 return ret;
5340 }
5341 EXPORT_SYMBOL_GPL(register_ftrace_direct);
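
/*
 * Illustrative use (see samples/ftrace/ftrace-direct.c for a complete,
 * architecture-specific example; my_tramp here is hypothetical and must
 * be written in assembly, since it has to preserve the traced function's
 * arguments around whatever call it makes):
 *
 *	extern void my_tramp(void);
 *
 *	ret = register_ftrace_direct((unsigned long)wake_up_process,
 *				     (unsigned long)my_tramp);
 */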
5342
5343 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5344 struct dyn_ftrace **recp)
5345 {
5346 struct ftrace_func_entry *entry;
5347 struct dyn_ftrace *rec;
5348
5349 rec = lookup_rec(*ip, *ip);
5350 if (!rec)
5351 return NULL;
5352
5353 entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5354 if (!entry) {
5355 WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5356 return NULL;
5357 }
5358
5359 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5360
5361 /* Passed in ip just needs to be on the call site */
5362 *ip = rec->ip;
5363
5364 if (recp)
5365 *recp = rec;
5366
5367 return entry;
5368 }
5369
5370 int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5371 {
5372 struct ftrace_direct_func *direct;
5373 struct ftrace_func_entry *entry;
5374 struct ftrace_hash *hash;
5375 int ret = -ENODEV;
5376
5377 mutex_lock(&direct_mutex);
5378
5379 ip = ftrace_location(ip);
5380 if (!ip)
5381 goto out_unlock;
5382
5383 entry = find_direct_entry(&ip, NULL);
5384 if (!entry)
5385 goto out_unlock;
5386
5387 hash = direct_ops.func_hash->filter_hash;
5388 if (hash->count == 1)
5389 unregister_ftrace_function(&direct_ops);
5390
5391 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5392
5393 WARN_ON(ret);
5394
5395 remove_hash_entry(direct_functions, entry);
5396
5397 direct = ftrace_find_direct_func(addr);
5398 if (!WARN_ON(!direct)) {
5399 /* This is the good path (see the ! before WARN) */
5400 direct->count--;
5401 WARN_ON(direct->count < 0);
5402 if (!direct->count) {
5403 list_del_rcu(&direct->next);
5404 synchronize_rcu_tasks();
5405 kfree(direct);
5406 kfree(entry);
5407 ftrace_direct_func_count--;
5408 }
5409 }
5410 out_unlock:
5411 mutex_unlock(&direct_mutex);
5412
5413 return ret;
5414 }
5415 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5416
5417 static struct ftrace_ops stub_ops = {
5418 .func = ftrace_stub,
5419 };
5420
5421 /**
5422 * ftrace_modify_direct_caller - modify ftrace nop directly
5423 * @entry: The ftrace hash entry of the direct helper for @rec
5424 * @rec: The record representing the function site to patch
5425 * @old_addr: The location that the site at @rec->ip currently calls
5426 * @new_addr: The location that the site at @rec->ip should call
5427 *
5428 * An architecture may overwrite this function to optimize the
5429 * changing of the direct callback on an ftrace nop location.
5430 * This is called with the ftrace_lock mutex held, and no other
5431 * ftrace callbacks are on the associated record (@rec). Thus,
5432 * it is safe to modify the ftrace record, where it should be
5433 * currently calling @old_addr directly, to call @new_addr.
5434 *
5435 * This is called with direct_mutex locked.
5436 *
5437 * Safety checks should be made to make sure that the code at
5438 * @rec->ip is currently calling @old_addr. And this must
5439 * also update entry->direct to @new_addr.
5440 */
5441 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5442 struct dyn_ftrace *rec,
5443 unsigned long old_addr,
5444 unsigned long new_addr)
5445 {
5446 unsigned long ip = rec->ip;
5447 int ret;
5448
5449 lockdep_assert_held(&direct_mutex);
5450
5451 /*
5452 * The ftrace_lock was used to determine if the record
5453 * had more than one registered user to it. If it did,
5454 * we needed to prevent that from changing to do the quick
5455 * switch. But if it did not (only a direct caller was attached)
5456 * then this function is called. But this function can deal
5457 * with attached callers to the rec that we care about, and
5458 * since this function uses standard ftrace calls that take
5459 * the ftrace_lock mutex, we need to release it.
5460 */
5461 mutex_unlock(&ftrace_lock);
5462
5463 /*
5464 * By setting a stub function at the same address, we force
5465 * the code to call the iterator and the direct_ops helper.
5466 * This means that @ip does not call the direct call, and
5467 * we can simply modify it.
5468 */
5469 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5470 if (ret)
5471 goto out_lock;
5472
5473 ret = register_ftrace_function_nolock(&stub_ops);
5474 if (ret) {
5475 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5476 goto out_lock;
5477 }
5478
5479 entry->direct = new_addr;
5480
5481 /*
5482 * By removing the stub, we put back the direct call, calling
5483 * the @new_addr.
5484 */
5485 unregister_ftrace_function(&stub_ops);
5486 ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5487
5488 out_lock:
5489 mutex_lock(&ftrace_lock);
5490
5491 return ret;
5492 }
5493
5494 /**
5495 * modify_ftrace_direct - Modify an existing direct call to call something else
5496 * @ip: The instruction pointer to modify
5497 * @old_addr: The address that the current @ip calls directly
5498 * @new_addr: The address that the @ip should call
5499 *
5500 * This modifies a ftrace direct caller at an instruction pointer without
5501 * having to disable it first. The direct call will switch over to the
5502 * @new_addr without missing anything.
5503 *
5504 * Returns: zero on success. Non zero on error, which includes:
5505 * -ENODEV : the @ip given has no direct caller attached
5506 * -EINVAL : the @old_addr does not match the current direct caller
5507 */
5508 int modify_ftrace_direct(unsigned long ip,
5509 unsigned long old_addr, unsigned long new_addr)
5510 {
5511 struct ftrace_direct_func *direct, *new_direct = NULL;
5512 struct ftrace_func_entry *entry;
5513 struct dyn_ftrace *rec;
5514 int ret = -ENODEV;
5515
5516 mutex_lock(&direct_mutex);
5517
5518 mutex_lock(&ftrace_lock);
5519
5520 ip = ftrace_location(ip);
5521 if (!ip)
5522 goto out_unlock;
5523
5524 entry = find_direct_entry(&ip, &rec);
5525 if (!entry)
5526 goto out_unlock;
5527
5528 ret = -EINVAL;
5529 if (entry->direct != old_addr)
5530 goto out_unlock;
5531
5532 direct = ftrace_find_direct_func(old_addr);
5533 if (WARN_ON(!direct))
5534 goto out_unlock;
5535 if (direct->count > 1) {
5536 ret = -ENOMEM;
5537 new_direct = ftrace_alloc_direct_func(new_addr);
5538 if (!new_direct)
5539 goto out_unlock;
5540 direct->count--;
5541 new_direct->count++;
5542 } else {
5543 direct->addr = new_addr;
5544 }
5545
5546 /*
5547 * If there's no other ftrace callback on the rec->ip location,
5548 * then it can be changed directly by the architecture.
5549 * If there is another caller, then we just need to change the
5550 * direct caller helper to point to @new_addr.
5551 */
5552 if (ftrace_rec_count(rec) == 1) {
5553 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5554 } else {
5555 entry->direct = new_addr;
5556 ret = 0;
5557 }
5558
5559 if (unlikely(ret && new_direct)) {
5560 direct->count++;
5561 list_del_rcu(&new_direct->next);
5562 synchronize_rcu_tasks();
5563 kfree(new_direct);
5564 ftrace_direct_func_count--;
5565 }
5566
5567 out_unlock:
5568 mutex_unlock(&ftrace_lock);
5569 mutex_unlock(&direct_mutex);
5570 return ret;
5571 }
5572 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
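
/*
 * For example (a sketch, assuming my_tramp1 was registered as shown for
 * register_ftrace_direct() and my_tramp2 is another suitable trampoline):
 *
 *	ret = modify_ftrace_direct((unsigned long)wake_up_process,
 *				   (unsigned long)my_tramp1,
 *				   (unsigned long)my_tramp2);
 */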
5573
5574 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5575
5576 static int check_direct_multi(struct ftrace_ops *ops)
5577 {
5578 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5579 return -EINVAL;
5580 if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5581 return -EINVAL;
5582 return 0;
5583 }
5584
5585 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5586 {
5587 struct ftrace_func_entry *entry, *del;
5588 int size, i;
5589
5590 size = 1 << hash->size_bits;
5591 for (i = 0; i < size; i++) {
5592 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5593 del = __ftrace_lookup_ip(direct_functions, entry->ip);
5594 if (del && del->direct == addr) {
5595 remove_hash_entry(direct_functions, del);
5596 kfree(del);
5597 }
5598 }
5599 }
5600 }
5601
5602 /**
5603 * register_ftrace_direct_multi - Call a custom trampoline directly
5604 * for multiple functions registered in @ops
5605 * @ops: The address of the struct ftrace_ops object
5606 * @addr: The address of the trampoline to call at @ops functions
5607 *
5608 * This is used to connect direct calls to @addr from the nop locations
5609 * of the functions registered in @ops (set with the
5610 * ftrace_set_filter_ip() function).
5611 *
5612 * The location that it calls (@addr) must be able to handle a direct call,
5613 * and save the parameters of the function being traced, and restore them
5614 * (or inject new ones if needed), before returning.
5615 *
5616 * Returns:
5617 * 0 on success
5618 * -EINVAL - The @ops object was already registered with this call or
5619 * when there are no functions in @ops object.
5620 * -EBUSY - Another direct function is already attached (there can be only one)
5621 * -ENODEV - A function in @ops does not point to a ftrace nop location (or not supported)
5622 * -ENOMEM - There was an allocation failure.
5623 */
5624 int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5625 {
5626 struct ftrace_hash *hash, *free_hash = NULL;
5627 struct ftrace_func_entry *entry, *new;
5628 int err = -EBUSY, size, i;
5629
5630 if (ops->func || ops->trampoline)
5631 return -EINVAL;
5632 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5633 return -EINVAL;
5634 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5635 return -EINVAL;
5636
5637 hash = ops->func_hash->filter_hash;
5638 if (ftrace_hash_empty(hash))
5639 return -EINVAL;
5640
5641 mutex_lock(&direct_mutex);
5642
5643 /* Make sure requested entries are not already registered.. */
5644 size = 1 << hash->size_bits;
5645 for (i = 0; i < size; i++) {
5646 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5647 if (ftrace_find_rec_direct(entry->ip))
5648 goto out_unlock;
5649 }
5650 }
5651
5652 /* ... and insert them to direct_functions hash. */
5653 err = -ENOMEM;
5654 for (i = 0; i < size; i++) {
5655 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5656 new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5657 if (!new)
5658 goto out_remove;
5659 entry->direct = addr;
5660 }
5661 }
5662
5663 ops->func = call_direct_funcs;
5664 ops->flags = MULTI_FLAGS;
5665 ops->trampoline = FTRACE_REGS_ADDR;
5666
5667 err = register_ftrace_function_nolock(ops);
5668
5669 out_remove:
5670 if (err)
5671 remove_direct_functions_hash(hash, addr);
5672
5673 out_unlock:
5674 mutex_unlock(&direct_mutex);
5675
5676 if (free_hash) {
5677 synchronize_rcu_tasks();
5678 free_ftrace_hash(free_hash);
5679 }
5680 return err;
5681 }
5682 EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
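
/*
 * Typical usage, as a sketch (mirroring samples/ftrace/ftrace-direct-multi.c;
 * my_tramp is hypothetical). Note that ftrace_set_filter_ip() also
 * initializes @ops, which satisfies the FTRACE_OPS_FL_INITIALIZED check
 * above:
 *
 *	static struct ftrace_ops direct;
 *
 *	ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
 *	ftrace_set_filter_ip(&direct, (unsigned long)schedule, 0, 0);
 *	ret = register_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
 */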
5683
5684 /**
5685 * unregister_ftrace_direct_multi - Remove calls to custom trampoline
5686 * previously registered by register_ftrace_direct_multi for @ops object.
5687 * @ops: The address of the struct ftrace_ops object
5688 *
5689 * This is used to remove direct calls to @addr from the nop locations
5690 * of the functions registered in @ops (set with the
5691 * ftrace_set_filter_ip() function).
5692 *
5693 * Returns:
5694 * 0 on success
5695 * -EINVAL - The @ops object was not properly registered.
5696 */
5697 int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5698 {
5699 struct ftrace_hash *hash = ops->func_hash->filter_hash;
5700 int err;
5701
5702 if (check_direct_multi(ops))
5703 return -EINVAL;
5704 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5705 return -EINVAL;
5706
5707 mutex_lock(&direct_mutex);
5708 err = unregister_ftrace_function(ops);
5709 remove_direct_functions_hash(hash, addr);
5710 mutex_unlock(&direct_mutex);
5711
5712 /* cleanup for a possible later register call */
5713 ops->func = NULL;
5714 ops->trampoline = 0;
5715 return err;
5716 }
5717 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5718
5719 static int
5720 __modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5721 {
5722 struct ftrace_hash *hash;
5723 struct ftrace_func_entry *entry, *iter;
5724 static struct ftrace_ops tmp_ops = {
5725 .func = ftrace_stub,
5726 .flags = FTRACE_OPS_FL_STUB,
5727 };
5728 int i, size;
5729 int err;
5730
5731 lockdep_assert_held_once(&direct_mutex);
5732
5733 /* Enable the tmp_ops to have the same functions as the direct ops */
5734 ftrace_ops_init(&tmp_ops);
5735 tmp_ops.func_hash = ops->func_hash;
5736
5737 err = register_ftrace_function_nolock(&tmp_ops);
5738 if (err)
5739 return err;
5740
5741 /*
5742 * Now the ftrace_ops_list_func() is called to do the direct callers.
5743 * We can safely change the direct functions attached to each entry.
5744 */
5745 mutex_lock(&ftrace_lock);
5746
5747 hash = ops->func_hash->filter_hash;
5748 size = 1 << hash->size_bits;
5749 for (i = 0; i < size; i++) {
5750 hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5751 entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5752 if (!entry)
5753 continue;
5754 entry->direct = addr;
5755 }
5756 }
5757
5758 mutex_unlock(&ftrace_lock);
5759
5760 /* Removing the tmp_ops will add the updated direct callers to the functions */
5761 unregister_ftrace_function(&tmp_ops);
5762
5763 return err;
5764 }
5765
5766 /**
5767 * modify_ftrace_direct_multi_nolock - Modify an existing direct 'multi' call
5768 * to call something else
5769 * @ops: The address of the struct ftrace_ops object
5770 * @addr: The address of the new trampoline to call at @ops functions
5771 *
5772 * This is used to unregister currently registered direct caller and
5773 * register new one @addr on functions registered in @ops object.
5774 *
5775 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5776 * where there will be no callbacks called.
5777 *
5778 * Caller should already have direct_mutex locked, so we don't lock
5779 * direct_mutex here.
5780 *
5781 * Returns: zero on success. Non zero on error, which includes:
5782 * -EINVAL - The @ops object was not properly registered.
5783 */
5784 int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
5785 {
5786 if (check_direct_multi(ops))
5787 return -EINVAL;
5788 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5789 return -EINVAL;
5790
5791 return __modify_ftrace_direct_multi(ops, addr);
5792 }
5793 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi_nolock);
5794
5795 /**
5796 * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
5797 * to call something else
5798 * @ops: The address of the struct ftrace_ops object
5799 * @addr: The address of the new trampoline to call at @ops functions
5800 *
5801 * This is used to unregister the currently registered direct caller and
5802 * register a new one (@addr) on the functions registered in the @ops object.
5803 *
5804 * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5805 * where no callbacks will be called.
5806 *
5807 * Returns: zero on success. Non zero on error, which includes:
5808 * -EINVAL - The @ops object was not properly registered.
5809 */
5810 int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5811 {
5812 int err;
5813
5814 if (check_direct_multi(ops))
5815 return -EINVAL;
5816 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5817 return -EINVAL;
5818
5819 mutex_lock(&direct_mutex);
5820 err = __modify_ftrace_direct_multi(ops, addr);
5821 mutex_unlock(&direct_mutex);
5822 return err;
5823 }
5824 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
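
/*
 * Example (sketch): once registered as above, the attached functions can be
 * atomically retargeted to a second hypothetical trampoline my_tramp2
 * without tearing the ops down:
 *
 *	err = modify_ftrace_direct_multi(&direct_ops,
 *					 (unsigned long)my_tramp2);
 */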
5825 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5826
5827 /**
5828 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5829 * @ops: the ops to set the filter with
5830 * @ip: the address to add to or remove from the filter.
5831 * @remove: non zero to remove the ip from the filter
5832 * @reset: non zero to reset all filters before applying this filter.
5833 *
5834 * Filters denote which functions should be enabled when tracing is enabled.
5835 * If @ip is NULL, it fails to update the filter.
5836 *
5837 * This can allocate memory which must be freed before @ops can be freed,
5838 * either by removing each filtered addr or by using
5839 * ftrace_free_filter(@ops).
5840 */
5841 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5842 int remove, int reset)
5843 {
5844 ftrace_ops_init(ops);
5845 return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5846 }
5847 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
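
/*
 * Example (sketch): with a caller-owned ops, trace only schedule(), and
 * later drop it from the filter again (@remove == 1):
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 0);
 *	...
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 1, 0);
 */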
5848
5849 /**
5850 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
5851 * @ops: the ops to set the filter with
5852 * @ips: the array of addresses to add to or remove from the filter.
5853 * @cnt: the number of addresses in @ips
5854 * @remove: non zero to remove ips from the filter
5855 * @reset: non zero to reset all filters before applying this filter.
5856 *
5857 * Filters denote which functions should be enabled when tracing is enabled.
5858 * If the @ips array or any ip specified within it is NULL, it fails to update the filter.
5859 *
5860 * This can allocate memory which must be freed before @ops can be freed,
5861 * either by removing each filtered addr or by using
5862 * ftrace_free_filter(@ops).
5863 */
5864 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
5865 unsigned int cnt, int remove, int reset)
5866 {
5867 ftrace_ops_init(ops);
5868 return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5869 }
5870 EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
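
/*
 * Example (sketch): set the filter to exactly these two addresses in a
 * single call, resetting whatever was set before (@reset == 1):
 *
 *	unsigned long ips[] = {
 *		(unsigned long)schedule,
 *		(unsigned long)wake_up_process,
 *	};
 *
 *	ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 1);
 */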
5871
5872 /**
5873 * ftrace_ops_set_global_filter - setup ops to use global filters
5874 * @ops: the ops which will use the global filters
5875 *
5876 * ftrace users who need global function trace filtering should call this.
5877 * It can set the global filter only if the ops were not initialized before.
5878 */
5879 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5880 {
5881 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5882 return;
5883
5884 ftrace_ops_init(ops);
5885 ops->func_hash = &global_ops.local_hash;
5886 }
5887 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5888
5889 static int
5890 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5891 int reset, int enable)
5892 {
5893 return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5894 }
5895
5896 /**
5897 * ftrace_set_filter - set a function to filter on in ftrace
5898 * @ops: the ops to set the filter with
5899 * @buf: the string that holds the function filter text.
5900 * @len: the length of the string.
5901 * @reset: non zero to reset all filters before applying this filter.
5902 *
5903 * Filters denote which functions should be enabled when tracing is enabled.
5904 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5905 *
5906 * This can allocate memory which must be freed before @ops can be freed,
5907 * either by removing each filtered addr or by using
5908 * ftrace_free_filter(@ops).
5909 */
5910 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5911 int len, int reset)
5912 {
5913 ftrace_ops_init(ops);
5914 return ftrace_set_regex(ops, buf, len, reset, 1);
5915 }
5916 EXPORT_SYMBOL_GPL(ftrace_set_filter);
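
/*
 * Example (sketch): @buf takes the same glob syntax as the
 * set_ftrace_filter tracefs file, e.g. to match every kmalloc variant:
 *
 *	unsigned char buf[] = "kmalloc*";
 *
 *	ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
 */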
5917
5918 /**
5919 * ftrace_set_notrace - set a function to not trace in ftrace
5920 * @ops: the ops to set the notrace filter with
5921 * @buf: the string that holds the function notrace text.
5922 * @len: the length of the string.
5923 * @reset: non zero to reset all filters before applying this filter.
5924 *
5925 * Notrace Filters denote which functions should not be enabled when tracing
5926 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5927 * for tracing.
5928 *
5929 * This can allocate memory which must be freed before @ops can be freed,
5930 * either by removing each filtered addr or by using
5931 * ftrace_free_filter(@ops).
5932 */
5933 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5934 int len, int reset)
5935 {
5936 ftrace_ops_init(ops);
5937 return ftrace_set_regex(ops, buf, len, reset, 0);
5938 }
5939 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
5940 /**
5941 * ftrace_set_global_filter - set a function to filter on with global tracers
5942 * @buf: the string that holds the function filter text.
5943 * @len: the length of the string.
5944 * @reset: non zero to reset all filters before applying this filter.
5945 *
5946 * Filters denote which functions should be enabled when tracing is enabled.
5947 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5948 */
5949 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5950 {
5951 ftrace_set_regex(&global_ops, buf, len, reset, 1);
5952 }
5953 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5954
5955 /**
5956 * ftrace_set_global_notrace - set a function to not trace with global tracers
5957 * @buf: the string that holds the function notrace text.
5958 * @len: the length of the string.
5959 * @reset: non zero to reset all filters before applying this filter.
5960 *
5961 * Notrace Filters denote which functions should not be enabled when tracing
5962 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5963 * for tracing.
5964 */
5965 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5966 {
5967 ftrace_set_regex(&global_ops, buf, len, reset, 0);
5968 }
5969 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
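
/*
 * Example (sketch): limit the global function tracer to scheduler
 * functions while keeping spinlock internals out of the trace:
 *
 *	unsigned char on[] = "sched_*";
 *	unsigned char off[] = "*spin_lock*";
 *
 *	ftrace_set_global_filter(on, sizeof(on) - 1, 1);
 *	ftrace_set_global_notrace(off, sizeof(off) - 1, 1);
 */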
5970
5971 /*
5972 * command line interface to allow users to set filters on boot up.
5973 */
5974 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
5975 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5976 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5977
5978 /* Used by function selftest to not test if filter is set */
5979 bool ftrace_filter_param __initdata;
5980
5981 static int __init set_ftrace_notrace(char *str)
5982 {
5983 ftrace_filter_param = true;
5984 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5985 return 1;
5986 }
5987 __setup("ftrace_notrace=", set_ftrace_notrace);
5988
5989 static int __init set_ftrace_filter(char *str)
5990 {
5991 ftrace_filter_param = true;
5992 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5993 return 1;
5994 }
5995 __setup("ftrace_filter=", set_ftrace_filter);
5996
5997 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5998 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5999 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6000 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
6001
6002 static int __init set_graph_function(char *str)
6003 {
6004 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
6005 return 1;
6006 }
6007 __setup("ftrace_graph_filter=", set_graph_function);
6008
6009 static int __init set_graph_notrace_function(char *str)
6010 {
6011 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
6012 return 1;
6013 }
6014 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
6015
6016 static int __init set_graph_max_depth_function(char *str)
6017 {
6018 if (!str)
6019 return 0;
6020 fgraph_max_depth = simple_strtoul(str, NULL, 0);
6021 return 1;
6022 }
6023 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
6024
6025 static void __init set_ftrace_early_graph(char *buf, int enable)
6026 {
6027 int ret;
6028 char *func;
6029 struct ftrace_hash *hash;
6030
6031 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6032 if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6033 return;
6034
6035 while (buf) {
6036 func = strsep(&buf, ",");
6037 /* we allow only one expression at a time */
6038 ret = ftrace_graph_set_hash(hash, func);
6039 if (ret)
6040 printk(KERN_DEBUG "ftrace: function %s not traceable\n",
6041        func);
6042 }
6043
6044 if (enable)
6045 ftrace_graph_hash = hash;
6046 else
6047 ftrace_graph_notrace_hash = hash;
6048 }
6049 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6050
6051 void __init
6052 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6053 {
6054 char *func;
6055
6056 ftrace_ops_init(ops);
6057
6058 while (buf) {
6059 func = strsep(&buf, ",");
6060 ftrace_set_regex(ops, func, strlen(func), 0, enable);
6061 }
6062 }
6063
6064 static void __init set_ftrace_early_filters(void)
6065 {
6066 if (ftrace_filter_buf[0])
6067 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6068 if (ftrace_notrace_buf[0])
6069 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6070 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6071 if (ftrace_graph_buf[0])
6072 set_ftrace_early_graph(ftrace_graph_buf, 1);
6073 if (ftrace_graph_notrace_buf[0])
6074 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6075 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6076 }
6077
6078 int ftrace_regex_release(struct inode *inode, struct file *file)
6079 {
6080 struct seq_file *m = (struct seq_file *)file->private_data;
6081 struct ftrace_iterator *iter;
6082 struct ftrace_hash **orig_hash;
6083 struct trace_parser *parser;
6084 int filter_hash;
6085
6086 if (file->f_mode & FMODE_READ) {
6087 iter = m->private;
6088 seq_release(inode, file);
6089 } else
6090 iter = file->private_data;
6091
6092 parser = &iter->parser;
6093 if (trace_parser_loaded(parser)) {
6094 int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6095
6096 ftrace_process_regex(iter, parser->buffer,
6097 parser->idx, enable);
6098 }
6099
6100 trace_parser_put(parser);
6101
6102 mutex_lock(&iter->ops->func_hash->regex_lock);
6103
6104 if (file->f_mode & FMODE_WRITE) {
6105 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6106
6107 if (filter_hash) {
6108 orig_hash = &iter->ops->func_hash->filter_hash;
6109 if (iter->tr) {
6110 if (list_empty(&iter->tr->mod_trace))
6111 iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
6112 else
6113 iter->hash->flags |= FTRACE_HASH_FL_MOD;
6114 }
6115 } else
6116 orig_hash = &iter->ops->func_hash->notrace_hash;
6117
6118 mutex_lock(&ftrace_lock);
6119 ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6120 iter->hash, filter_hash);
6121 mutex_unlock(&ftrace_lock);
6122 } else {
6123 /* For read only, the hash is the ops hash */
6124 iter->hash = NULL;
6125 }
6126
6127 mutex_unlock(&iter->ops->func_hash->regex_lock);
6128 free_ftrace_hash(iter->hash);
6129 if (iter->tr)
6130 trace_array_put(iter->tr);
6131 kfree(iter);
6132
6133 return 0;
6134 }
6135
6136 static const struct file_operations ftrace_avail_fops = {
6137 .open = ftrace_avail_open,
6138 .read = seq_read,
6139 .llseek = seq_lseek,
6140 .release = seq_release_private,
6141 };
6142
6143 static const struct file_operations ftrace_enabled_fops = {
6144 .open = ftrace_enabled_open,
6145 .read = seq_read,
6146 .llseek = seq_lseek,
6147 .release = seq_release_private,
6148 };
6149
6150 static const struct file_operations ftrace_filter_fops = {
6151 .open = ftrace_filter_open,
6152 .read = seq_read,
6153 .write = ftrace_filter_write,
6154 .llseek = tracing_lseek,
6155 .release = ftrace_regex_release,
6156 };
6157
6158 static const struct file_operations ftrace_notrace_fops = {
6159 .open = ftrace_notrace_open,
6160 .read = seq_read,
6161 .write = ftrace_notrace_write,
6162 .llseek = tracing_lseek,
6163 .release = ftrace_regex_release,
6164 };
6165
6166 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6167
6168 static DEFINE_MUTEX(graph_lock);
6169
6170 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6171 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6172
6173 enum graph_filter_type {
6174 GRAPH_FILTER_NOTRACE = 0,
6175 GRAPH_FILTER_FUNCTION,
6176 };
6177
6178 #define FTRACE_GRAPH_EMPTY ((void *)1)
6179
6180 struct ftrace_graph_data {
6181 struct ftrace_hash *hash;
6182 struct ftrace_func_entry *entry;
6183 int idx; /* for hash table iteration */
6184 enum graph_filter_type type;
6185 struct ftrace_hash *new_hash;
6186 const struct seq_operations *seq_ops;
6187 struct trace_parser parser;
6188 };
6189
6190 static void *
6191 __g_next(struct seq_file *m, loff_t *pos)
6192 {
6193 struct ftrace_graph_data *fgd = m->private;
6194 struct ftrace_func_entry *entry = fgd->entry;
6195 struct hlist_head *head;
6196 int i, idx = fgd->idx;
6197
6198 if (*pos >= fgd->hash->count)
6199 return NULL;
6200
6201 if (entry) {
6202 hlist_for_each_entry_continue(entry, hlist) {
6203 fgd->entry = entry;
6204 return entry;
6205 }
6206
6207 idx++;
6208 }
6209
6210 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6211 head = &fgd->hash->buckets[i];
6212 hlist_for_each_entry(entry, head, hlist) {
6213 fgd->entry = entry;
6214 fgd->idx = i;
6215 return entry;
6216 }
6217 }
6218 return NULL;
6219 }
6220
6221 static void *
6222 g_next(struct seq_file *m, void *v, loff_t *pos)
6223 {
6224 (*pos)++;
6225 return __g_next(m, pos);
6226 }
6227
6228 static void *g_start(struct seq_file *m, loff_t *pos)
6229 {
6230 struct ftrace_graph_data *fgd = m->private;
6231
6232 mutex_lock(&graph_lock);
6233
6234 if (fgd->type == GRAPH_FILTER_FUNCTION)
6235 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6236 lockdep_is_held(&graph_lock));
6237 else
6238 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6239 lockdep_is_held(&graph_lock));
6240
6241 /* Nothing to show; tell g_show to print that all functions are enabled */
6242 if (ftrace_hash_empty(fgd->hash) && !*pos)
6243 return FTRACE_GRAPH_EMPTY;
6244
6245 fgd->idx = 0;
6246 fgd->entry = NULL;
6247 return __g_next(m, pos);
6248 }
6249
6250 static void g_stop(struct seq_file *m, void *p)
6251 {
6252 mutex_unlock(&graph_lock);
6253 }
6254
6255 static int g_show(struct seq_file *m, void *v)
6256 {
6257 struct ftrace_func_entry *entry = v;
6258
6259 if (!entry)
6260 return 0;
6261
6262 if (entry == FTRACE_GRAPH_EMPTY) {
6263 struct ftrace_graph_data *fgd = m->private;
6264
6265 if (fgd->type == GRAPH_FILTER_FUNCTION)
6266 seq_puts(m, "#### all functions enabled ####\n");
6267 else
6268 seq_puts(m, "#### no functions disabled ####\n");
6269 return 0;
6270 }
6271
6272 seq_printf(m, "%ps\n", (void *)entry->ip);
6273
6274 return 0;
6275 }
6276
6277 static const struct seq_operations ftrace_graph_seq_ops = {
6278 .start = g_start,
6279 .next = g_next,
6280 .stop = g_stop,
6281 .show = g_show,
6282 };
6283
6284 static int
6285 __ftrace_graph_open(struct inode *inode, struct file *file,
6286 struct ftrace_graph_data *fgd)
6287 {
6288 int ret;
6289 struct ftrace_hash *new_hash = NULL;
6290
6291 ret = security_locked_down(LOCKDOWN_TRACEFS);
6292 if (ret)
6293 return ret;
6294
6295 if (file->f_mode & FMODE_WRITE) {
6296 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6297
6298 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6299 return -ENOMEM;
6300
6301 if (file->f_flags & O_TRUNC)
6302 new_hash = alloc_ftrace_hash(size_bits);
6303 else
6304 new_hash = alloc_and_copy_ftrace_hash(size_bits,
6305 fgd->hash);
6306 if (!new_hash) {
6307 ret = -ENOMEM;
6308 goto out;
6309 }
6310 }
6311
6312 if (file->f_mode & FMODE_READ) {
6313 ret = seq_open(file, &ftrace_graph_seq_ops);
6314 if (!ret) {
6315 struct seq_file *m = file->private_data;
6316 m->private = fgd;
6317 } else {
6318 /* Failed */
6319 free_ftrace_hash(new_hash);
6320 new_hash = NULL;
6321 }
6322 } else
6323 file->private_data = fgd;
6324
6325 out:
6326 if (ret < 0 && file->f_mode & FMODE_WRITE)
6327 trace_parser_put(&fgd->parser);
6328
6329 fgd->new_hash = new_hash;
6330
6331 /*
6332 * All uses of fgd->hash must be taken with the graph_lock
6333 * held. The graph_lock is going to be released, so force
6334 * fgd->hash to be reinitialized when it is taken again.
6335 */
6336 fgd->hash = NULL;
6337
6338 return ret;
6339 }
6340
6341 static int
6342 ftrace_graph_open(struct inode *inode, struct file *file)
6343 {
6344 struct ftrace_graph_data *fgd;
6345 int ret;
6346
6347 if (unlikely(ftrace_disabled))
6348 return -ENODEV;
6349
6350 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6351 if (fgd == NULL)
6352 return -ENOMEM;
6353
6354 mutex_lock(&graph_lock);
6355
6356 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6357 lockdep_is_held(&graph_lock));
6358 fgd->type = GRAPH_FILTER_FUNCTION;
6359 fgd->seq_ops = &ftrace_graph_seq_ops;
6360
6361 ret = __ftrace_graph_open(inode, file, fgd);
6362 if (ret < 0)
6363 kfree(fgd);
6364
6365 mutex_unlock(&graph_lock);
6366 return ret;
6367 }
6368
6369 static int
6370 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6371 {
6372 struct ftrace_graph_data *fgd;
6373 int ret;
6374
6375 if (unlikely(ftrace_disabled))
6376 return -ENODEV;
6377
6378 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6379 if (fgd == NULL)
6380 return -ENOMEM;
6381
6382 mutex_lock(&graph_lock);
6383
6384 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6385 lockdep_is_held(&graph_lock));
6386 fgd->type = GRAPH_FILTER_NOTRACE;
6387 fgd->seq_ops = &ftrace_graph_seq_ops;
6388
6389 ret = __ftrace_graph_open(inode, file, fgd);
6390 if (ret < 0)
6391 kfree(fgd);
6392
6393 mutex_unlock(&graph_lock);
6394 return ret;
6395 }
6396
6397 static int
6398 ftrace_graph_release(struct inode *inode, struct file *file)
6399 {
6400 struct ftrace_graph_data *fgd;
6401 struct ftrace_hash *old_hash, *new_hash;
6402 struct trace_parser *parser;
6403 int ret = 0;
6404
6405 if (file->f_mode & FMODE_READ) {
6406 struct seq_file *m = file->private_data;
6407
6408 fgd = m->private;
6409 seq_release(inode, file);
6410 } else {
6411 fgd = file->private_data;
6412 }
6413
6414
6415 if (file->f_mode & FMODE_WRITE) {
6416
6417 parser = &fgd->parser;
6418
6419 if (trace_parser_loaded(parser)) {
6420 ret = ftrace_graph_set_hash(fgd->new_hash,
6421 parser->buffer);
6422 }
6423
6424 trace_parser_put(parser);
6425
6426 new_hash = __ftrace_hash_move(fgd->new_hash);
6427 if (!new_hash) {
6428 ret = -ENOMEM;
6429 goto out;
6430 }
6431
6432 mutex_lock(&graph_lock);
6433
6434 if (fgd->type == GRAPH_FILTER_FUNCTION) {
6435 old_hash = rcu_dereference_protected(ftrace_graph_hash,
6436 lockdep_is_held(&graph_lock));
6437 rcu_assign_pointer(ftrace_graph_hash, new_hash);
6438 } else {
6439 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6440 lockdep_is_held(&graph_lock));
6441 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6442 }
6443
6444 mutex_unlock(&graph_lock);
6445
6446 /*
6447 * We need to do a hard force of sched synchronization.
6448 * This is because we use preempt_disable() to do RCU, but
6449 * the function tracers can be called where RCU is not watching
6450 * (like before user_exit()). We cannot rely on the RCU
6451 * infrastructure to do the synchronization, thus we must do it
6452 * ourselves.
6453 */
6454 if (old_hash != EMPTY_HASH)
6455 synchronize_rcu_tasks_rude();
6456
6457 free_ftrace_hash(old_hash);
6458 }
6459
6460 out:
6461 free_ftrace_hash(fgd->new_hash);
6462 kfree(fgd);
6463
6464 return ret;
6465 }
6466
6467 static int
6468 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6469 {
6470 struct ftrace_glob func_g;
6471 struct dyn_ftrace *rec;
6472 struct ftrace_page *pg;
6473 struct ftrace_func_entry *entry;
6474 int fail = 1;
6475 int not;
6476
6477 /* decode regex */
6478 func_g.type = filter_parse_regex(buffer, strlen(buffer),
6479 &func_g.search, ¬);
6480
6481 func_g.len = strlen(func_g.search);
6482
6483 mutex_lock(&ftrace_lock);
6484
6485 if (unlikely(ftrace_disabled)) {
6486 mutex_unlock(&ftrace_lock);
6487 return -ENODEV;
6488 }
6489
6490 do_for_each_ftrace_rec(pg, rec) {
6491
6492 if (rec->flags & FTRACE_FL_DISABLED)
6493 continue;
6494
6495 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6496 entry = ftrace_lookup_ip(hash, rec->ip);
6497
6498 if (!not) {
6499 fail = 0;
6500
6501 if (entry)
6502 continue;
6503 if (add_hash_entry(hash, rec->ip) < 0)
6504 goto out;
6505 } else {
6506 if (entry) {
6507 free_hash_entry(hash, entry);
6508 fail = 0;
6509 }
6510 }
6511 }
6512 } while_for_each_ftrace_rec();
6513 out:
6514 mutex_unlock(&ftrace_lock);
6515
6516 if (fail)
6517 return -EINVAL;
6518
6519 return 0;
6520 }
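
/*
 * Example: @buffer uses the same glob syntax as set_ftrace_filter, and a
 * leading '!' removes matching entries from @hash instead of adding them.
 * Through the set_graph_function file that looks like:
 *
 *	echo 'sched_*' > set_graph_function
 *	echo '!schedule_idle' >> set_graph_function
 */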
6521
6522 static ssize_t
6523 ftrace_graph_write(struct file *file, const char __user *ubuf,
6524 size_t cnt, loff_t *ppos)
6525 {
6526 ssize_t read, ret = 0;
6527 struct ftrace_graph_data *fgd = file->private_data;
6528 struct trace_parser *parser;
6529
6530 if (!cnt)
6531 return 0;
6532
6533 /* Read mode uses seq functions */
6534 if (file->f_mode & FMODE_READ) {
6535 struct seq_file *m = file->private_data;
6536 fgd = m->private;
6537 }
6538
6539 parser = &fgd->parser;
6540
6541 read = trace_get_user(parser, ubuf, cnt, ppos);
6542
6543 if (read >= 0 && trace_parser_loaded(parser) &&
6544 !trace_parser_cont(parser)) {
6545
6546 ret = ftrace_graph_set_hash(fgd->new_hash,
6547 parser->buffer);
6548 trace_parser_clear(parser);
6549 }
6550
6551 if (!ret)
6552 ret = read;
6553
6554 return ret;
6555 }
6556
6557 static const struct file_operations ftrace_graph_fops = {
6558 .open = ftrace_graph_open,
6559 .read = seq_read,
6560 .write = ftrace_graph_write,
6561 .llseek = tracing_lseek,
6562 .release = ftrace_graph_release,
6563 };
6564
6565 static const struct file_operations ftrace_graph_notrace_fops = {
6566 .open = ftrace_graph_notrace_open,
6567 .read = seq_read,
6568 .write = ftrace_graph_write,
6569 .llseek = tracing_lseek,
6570 .release = ftrace_graph_release,
6571 };
6572 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6573
6574 void ftrace_create_filter_files(struct ftrace_ops *ops,
6575 struct dentry *parent)
6576 {
6577
6578 trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6579 ops, &ftrace_filter_fops);
6580
6581 trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6582 ops, &ftrace_notrace_fops);
6583 }
6584
6585 /*
6586 * The name "destroy_filter_files" is really a misnomer. Although
6587 * it may actually delete the files in the future, this is
6588 * really intended to make sure the ops passed in are disabled
6589 * and that when this function returns, the caller is free to
6590 * free the ops.
6591 *
6592 * The "destroy" name is only to match the "create" name that this
6593 * should be paired with.
6594 */
6595 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6596 {
6597 mutex_lock(&ftrace_lock);
6598 if (ops->flags & FTRACE_OPS_FL_ENABLED)
6599 ftrace_shutdown(ops, 0);
6600 ops->flags |= FTRACE_OPS_FL_DELETED;
6601 ftrace_free_filter(ops);
6602 mutex_unlock(&ftrace_lock);
6603 }
6604
6605 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6606 {
6607
6608 trace_create_file("available_filter_functions", TRACE_MODE_READ,
6609 d_tracer, NULL, &ftrace_avail_fops);
6610
6611 trace_create_file("enabled_functions", TRACE_MODE_READ,
6612 d_tracer, NULL, &ftrace_enabled_fops);
6613
6614 ftrace_create_filter_files(&global_ops, d_tracer);
6615
6616 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6617 trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6618 NULL,
6619 &ftrace_graph_fops);
6620 trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6621 NULL,
6622 &ftrace_graph_notrace_fops);
6623 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6624
6625 return 0;
6626 }
6627
6628 static int ftrace_cmp_ips(const void *a, const void *b)
6629 {
6630 const unsigned long *ipa = a;
6631 const unsigned long *ipb = b;
6632
6633 if (*ipa > *ipb)
6634 return 1;
6635 if (*ipa < *ipb)
6636 return -1;
6637 return 0;
6638 }
6639
6640 #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6641 static void test_is_sorted(unsigned long *start, unsigned long count)
6642 {
6643 int i;
6644
6645 for (i = 1; i < count; i++) {
6646 if (WARN(start[i - 1] > start[i],
6647 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6648 (void *)start[i - 1], start[i - 1],
6649 (void *)start[i], start[i]))
6650 break;
6651 }
6652 if (i == count)
6653 pr_info("ftrace section at %px sorted properly\n", start);
6654 }
6655 #else
6656 static void test_is_sorted(unsigned long *start, unsigned long count)
6657 {
6658 }
6659 #endif
6660
6661 static int ftrace_process_locs(struct module *mod,
6662 unsigned long *start,
6663 unsigned long *end)
6664 {
6665 struct ftrace_page *start_pg;
6666 struct ftrace_page *pg;
6667 struct dyn_ftrace *rec;
6668 unsigned long count;
6669 unsigned long *p;
6670 unsigned long addr;
6671 unsigned long flags = 0; /* Shut up gcc */
6672 int ret = -ENOMEM;
6673
6674 count = end - start;
6675
6676 if (!count)
6677 return 0;
6678
6679 /*
6680 * Sorting mcount in vmlinux at build time depends on
6681 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount_loc in
6682 * modules cannot be sorted at build time.
6683 */
6684 if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
6685 sort(start, count, sizeof(*start),
6686 ftrace_cmp_ips, NULL);
6687 } else {
6688 test_is_sorted(start, count);
6689 }
6690
6691 start_pg = ftrace_allocate_pages(count);
6692 if (!start_pg)
6693 return -ENOMEM;
6694
6695 mutex_lock(&ftrace_lock);
6696
6697 /*
6698 * The core kernel and each module need their own pages, as
6699 * modules will free them when they are removed.
6700 * Force a new page to be allocated for modules.
6701 */
6702 if (!mod) {
6703 WARN_ON(ftrace_pages || ftrace_pages_start);
6704 /* First initialization */
6705 ftrace_pages = ftrace_pages_start = start_pg;
6706 } else {
6707 if (!ftrace_pages)
6708 goto out;
6709
6710 if (WARN_ON(ftrace_pages->next)) {
6711 /* Hmm, we have free pages? */
6712 while (ftrace_pages->next)
6713 ftrace_pages = ftrace_pages->next;
6714 }
6715
6716 ftrace_pages->next = start_pg;
6717 }
6718
6719 p = start;
6720 pg = start_pg;
6721 while (p < end) {
6722 unsigned long end_offset;
6723 addr = ftrace_call_adjust(*p++);
6724 /*
6725 * Some architecture linkers will pad between
6726 * the different mcount_loc sections of different
6727 * object files to satisfy alignments.
6728 * Skip any NULL pointers.
6729 */
6730 if (!addr)
6731 continue;
6732
6733 end_offset = (pg->index+1) * sizeof(pg->records[0]);
6734 if (end_offset > PAGE_SIZE << pg->order) {
6735 /* We should have allocated enough */
6736 if (WARN_ON(!pg->next))
6737 break;
6738 pg = pg->next;
6739 }
6740
6741 rec = &pg->records[pg->index++];
6742 rec->ip = addr;
6743 }
6744
6745 /* We should have used all pages */
6746 WARN_ON(pg->next);
6747
6748 /* Assign the last page to ftrace_pages */
6749 ftrace_pages = pg;
6750
6751 /*
6752 * We only need to disable interrupts on start up
6753 * because we are modifying code that an interrupt
6754 * may execute, and the modification is not atomic.
6755 * But for modules, nothing runs the code we modify
6756 * until we are finished with it, and there's no
6757 * reason to cause large interrupt latencies while we do it.
6758 */
6759 if (!mod)
6760 local_irq_save(flags);
6761 ftrace_update_code(mod, start_pg);
6762 if (!mod)
6763 local_irq_restore(flags);
6764 ret = 0;
6765 out:
6766 mutex_unlock(&ftrace_lock);
6767
6768 return ret;
6769 }
6770
6771 struct ftrace_mod_func {
6772 struct list_head list;
6773 char *name;
6774 unsigned long ip;
6775 unsigned int size;
6776 };
6777
6778 struct ftrace_mod_map {
6779 struct rcu_head rcu;
6780 struct list_head list;
6781 struct module *mod;
6782 unsigned long start_addr;
6783 unsigned long end_addr;
6784 struct list_head funcs;
6785 unsigned int num_funcs;
6786 };
6787
6788 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6789 unsigned long *value, char *type,
6790 char *name, char *module_name,
6791 int *exported)
6792 {
6793 struct ftrace_ops *op;
6794
6795 list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6796 if (!op->trampoline || symnum--)
6797 continue;
6798 *value = op->trampoline;
6799 *type = 't';
6800 strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6801 strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6802 *exported = 0;
6803 return 0;
6804 }
6805
6806 return -ERANGE;
6807 }
6808
6809 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
6810 /*
6811 * Check if the current ops references the given ip.
6812 *
6813 * If the ops traces all functions, then it was already accounted for.
6814 * If the ops does not trace the current record function, skip it.
6815 * If the ops ignores the function via notrace filter, skip it.
6816 */
6817 static bool
6818 ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
6819 {
6820 /* If ops isn't enabled, ignore it */
6821 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6822 return false;
6823
6824 /* If ops traces all then it includes this function */
6825 if (ops_traces_mod(ops))
6826 return true;
6827
6828 /* The function must be in the filter */
6829 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
6830 !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
6831 return false;
6832
6833 /* If in notrace hash, we ignore it too */
6834 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
6835 return false;
6836
6837 return true;
6838 }
6839 #endif
6840
6841 #ifdef CONFIG_MODULES
6842
6843 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6844
6845 static LIST_HEAD(ftrace_mod_maps);
6846
6847 static int referenced_filters(struct dyn_ftrace *rec)
6848 {
6849 struct ftrace_ops *ops;
6850 int cnt = 0;
6851
6852 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6853 if (ops_references_ip(ops, rec->ip)) {
6854 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6855 continue;
6856 if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6857 continue;
6858 cnt++;
6859 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6860 rec->flags |= FTRACE_FL_REGS;
6861 if (cnt == 1 && ops->trampoline)
6862 rec->flags |= FTRACE_FL_TRAMP;
6863 else
6864 rec->flags &= ~FTRACE_FL_TRAMP;
6865 }
6866 }
6867
6868 return cnt;
6869 }
6870
6871 static void
6872 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6873 {
6874 struct ftrace_func_entry *entry;
6875 struct dyn_ftrace *rec;
6876 int i;
6877
6878 if (ftrace_hash_empty(hash))
6879 return;
6880
6881 for (i = 0; i < pg->index; i++) {
6882 rec = &pg->records[i];
6883 entry = __ftrace_lookup_ip(hash, rec->ip);
6884 /*
6885 * Do not allow this rec to match again.
6886 * Yeah, it may waste some memory, but will be removed
6887 * if/when the hash is modified again.
6888 */
6889 if (entry)
6890 entry->ip = 0;
6891 }
6892 }
6893
6894 /* Clear any records from hashes */
6895 static void clear_mod_from_hashes(struct ftrace_page *pg)
6896 {
6897 struct trace_array *tr;
6898
6899 mutex_lock(&trace_types_lock);
6900 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6901 if (!tr->ops || !tr->ops->func_hash)
6902 continue;
6903 mutex_lock(&tr->ops->func_hash->regex_lock);
6904 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6905 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6906 mutex_unlock(&tr->ops->func_hash->regex_lock);
6907 }
6908 mutex_unlock(&trace_types_lock);
6909 }
6910
6911 static void ftrace_free_mod_map(struct rcu_head *rcu)
6912 {
6913 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6914 struct ftrace_mod_func *mod_func;
6915 struct ftrace_mod_func *n;
6916
6917 /* All the contents of mod_map are now not visible to readers */
6918 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6919 kfree(mod_func->name);
6920 list_del(&mod_func->list);
6921 kfree(mod_func);
6922 }
6923
6924 kfree(mod_map);
6925 }
6926
6927 void ftrace_release_mod(struct module *mod)
6928 {
6929 struct ftrace_mod_map *mod_map;
6930 struct ftrace_mod_map *n;
6931 struct dyn_ftrace *rec;
6932 struct ftrace_page **last_pg;
6933 struct ftrace_page *tmp_page = NULL;
6934 struct ftrace_page *pg;
6935
6936 mutex_lock(&ftrace_lock);
6937
6938 if (ftrace_disabled)
6939 goto out_unlock;
6940
6941 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6942 if (mod_map->mod == mod) {
6943 list_del_rcu(&mod_map->list);
6944 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6945 break;
6946 }
6947 }
6948
6949 /*
6950 * Each module has its own ftrace_pages, remove
6951 * them from the list.
6952 */
6953 last_pg = &ftrace_pages_start;
6954 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6955 rec = &pg->records[0];
6956 if (within_module_core(rec->ip, mod) ||
6957 within_module_init(rec->ip, mod)) {
6958 /*
6959 * As core pages are first, the first
6960 * page should never be a module page.
6961 */
6962 if (WARN_ON(pg == ftrace_pages_start))
6963 goto out_unlock;
6964
6965 /* Check if we are deleting the last page */
6966 if (pg == ftrace_pages)
6967 ftrace_pages = next_to_ftrace_page(last_pg);
6968
6969 ftrace_update_tot_cnt -= pg->index;
6970 *last_pg = pg->next;
6971
6972 pg->next = tmp_page;
6973 tmp_page = pg;
6974 } else
6975 last_pg = &pg->next;
6976 }
6977 out_unlock:
6978 mutex_unlock(&ftrace_lock);
6979
6980 for (pg = tmp_page; pg; pg = tmp_page) {
6981
6982 /* Needs to be called outside of ftrace_lock */
6983 clear_mod_from_hashes(pg);
6984
6985 if (pg->records) {
6986 free_pages((unsigned long)pg->records, pg->order);
6987 ftrace_number_of_pages -= 1 << pg->order;
6988 }
6989 tmp_page = pg->next;
6990 kfree(pg);
6991 ftrace_number_of_groups--;
6992 }
6993 }
6994
6995 void ftrace_module_enable(struct module *mod)
6996 {
6997 struct dyn_ftrace *rec;
6998 struct ftrace_page *pg;
6999
7000 mutex_lock(&ftrace_lock);
7001
7002 if (ftrace_disabled)
7003 goto out_unlock;
7004
7005 /*
7006 * If the tracing is enabled, go ahead and enable the record.
7007 *
7008 * The reason not to enable the record immediately is the
7009 * inherent check of ftrace_make_nop/ftrace_make_call for
7010 * correct previous instructions. Doing the NOP
7011 * conversion first puts the module into the correct state, thus
7012 * passing the ftrace_make_call check.
7013 *
7014 * We also delay this to after the module code already set the
7015 * text to read-only, as we now need to set it back to read-write
7016 * so that we can modify the text.
7017 */
7018 if (ftrace_start_up)
7019 ftrace_arch_code_modify_prepare();
7020
7021 do_for_each_ftrace_rec(pg, rec) {
7022 int cnt;
7023 /*
7024 * do_for_each_ftrace_rec() is a double loop.
7025 * module text shares the pg. If a record is
7026 * not part of this module, then skip this pg,
7027 * which the "break" will do.
7028 */
7029 if (!within_module_core(rec->ip, mod) &&
7030 !within_module_init(rec->ip, mod))
7031 break;
7032
7033 /* Weak functions should still be ignored */
7034 if (!test_for_valid_rec(rec)) {
7035 /* Clear all other flags. Should not be enabled anyway */
7036 rec->flags = FTRACE_FL_DISABLED;
7037 continue;
7038 }
7039
7040 cnt = 0;
7041
7042 /*
7043 * When adding a module, we need to check if tracers are
7044 * currently enabled and if they are, and can trace this record,
7045 * we need to enable the module functions as well as update the
7046 * reference counts for those function records.
7047 */
7048 if (ftrace_start_up)
7049 cnt += referenced_filters(rec);
7050
7051 rec->flags &= ~FTRACE_FL_DISABLED;
7052 rec->flags += cnt;
7053
7054 if (ftrace_start_up && cnt) {
7055 int failed = __ftrace_replace_code(rec, 1);
7056 if (failed) {
7057 ftrace_bug(failed, rec);
7058 goto out_loop;
7059 }
7060 }
7061
7062 } while_for_each_ftrace_rec();
7063
7064 out_loop:
7065 if (ftrace_start_up)
7066 ftrace_arch_code_modify_post_process();
7067
7068 out_unlock:
7069 mutex_unlock(&ftrace_lock);
7070
7071 process_cached_mods(mod->name);
7072 }
7073
7074 void ftrace_module_init(struct module *mod)
7075 {
7076 int ret;
7077
7078 if (ftrace_disabled || !mod->num_ftrace_callsites)
7079 return;
7080
7081 ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7082 mod->ftrace_callsites + mod->num_ftrace_callsites);
7083 if (ret)
7084 pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7085 mod->name);
7086 }
7087
7088 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7089 struct dyn_ftrace *rec)
7090 {
7091 struct ftrace_mod_func *mod_func;
7092 unsigned long symsize;
7093 unsigned long offset;
7094 char str[KSYM_SYMBOL_LEN];
7095 char *modname;
7096 const char *ret;
7097
7098 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7099 if (!ret)
7100 return;
7101
7102 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7103 if (!mod_func)
7104 return;
7105
7106 mod_func->name = kstrdup(str, GFP_KERNEL);
7107 if (!mod_func->name) {
7108 kfree(mod_func);
7109 return;
7110 }
7111
7112 mod_func->ip = rec->ip - offset;
7113 mod_func->size = symsize;
7114
7115 mod_map->num_funcs++;
7116
7117 list_add_rcu(&mod_func->list, &mod_map->funcs);
7118 }
7119
7120 static struct ftrace_mod_map *
7121 allocate_ftrace_mod_map(struct module *mod,
7122 unsigned long start, unsigned long end)
7123 {
7124 struct ftrace_mod_map *mod_map;
7125
7126 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7127 if (!mod_map)
7128 return NULL;
7129
7130 mod_map->mod = mod;
7131 mod_map->start_addr = start;
7132 mod_map->end_addr = end;
7133 mod_map->num_funcs = 0;
7134
7135 INIT_LIST_HEAD_RCU(&mod_map->funcs);
7136
7137 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7138
7139 return mod_map;
7140 }
7141
7142 static const char *
7143 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7144 unsigned long addr, unsigned long *size,
7145 unsigned long *off, char *sym)
7146 {
7147 struct ftrace_mod_func *found_func = NULL;
7148 struct ftrace_mod_func *mod_func;
7149
7150 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7151 if (addr >= mod_func->ip &&
7152 addr < mod_func->ip + mod_func->size) {
7153 found_func = mod_func;
7154 break;
7155 }
7156 }
7157
7158 if (found_func) {
7159 if (size)
7160 *size = found_func->size;
7161 if (off)
7162 *off = addr - found_func->ip;
7163 if (sym)
7164 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
7165
7166 return found_func->name;
7167 }
7168
7169 return NULL;
7170 }
7171
7172 const char *
7173 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7174 unsigned long *off, char **modname, char *sym)
7175 {
7176 struct ftrace_mod_map *mod_map;
7177 const char *ret = NULL;
7178
7179 /* mod_map is freed via call_rcu() */
7180 preempt_disable();
7181 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7182 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7183 if (ret) {
7184 if (modname)
7185 *modname = mod_map->mod->name;
7186 break;
7187 }
7188 }
7189 preempt_enable();
7190
7191 return ret;
7192 }
7193
7194 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7195 char *type, char *name,
7196 char *module_name, int *exported)
7197 {
7198 struct ftrace_mod_map *mod_map;
7199 struct ftrace_mod_func *mod_func;
7200 int ret;
7201
7202 preempt_disable();
7203 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7204
7205 if (symnum >= mod_map->num_funcs) {
7206 symnum -= mod_map->num_funcs;
7207 continue;
7208 }
7209
7210 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7211 if (symnum > 1) {
7212 symnum--;
7213 continue;
7214 }
7215
7216 *value = mod_func->ip;
7217 *type = 'T';
7218 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
7219 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7220 *exported = 1;
7221 preempt_enable();
7222 return 0;
7223 }
7224 WARN_ON(1);
7225 break;
7226 }
7227 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7228 module_name, exported);
7229 preempt_enable();
7230 return ret;
7231 }
7232
7233 #else
7234 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7235 struct dyn_ftrace *rec) { }
7236 static inline struct ftrace_mod_map *
7237 allocate_ftrace_mod_map(struct module *mod,
7238 unsigned long start, unsigned long end)
7239 {
7240 return NULL;
7241 }
7242 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7243 char *type, char *name, char *module_name,
7244 int *exported)
7245 {
7246 int ret;
7247
7248 preempt_disable();
7249 ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7250 module_name, exported);
7251 preempt_enable();
7252 return ret;
7253 }
7254 #endif /* CONFIG_MODULES */
7255
7256 struct ftrace_init_func {
7257 struct list_head list;
7258 unsigned long ip;
7259 };
7260
7261 /* Clear any init ips from hashes */
7262 static void
7263 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7264 {
7265 struct ftrace_func_entry *entry;
7266
7267 entry = ftrace_lookup_ip(hash, func->ip);
7268 /*
7269 * Do not allow this rec to match again.
7270 * Yeah, it may waste some memory, but will be removed
7271 * if/when the hash is modified again.
7272 */
7273 if (entry)
7274 entry->ip = 0;
7275 }
7276
7277 static void
7278 clear_func_from_hashes(struct ftrace_init_func *func)
7279 {
7280 struct trace_array *tr;
7281
7282 mutex_lock(&trace_types_lock);
7283 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7284 if (!tr->ops || !tr->ops->func_hash)
7285 continue;
7286 mutex_lock(&tr->ops->func_hash->regex_lock);
7287 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7288 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7289 mutex_unlock(&tr->ops->func_hash->regex_lock);
7290 }
7291 mutex_unlock(&trace_types_lock);
7292 }
7293
7294 static void add_to_clear_hash_list(struct list_head *clear_list,
7295 struct dyn_ftrace *rec)
7296 {
7297 struct ftrace_init_func *func;
7298
7299 func = kmalloc(sizeof(*func), GFP_KERNEL);
7300 if (!func) {
7301 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7302 return;
7303 }
7304
7305 func->ip = rec->ip;
7306 list_add(&func->list, clear_list);
7307 }
7308
7309 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7310 {
7311 unsigned long start = (unsigned long)(start_ptr);
7312 unsigned long end = (unsigned long)(end_ptr);
7313 struct ftrace_page **last_pg = &ftrace_pages_start;
7314 struct ftrace_page *pg;
7315 struct dyn_ftrace *rec;
7316 struct dyn_ftrace key;
7317 struct ftrace_mod_map *mod_map = NULL;
7318 struct ftrace_init_func *func, *func_next;
7319 struct list_head clear_hash;
7320
7321 INIT_LIST_HEAD(&clear_hash);
7322
7323 key.ip = start;
7324 key.flags = end; /* overload flags, as it is unsigned long */
7325
7326 mutex_lock(&ftrace_lock);
7327
7328 /*
7329 * If we are freeing module init memory, then check if
7330 * any tracer is active. If so, we need to save a mapping of
7331 * the module functions being freed with the address.
7332 */
7333 if (mod && ftrace_ops_list != &ftrace_list_end)
7334 mod_map = allocate_ftrace_mod_map(mod, start, end);
7335
7336 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7337 if (end < pg->records[0].ip ||
7338 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7339 continue;
7340 again:
7341 rec = bsearch(&key, pg->records, pg->index,
7342 sizeof(struct dyn_ftrace),
7343 ftrace_cmp_recs);
7344 if (!rec)
7345 continue;
7346
7347 /* rec will be cleared from hashes after ftrace_lock unlock */
7348 add_to_clear_hash_list(&clear_hash, rec);
7349
7350 if (mod_map)
7351 save_ftrace_mod_rec(mod_map, rec);
7352
7353 pg->index--;
7354 ftrace_update_tot_cnt--;
7355 if (!pg->index) {
7356 *last_pg = pg->next;
7357 if (pg->records) {
7358 free_pages((unsigned long)pg->records, pg->order);
7359 ftrace_number_of_pages -= 1 << pg->order;
7360 }
7361 ftrace_number_of_groups--;
7362 kfree(pg);
7363 pg = container_of(last_pg, struct ftrace_page, next);
7364 if (!(*last_pg))
7365 ftrace_pages = pg;
7366 continue;
7367 }
7368 memmove(rec, rec + 1,
7369 (pg->index - (rec - pg->records)) * sizeof(*rec));
7370 /* More than one function may be in this block */
7371 goto again;
7372 }
7373 mutex_unlock(&ftrace_lock);
7374
7375 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7376 clear_func_from_hashes(func);
7377 kfree(func);
7378 }
7379 }
7380
7381 void __init ftrace_free_init_mem(void)
7382 {
7383 void *start = (void *)(&__init_begin);
7384 void *end = (void *)(&__init_end);
7385
7386 ftrace_boot_snapshot();
7387
7388 ftrace_free_mem(NULL, start, end);
7389 }
7390
7391 int __init __weak ftrace_dyn_arch_init(void)
7392 {
7393 return 0;
7394 }
7395
7396 void __init ftrace_init(void)
7397 {
7398 extern unsigned long __start_mcount_loc[];
7399 extern unsigned long __stop_mcount_loc[];
7400 unsigned long count, flags;
7401 int ret;
7402
7403 local_irq_save(flags);
7404 ret = ftrace_dyn_arch_init();
7405 local_irq_restore(flags);
7406 if (ret)
7407 goto failed;
7408
7409 count = __stop_mcount_loc - __start_mcount_loc;
7410 if (!count) {
7411 pr_info("ftrace: No functions to be traced?\n");
7412 goto failed;
7413 }
7414
7415 pr_info("ftrace: allocating %ld entries in %ld pages\n",
7416 count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
7417
7418 ret = ftrace_process_locs(NULL,
7419 __start_mcount_loc,
7420 __stop_mcount_loc);
7421 if (ret) {
7422 pr_warn("ftrace: failed to allocate entries for functions\n");
7423 goto failed;
7424 }
7425
7426 pr_info("ftrace: allocated %ld pages with %ld groups\n",
7427 ftrace_number_of_pages, ftrace_number_of_groups);
7428
7429 last_ftrace_enabled = ftrace_enabled = 1;
7430
7431 set_ftrace_early_filters();
7432
7433 return;
7434 failed:
7435 ftrace_disabled = 1;
7436 }
7437
7438 /* Do nothing if arch does not support this */
7439 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7440 {
7441 }
7442
7443 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7444 {
7445 unsigned long trampoline = ops->trampoline;
7446
7447 arch_ftrace_update_trampoline(ops);
7448 if (ops->trampoline && ops->trampoline != trampoline &&
7449 (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7450 /* Add to kallsyms before the perf events */
7451 ftrace_add_trampoline_to_kallsyms(ops);
7452 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7453 ops->trampoline, ops->trampoline_size, false,
7454 FTRACE_TRAMPOLINE_SYM);
7455 /*
7456 * Record the perf text poke event after the ksymbol register
7457 * event.
7458 */
7459 perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7460 (void *)ops->trampoline,
7461 ops->trampoline_size);
7462 }
7463 }
7464
7465 void ftrace_init_trace_array(struct trace_array *tr)
7466 {
7467 INIT_LIST_HEAD(&tr->func_probes);
7468 INIT_LIST_HEAD(&tr->mod_trace);
7469 INIT_LIST_HEAD(&tr->mod_notrace);
7470 }
7471 #else
7472
7473 struct ftrace_ops global_ops = {
7474 .func = ftrace_stub,
7475 .flags = FTRACE_OPS_FL_INITIALIZED |
7476 FTRACE_OPS_FL_PID,
7477 };
7478
7479 static int __init ftrace_nodyn_init(void)
7480 {
7481 ftrace_enabled = 1;
7482 return 0;
7483 }
7484 core_initcall(ftrace_nodyn_init);
7485
7486 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7487 static inline void ftrace_startup_all(int command) { }
7488
7489 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7490 {
7491 }
7492
7493 #endif /* CONFIG_DYNAMIC_FTRACE */
7494
7495 __init void ftrace_init_global_array_ops(struct trace_array *tr)
7496 {
7497 tr->ops = &global_ops;
7498 tr->ops->private = tr;
7499 ftrace_init_trace_array(tr);
7500 }
7501
7502 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7503 {
7504 /* If we filter on pids, update to use the pid function */
7505 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7506 if (WARN_ON(tr->ops->func != ftrace_stub))
7507 printk("ftrace ops had %pS for function\n",
7508 tr->ops->func);
7509 }
7510 tr->ops->func = func;
7511 tr->ops->private = tr;
7512 }
7513
7514 void ftrace_reset_array_ops(struct trace_array *tr)
7515 {
7516 tr->ops->func = ftrace_stub;
7517 }
7518
7519 static nokprobe_inline void
7520 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7521 struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7522 {
7523 struct pt_regs *regs = ftrace_get_regs(fregs);
7524 struct ftrace_ops *op;
7525 int bit;
7526
7527 /*
7528 * The ftrace_test_and_set_recursion() will disable preemption,
7529 * which is required since some of the ops may be dynamically
7530 * allocated, they must be freed after a synchronize_rcu().
7531 */
7532 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7533 if (bit < 0)
7534 return;
7535
7536 do_for_each_ftrace_op(op, ftrace_ops_list) {
7537 /* Stub functions don't need to be called nor tested */
7538 if (op->flags & FTRACE_OPS_FL_STUB)
7539 continue;
7540 /*
7541 * Check the following for each ops before calling their func:
7542 * if RCU flag is set, then rcu_is_watching() must be true
7543 * if PER_CPU is set, then ftrace_function_local_disable()
7544 * must be false
7545 * Otherwise test if the ip matches the ops filter
7546 *
7547 * If any of the above fails then the op->func() is not executed.
7548 */
7549 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7550 ftrace_ops_test(op, ip, regs)) {
7551 if (FTRACE_WARN_ON(!op->func)) {
7552 pr_warn("op=%p %pS\n", op, op);
7553 goto out;
7554 }
7555 op->func(ip, parent_ip, op, fregs);
7556 }
7557 } while_for_each_ftrace_op(op);
7558 out:
7559 trace_clear_recursion(bit);
7560 }
7561
7562 /*
7563 * Some archs only support passing ip and parent_ip. Even though
7564 * the list function ignores the op parameter, we do not want any
7565 * C side effects, where a function is called without the caller
7566 * sending a third parameter.
7567 * Archs are to support both the regs and ftrace_ops at the same time.
7568 * If they support ftrace_ops, it is assumed they support regs.
7569 * If call backs want to use regs, they must either check for regs
7570 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7571 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7572 * An architecture can pass partial regs with ftrace_ops and still
7573 * set the ARCH_SUPPORTS_FTRACE_OPS.
7574 *
7575 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7576 * arch_ftrace_ops_list_func.
7577 */
7578 #if ARCH_SUPPORTS_FTRACE_OPS
7579 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7580 struct ftrace_ops *op, struct ftrace_regs *fregs)
7581 {
7582 __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7583 }
7584 #else
7585 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7586 {
7587 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7588 }
7589 #endif
7590 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7591
7592 /*
7593 * If there's only one function registered but it does not support
7594 * recursion, needs RCU protection and/or requires per cpu handling, then
7595 * this function will be called by the mcount trampoline.
7596 */
7597 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7598 struct ftrace_ops *op, struct ftrace_regs *fregs)
7599 {
7600 int bit;
7601
7602 bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7603 if (bit < 0)
7604 return;
7605
7606 if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7607 op->func(ip, parent_ip, op, fregs);
7608
7609 trace_clear_recursion(bit);
7610 }
7611 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7612
7613 /**
7614 * ftrace_ops_get_func - get the function a trampoline should call
7615 * @ops: the ops to get the function for
7616 *
7617 * Normally the mcount trampoline will call the ops->func, but there
7618 * are times that it should not. For example, if the ops does not
7619 * have its own recursion protection, then it should call the
7620 * ftrace_ops_assist_func() instead.
7621 *
7622 * Returns the function that the trampoline should call for @ops.
7623 */
7624 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7625 {
7626 /*
7627 * If the function does not handle recursion or needs to be RCU safe,
7628 * then we need to call the assist handler.
7629 */
7630 if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7631 FTRACE_OPS_FL_RCU))
7632 return ftrace_ops_assist_func;
7633
7634 return ops->func;
7635 }
7636
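/*
 * For example (a sketch; my_ops and my_callback are hypothetical), an ops
 * declared as
 *
 *	static struct ftrace_ops my_ops = {
 *		.func  = my_callback,
 *		.flags = FTRACE_OPS_FL_RECURSION,
 *	};
 *
 * has its trampoline call ftrace_ops_assist_func(), which brackets
 * my_callback() with the recursion lock, while an ops without such flags
 * gets my_callback() called directly.
 */
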
7637 static void
7638 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7639 struct task_struct *prev,
7640 struct task_struct *next,
7641 unsigned int prev_state)
7642 {
7643 struct trace_array *tr = data;
7644 struct trace_pid_list *pid_list;
7645 struct trace_pid_list *no_pid_list;
7646
7647 pid_list = rcu_dereference_sched(tr->function_pids);
7648 no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7649
7650 if (trace_ignore_this_task(pid_list, no_pid_list, next))
7651 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7652 FTRACE_PID_IGNORE);
7653 else
7654 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7655 next->pid);
7656 }
7657
7658 static void
7659 ftrace_pid_follow_sched_process_fork(void *data,
7660 struct task_struct *self,
7661 struct task_struct *task)
7662 {
7663 struct trace_pid_list *pid_list;
7664 struct trace_array *tr = data;
7665
7666 pid_list = rcu_dereference_sched(tr->function_pids);
7667 trace_filter_add_remove_task(pid_list, self, task);
7668
7669 pid_list = rcu_dereference_sched(tr->function_no_pids);
7670 trace_filter_add_remove_task(pid_list, self, task);
7671 }
7672
7673 static void
7674 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7675 {
7676 struct trace_pid_list *pid_list;
7677 struct trace_array *tr = data;
7678
7679 pid_list = rcu_dereference_sched(tr->function_pids);
7680 trace_filter_add_remove_task(pid_list, NULL, task);
7681
7682 pid_list = rcu_dereference_sched(tr->function_no_pids);
7683 trace_filter_add_remove_task(pid_list, NULL, task);
7684 }
7685
7686 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7687 {
7688 if (enable) {
7689 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7690 tr);
7691 register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7692 tr);
7693 } else {
7694 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7695 tr);
7696 unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7697 tr);
7698 }
7699 }
7700
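/*
 * Whether the fork/exit probes above are registered is driven by the
 * "function-fork" trace option. A usage sketch, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 *
 *	echo $$ > set_ftrace_pid
 *	echo 1 > options/function-fork	# children of $$ follow the filter
 */
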
7701 static void clear_ftrace_pids(struct trace_array *tr, int type)
7702 {
7703 struct trace_pid_list *pid_list;
7704 struct trace_pid_list *no_pid_list;
7705 int cpu;
7706
7707 pid_list = rcu_dereference_protected(tr->function_pids,
7708 lockdep_is_held(&ftrace_lock));
7709 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7710 lockdep_is_held(&ftrace_lock));
7711
7712 /* Make sure there's something to do */
7713 if (!pid_type_enabled(type, pid_list, no_pid_list))
7714 return;
7715
7716 /* See if the pids still need to be checked after this */
7717 if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7718 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7719 for_each_possible_cpu(cpu)
7720 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7721 }
7722
7723 if (type & TRACE_PIDS)
7724 rcu_assign_pointer(tr->function_pids, NULL);
7725
7726 if (type & TRACE_NO_PIDS)
7727 rcu_assign_pointer(tr->function_no_pids, NULL);
7728
7729 /* Wait until all users are done with the old pid lists */
7730 synchronize_rcu();
7731
7732 if ((type & TRACE_PIDS) && pid_list)
7733 trace_pid_list_free(pid_list);
7734
7735 if ((type & TRACE_NO_PIDS) && no_pid_list)
7736 trace_pid_list_free(no_pid_list);
7737 }
7738
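/*
 * The teardown above follows the usual RCU retire pattern (sketch):
 *
 *	rcu_assign_pointer(ptr, NULL);	// unpublish the list
 *	synchronize_rcu();		// wait out all existing readers
 *	trace_pid_list_free(old);	// nothing can reach it now
 *
 * which is why the frees must come only after synchronize_rcu().
 */
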
7739 void ftrace_clear_pids(struct trace_array *tr)
7740 {
7741 mutex_lock(&ftrace_lock);
7742
7743 clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7744
7745 mutex_unlock(&ftrace_lock);
7746 }
7747
7748 static void ftrace_pid_reset(struct trace_array *tr, int type)
7749 {
7750 mutex_lock(&ftrace_lock);
7751 clear_ftrace_pids(tr, type);
7752
7753 ftrace_update_pid_func();
7754 ftrace_startup_all(0);
7755
7756 mutex_unlock(&ftrace_lock);
7757 }
7758
7759 /* Greater than any max PID */
7760 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
7761
7762 static void *fpid_start(struct seq_file *m, loff_t *pos)
7763 __acquires(RCU)
7764 {
7765 struct trace_pid_list *pid_list;
7766 struct trace_array *tr = m->private;
7767
7768 mutex_lock(&ftrace_lock);
7769 rcu_read_lock_sched();
7770
7771 pid_list = rcu_dereference_sched(tr->function_pids);
7772
7773 if (!pid_list)
7774 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7775
7776 return trace_pid_start(pid_list, pos);
7777 }
7778
7779 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7780 {
7781 struct trace_array *tr = m->private;
7782 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7783
7784 if (v == FTRACE_NO_PIDS) {
7785 (*pos)++;
7786 return NULL;
7787 }
7788 return trace_pid_next(pid_list, v, pos);
7789 }
7790
7791 static void fpid_stop(struct seq_file *m, void *p)
7792 __releases(RCU)
7793 {
7794 rcu_read_unlock_sched();
7795 mutex_unlock(&ftrace_lock);
7796 }
7797
7798 static int fpid_show(struct seq_file *m, void *v)
7799 {
7800 if (v == FTRACE_NO_PIDS) {
7801 seq_puts(m, "no pid\n");
7802 return 0;
7803 }
7804
7805 return trace_pid_show(m, v);
7806 }
7807
7808 static const struct seq_operations ftrace_pid_sops = {
7809 .start = fpid_start,
7810 .next = fpid_next,
7811 .stop = fpid_stop,
7812 .show = fpid_show,
7813 };
7814
7815 static void *fnpid_start(struct seq_file *m, loff_t *pos)
7816 __acquires(RCU)
7817 {
7818 struct trace_pid_list *pid_list;
7819 struct trace_array *tr = m->private;
7820
7821 mutex_lock(&ftrace_lock);
7822 rcu_read_lock_sched();
7823
7824 pid_list = rcu_dereference_sched(tr->function_no_pids);
7825
7826 if (!pid_list)
7827 return !(*pos) ? FTRACE_NO_PIDS : NULL;
7828
7829 return trace_pid_start(pid_list, pos);
7830 }
7831
7832 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7833 {
7834 struct trace_array *tr = m->private;
7835 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7836
7837 if (v == FTRACE_NO_PIDS) {
7838 (*pos)++;
7839 return NULL;
7840 }
7841 return trace_pid_next(pid_list, v, pos);
7842 }
7843
7844 static const struct seq_operations ftrace_no_pid_sops = {
7845 .start = fnpid_start,
7846 .next = fnpid_next,
7847 .stop = fpid_stop,
7848 .show = fpid_show,
7849 };
7850
7851 static int pid_open(struct inode *inode, struct file *file, int type)
7852 {
7853 const struct seq_operations *seq_ops;
7854 struct trace_array *tr = inode->i_private;
7855 struct seq_file *m;
7856 int ret = 0;
7857
7858 ret = tracing_check_open_get_tr(tr);
7859 if (ret)
7860 return ret;
7861
7862 if ((file->f_mode & FMODE_WRITE) &&
7863 (file->f_flags & O_TRUNC))
7864 ftrace_pid_reset(tr, type);
7865
7866 switch (type) {
7867 case TRACE_PIDS:
7868 seq_ops = &ftrace_pid_sops;
7869 break;
7870 case TRACE_NO_PIDS:
7871 seq_ops = &ftrace_no_pid_sops;
7872 break;
7873 default:
7874 trace_array_put(tr);
7875 WARN_ON_ONCE(1);
7876 return -EINVAL;
7877 }
7878
7879 ret = seq_open(file, seq_ops);
7880 if (ret < 0) {
7881 trace_array_put(tr);
7882 } else {
7883 m = file->private_data;
7884 /* copy tr over to seq ops */
7885 m->private = tr;
7886 }
7887
7888 return ret;
7889 }
7890
7891 static int
7892 ftrace_pid_open(struct inode *inode, struct file *file)
7893 {
7894 return pid_open(inode, file, TRACE_PIDS);
7895 }
7896
7897 static int
7898 ftrace_no_pid_open(struct inode *inode, struct file *file)
7899 {
7900 return pid_open(inode, file, TRACE_NO_PIDS);
7901 }
7902
7903 static void ignore_task_cpu(void *data)
7904 {
7905 struct trace_array *tr = data;
7906 struct trace_pid_list *pid_list;
7907 struct trace_pid_list *no_pid_list;
7908
7909 /*
7910 * This function is called by on_each_cpu() while the
7911 * ftrace_lock is held.
7912 */
7913 pid_list = rcu_dereference_protected(tr->function_pids,
7914 mutex_is_locked(&ftrace_lock));
7915 no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7916 mutex_is_locked(&ftrace_lock));
7917
7918 if (trace_ignore_this_task(pid_list, no_pid_list, current))
7919 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7920 FTRACE_PID_IGNORE);
7921 else
7922 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7923 current->pid);
7924 }
7925
7926 static ssize_t
7927 pid_write(struct file *filp, const char __user *ubuf,
7928 size_t cnt, loff_t *ppos, int type)
7929 {
7930 struct seq_file *m = filp->private_data;
7931 struct trace_array *tr = m->private;
7932 struct trace_pid_list *filtered_pids;
7933 struct trace_pid_list *other_pids;
7934 struct trace_pid_list *pid_list;
7935 ssize_t ret;
7936
7937 if (!cnt)
7938 return 0;
7939
7940 mutex_lock(&ftrace_lock);
7941
7942 switch (type) {
7943 case TRACE_PIDS:
7944 filtered_pids = rcu_dereference_protected(tr->function_pids,
7945 lockdep_is_held(&ftrace_lock));
7946 other_pids = rcu_dereference_protected(tr->function_no_pids,
7947 lockdep_is_held(&ftrace_lock));
7948 break;
7949 case TRACE_NO_PIDS:
7950 filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7951 lockdep_is_held(&ftrace_lock));
7952 other_pids = rcu_dereference_protected(tr->function_pids,
7953 lockdep_is_held(&ftrace_lock));
7954 break;
7955 default:
7956 ret = -EINVAL;
7957 WARN_ON_ONCE(1);
7958 goto out;
7959 }
7960
7961 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7962 if (ret < 0)
7963 goto out;
7964
7965 switch (type) {
7966 case TRACE_PIDS:
7967 rcu_assign_pointer(tr->function_pids, pid_list);
7968 break;
7969 case TRACE_NO_PIDS:
7970 rcu_assign_pointer(tr->function_no_pids, pid_list);
7971 break;
7972 }
7973
7975 if (filtered_pids) {
7976 synchronize_rcu();
7977 trace_pid_list_free(filtered_pids);
7978 } else if (pid_list && !other_pids) {
7979 /* Register a probe to set whether to ignore the tracing of a task */
7980 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7981 }
7982
7983 /*
7984 * Pid filtering takes effect at task switch, but the per-cpu
7985 * state must also be updated for tasks that are already running.
7986 * Always do this, in case a pid was appended or removed.
7987 */
7988 on_each_cpu(ignore_task_cpu, tr, 1);
7989
7990 ftrace_update_pid_func();
7991 ftrace_startup_all(0);
7992 out:
7993 mutex_unlock(&ftrace_lock);
7994
7995 if (ret > 0)
7996 *ppos += ret;
7997
7998 return ret;
7999 }
8000
8001 static ssize_t
8002 ftrace_pid_write(struct file *filp, const char __user *ubuf,
8003 size_t cnt, loff_t *ppos)
8004 {
8005 return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
8006 }
8007
8008 static ssize_t
8009 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
8010 size_t cnt, loff_t *ppos)
8011 {
8012 return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
8013 }
8014
8015 static int
8016 ftrace_pid_release(struct inode *inode, struct file *file)
8017 {
8018 struct trace_array *tr = inode->i_private;
8019
8020 trace_array_put(tr);
8021
8022 return seq_release(inode, file);
8023 }
8024
8025 static const struct file_operations ftrace_pid_fops = {
8026 .open = ftrace_pid_open,
8027 .write = ftrace_pid_write,
8028 .read = seq_read,
8029 .llseek = tracing_lseek,
8030 .release = ftrace_pid_release,
8031 };
8032
8033 static const struct file_operations ftrace_no_pid_fops = {
8034 .open = ftrace_no_pid_open,
8035 .write = ftrace_no_pid_write,
8036 .read = seq_read,
8037 .llseek = tracing_lseek,
8038 .release = ftrace_pid_release,
8039 };
8040
8041 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8042 {
8043 trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8044 tr, &ftrace_pid_fops);
8045 trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8046 d_tracer, tr, &ftrace_no_pid_fops);
8047 }
8048
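/*
 * A usage sketch for the two files created above (paths assume tracefs
 * is mounted at /sys/kernel/tracing):
 *
 *	echo 123 > set_ftrace_pid		# trace only pid 123
 *	echo 456 > set_ftrace_notrace_pid	# never trace pid 456
 *	echo > set_ftrace_pid			# O_TRUNC resets the filter
 */
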
8049 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8050 struct dentry *d_tracer)
8051 {
8052 /* Only the top level directory has the dyn_tracefs and profile */
8053 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8054
8055 ftrace_init_dyn_tracefs(d_tracer);
8056 ftrace_profile_tracefs(d_tracer);
8057 }
8058
8059 /**
8060 * ftrace_kill - kill ftrace
8061 *
8062 * This function should be used by panic code. It stops ftrace
8063 * but in a not so nice way. If you need to simply stop ftrace
8064 * from a non-atomic section, use unregister_ftrace_function().
8065 */
8066 void ftrace_kill(void)
8067 {
8068 ftrace_disabled = 1;
8069 ftrace_enabled = 0;
8070 ftrace_trace_function = ftrace_stub;
8071 }
8072
8073 /**
8074 * ftrace_is_dead - Test if ftrace is dead or not.
8075 *
8076 * Returns 1 if ftrace is "dead", zero otherwise.
8077 */
8078 int ftrace_is_dead(void)
8079 {
8080 return ftrace_disabled;
8081 }
8082
8083 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8084 /*
8085 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
8086 * it doesn't conflict with any direct ftrace_ops. If there is an existing
8087 * direct ftrace_ops on a kernel function being patched, call
8088 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
8089 *
8090 * @ops: ftrace_ops being registered.
8091 *
8092 * Returns:
8093 * 0 on success;
8094 * Negative on failure.
8095 */
8096 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8097 {
8098 struct ftrace_func_entry *entry;
8099 struct ftrace_hash *hash;
8100 struct ftrace_ops *op;
8101 int size, i, ret;
8102
8103 lockdep_assert_held_once(&direct_mutex);
8104
8105 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8106 return 0;
8107
8108 hash = ops->func_hash->filter_hash;
8109 size = 1 << hash->size_bits;
8110 for (i = 0; i < size; i++) {
8111 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8112 unsigned long ip = entry->ip;
8113 bool found_op = false;
8114
8115 mutex_lock(&ftrace_lock);
8116 do_for_each_ftrace_op(op, ftrace_ops_list) {
8117 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8118 continue;
8119 if (ops_references_ip(op, ip)) {
8120 found_op = true;
8121 break;
8122 }
8123 } while_for_each_ftrace_op(op);
8124 mutex_unlock(&ftrace_lock);
8125
8126 if (found_op) {
8127 if (!op->ops_func)
8128 return -EBUSY;
8129
8130 ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8131 if (ret)
8132 return ret;
8133 }
8134 }
8135 }
8136
8137 return 0;
8138 }
8139
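/*
 * A sketch of the ops_func a direct ftrace_ops could supply to honor this
 * request (my_direct_ops_func is a hypothetical name; the BPF trampoline
 * is the in-tree user of this mechanism):
 *
 *	static int my_direct_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
 *	{
 *		switch (cmd) {
 *		case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
 *			// switch to a trampoline that tolerates a modified ip
 *			return 0;
 *		case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
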
8140 /*
8141 * Similar to prepare_direct_functions_for_ipmodify, clean up after ops
8142 * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT
8143 * ops.
8144 */
8145 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8146 {
8147 struct ftrace_func_entry *entry;
8148 struct ftrace_hash *hash;
8149 struct ftrace_ops *op;
8150 int size, i;
8151
8152 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8153 return;
8154
8155 mutex_lock(&direct_mutex);
8156
8157 hash = ops->func_hash->filter_hash;
8158 size = 1 << hash->size_bits;
8159 for (i = 0; i < size; i++) {
8160 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8161 unsigned long ip = entry->ip;
8162 bool found_op = false;
8163
8164 mutex_lock(&ftrace_lock);
8165 do_for_each_ftrace_op(op, ftrace_ops_list) {
8166 if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8167 continue;
8168 if (ops_references_ip(op, ip)) {
8169 found_op = true;
8170 break;
8171 }
8172 } while_for_each_ftrace_op(op);
8173 mutex_unlock(&ftrace_lock);
8174
8175 /* The cleanup is optional, ignore any errors */
8176 if (found_op && op->ops_func)
8177 op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8178 }
8179 }
8180 mutex_unlock(&direct_mutex);
8181 }
8182
8183 #define lock_direct_mutex() mutex_lock(&direct_mutex)
8184 #define unlock_direct_mutex() mutex_unlock(&direct_mutex)
8185
8186 #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8187
8188 static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8189 {
8190 return 0;
8191 }
8192
8193 static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8194 {
8195 }
8196
8197 #define lock_direct_mutex() do { } while (0)
8198 #define unlock_direct_mutex() do { } while (0)
8199
8200 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8201
8202 /*
8203 * Similar to register_ftrace_function, except we don't lock direct_mutex.
8204 */
8205 static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8206 {
8207 int ret;
8208
8209 ftrace_ops_init(ops);
8210
8211 mutex_lock(&ftrace_lock);
8212
8213 ret = ftrace_startup(ops, 0);
8214
8215 mutex_unlock(&ftrace_lock);
8216
8217 return ret;
8218 }
8219
8220 /**
8221 * register_ftrace_function - register a function for profiling
8222 * @ops: ops structure that holds the function for profiling.
8223 *
8224 * Register a function to be called by all functions in the
8225 * kernel.
8226 *
8227 * Note: @ops->func and all the functions it calls must be labeled
8228 * with "notrace", otherwise it will go into a
8229 * recursive loop.
8230 */
8231 int register_ftrace_function(struct ftrace_ops *ops)
8232 {
8233 int ret;
8234
8235 lock_direct_mutex();
8236 ret = prepare_direct_functions_for_ipmodify(ops);
8237 if (ret < 0)
8238 goto out_unlock;
8239
8240 ret = register_ftrace_function_nolock(ops);
8241
8242 out_unlock:
8243 unlock_direct_mutex();
8244 return ret;
8245 }
8246 EXPORT_SYMBOL_GPL(register_ftrace_function);
8247
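/*
 * A minimal registration sketch, modeled on samples/ftrace/ftrace-ops.c
 * (my_callback and my_ops are hypothetical names):
 *
 *	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
 *					struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// everything reachable from here must also be notrace
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func  = my_callback,
 *		.flags = FTRACE_OPS_FL_RECURSION,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
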
8248 /**
8249 * unregister_ftrace_function - unregister a function for profiling.
8250 * @ops: ops structure that holds the function to unregister
8251 *
8252 * Unregister a function that was added to be called by ftrace profiling.
8253 */
8254 int unregister_ftrace_function(struct ftrace_ops *ops)
8255 {
8256 int ret;
8257
8258 mutex_lock(&ftrace_lock);
8259 ret = ftrace_shutdown(ops, 0);
8260 mutex_unlock(&ftrace_lock);
8261
8262 cleanup_direct_functions_after_ipmodify(ops);
8263 return ret;
8264 }
8265 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
8266
8267 static int symbols_cmp(const void *a, const void *b)
8268 {
8269 const char **str_a = (const char **) a;
8270 const char **str_b = (const char **) b;
8271
8272 return strcmp(*str_a, *str_b);
8273 }
8274
8275 struct kallsyms_data {
8276 unsigned long *addrs;
8277 const char **syms;
8278 size_t cnt;
8279 size_t found;
8280 };
8281
8282 static int kallsyms_callback(void *data, const char *name,
8283 struct module *mod, unsigned long addr)
8284 {
8285 struct kallsyms_data *args = data;
8286 const char **sym;
8287 int idx;
8288
8289 sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8290 if (!sym)
8291 return 0;
8292
8293 idx = sym - args->syms;
8294 if (args->addrs[idx])
8295 return 0;
8296
8297 if (!ftrace_location(addr))
8298 return 0;
8299
8300 args->addrs[idx] = addr;
8301 args->found++;
8302 return args->found == args->cnt ? 1 : 0;
8303 }
8304
8305 /**
8306 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8307 *
8308 * @sorted_syms: array of pointers to symbol names to resolve,
8309 * must be alphabetically sorted
8310 * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
8311 * @addrs: array for storing resulting addresses
8312 *
8313 * This function looks up addresses for the array of symbols provided
8314 * in @sorted_syms (which must be alphabetically sorted) and stores them
8315 * in the @addrs array, which needs to be big enough to store at least @cnt
8316 * addresses.
8317 *
8318 * This function returns 0 if all provided symbols are found,
8319 * -ESRCH otherwise.
8320 */
8321 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8322 {
8323 struct kallsyms_data args;
8324 int err;
8325
8326 memset(addrs, 0, sizeof(*addrs) * cnt);
8327 args.addrs = addrs;
8328 args.syms = sorted_syms;
8329 args.cnt = cnt;
8330 args.found = 0;
8331 err = kallsyms_on_each_symbol(kallsyms_callback, &args);
8332 if (err < 0)
8333 return err;
8334 return args.found == args.cnt ? 0 : -ESRCH;
8335 }
8336
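/*
 * A usage sketch (the symbol names are arbitrary examples). The input
 * array must be sorted with the same ordering that kallsyms_callback()
 * uses for bsearch(), i.e. plain strcmp() order:
 *
 *	const char *syms[] = { "kfree", "schedule", "vfs_read" };
 *	unsigned long addrs[ARRAY_SIZE(syms)];
 *	int err;
 *
 *	sort(syms, ARRAY_SIZE(syms), sizeof(*syms), symbols_cmp, NULL);
 *	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
 *	// on success, addrs[i] is the address of syms[i], and each one
 *	// is guaranteed to be a patchable ftrace location
 */
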
8337 #ifdef CONFIG_SYSCTL
8338
8339 #ifdef CONFIG_DYNAMIC_FTRACE
8340 static void ftrace_startup_sysctl(void)
8341 {
8342 int command;
8343
8344 if (unlikely(ftrace_disabled))
8345 return;
8346
8347 /* Force update next time */
8348 saved_ftrace_func = NULL;
8349 /* ftrace_start_up is true if we want ftrace running */
8350 if (ftrace_start_up) {
8351 command = FTRACE_UPDATE_CALLS;
8352 if (ftrace_graph_active)
8353 command |= FTRACE_START_FUNC_RET;
8354 ftrace_startup_enable(command);
8355 }
8356 }
8357
8358 static void ftrace_shutdown_sysctl(void)
8359 {
8360 int command;
8361
8362 if (unlikely(ftrace_disabled))
8363 return;
8364
8365 /* ftrace_start_up is true if ftrace is running */
8366 if (ftrace_start_up) {
8367 command = FTRACE_DISABLE_CALLS;
8368 if (ftrace_graph_active)
8369 command |= FTRACE_STOP_FUNC_RET;
8370 ftrace_run_update_code(command);
8371 }
8372 }
8373 #else
8374 # define ftrace_startup_sysctl() do { } while (0)
8375 # define ftrace_shutdown_sysctl() do { } while (0)
8376 #endif /* CONFIG_DYNAMIC_FTRACE */
8377
8378 static bool is_permanent_ops_registered(void)
8379 {
8380 struct ftrace_ops *op;
8381
8382 do_for_each_ftrace_op(op, ftrace_ops_list) {
8383 if (op->flags & FTRACE_OPS_FL_PERMANENT)
8384 return true;
8385 } while_for_each_ftrace_op(op);
8386
8387 return false;
8388 }
8389
8390 static int
8391 ftrace_enable_sysctl(struct ctl_table *table, int write,
8392 void *buffer, size_t *lenp, loff_t *ppos)
8393 {
8394 int ret = -ENODEV;
8395
8396 mutex_lock(&ftrace_lock);
8397
8398 if (unlikely(ftrace_disabled))
8399 goto out;
8400
8401 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8402
8403 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8404 goto out;
8405
8406 if (ftrace_enabled) {
8407
8408 /* we are starting ftrace again */
8409 if (rcu_dereference_protected(ftrace_ops_list,
8410 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8411 update_ftrace_function();
8412
8413 ftrace_startup_sysctl();
8414
8415 } else {
8416 if (is_permanent_ops_registered()) {
8417 ftrace_enabled = true;
8418 ret = -EBUSY;
8419 goto out;
8420 }
8421
8422 /* stopping ftrace calls (just send to ftrace_stub) */
8423 ftrace_trace_function = ftrace_stub;
8424
8425 ftrace_shutdown_sysctl();
8426 }
8427
8428 last_ftrace_enabled = !!ftrace_enabled;
8429 out:
8430 mutex_unlock(&ftrace_lock);
8431 return ret;
8432 }
8433
8434 static struct ctl_table ftrace_sysctls[] = {
8435 {
8436 .procname = "ftrace_enabled",
8437 .data = &ftrace_enabled,
8438 .maxlen = sizeof(int),
8439 .mode = 0644,
8440 .proc_handler = ftrace_enable_sysctl,
8441 },
8442 {}
8443 };
8444
8445 static int __init ftrace_sysctl_init(void)
8446 {
8447 register_sysctl_init("kernel", ftrace_sysctls);
8448 return 0;
8449 }
8450 late_initcall(ftrace_sysctl_init);
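
/*
 * A usage sketch for the knob registered above:
 *
 *	sysctl kernel.ftrace_enabled=0	# or: echo 0 > /proc/sys/kernel/ftrace_enabled
 *	sysctl kernel.ftrace_enabled=1
 *
 * Note that disabling fails with -EBUSY while any FTRACE_OPS_FL_PERMANENT
 * ops is registered, per is_permanent_ops_registered() above.
 */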
8451 #endif
8452