1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32 
33 #include <trace/events/sched.h>
34 
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
37 
38 #include "trace_output.h"
39 #include "trace_stat.h"
40 
41 #define FTRACE_WARN_ON(cond)			\
42 	do {					\
43 		if (WARN_ON(cond))		\
44 			ftrace_kill();		\
45 	} while (0)
46 
47 #define FTRACE_WARN_ON_ONCE(cond)		\
48 	do {					\
49 		if (WARN_ON_ONCE(cond))		\
50 			ftrace_kill();		\
51 	} while (0)
52 
53 /* hash bits for specific function selection */
54 #define FTRACE_HASH_BITS 7
55 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
56 
57 /* ftrace_enabled is a method to turn ftrace on or off */
58 int ftrace_enabled __read_mostly;
59 static int last_ftrace_enabled;
60 
61 /* Quick disabling of function tracer. */
62 int function_trace_stop;
63 
64 /* List for set_ftrace_pid's pids. */
65 LIST_HEAD(ftrace_pids);
66 struct ftrace_pid {
67 	struct list_head list;
68 	struct pid *pid;
69 };
70 
71 /*
72  * ftrace_disabled is set when an anomaly is discovered.
73  * ftrace_disabled is much stronger than ftrace_enabled.
74  */
75 static int ftrace_disabled __read_mostly;
76 
77 static DEFINE_MUTEX(ftrace_lock);
78 
79 static struct ftrace_ops ftrace_list_end __read_mostly =
80 {
81 	.func		= ftrace_stub,
82 };
83 
84 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
85 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
86 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
87 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
88 
89 /*
90  * Traverse the ftrace_list, invoking all entries.  The reason that we
91  * can use rcu_dereference_raw() is that elements removed from this list
92  * are simply leaked, so there is no need to interact with a grace-period
93  * mechanism.  The rcu_dereference_raw() calls are needed to handle
94  * concurrent insertions into the ftrace_list.
95  *
96  * Silly Alpha and silly pointer-speculation compiler optimizations!
97  */
98 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
99 {
100 	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
101 
102 	while (op != &ftrace_list_end) {
103 		op->func(ip, parent_ip);
104 		op = rcu_dereference_raw(op->next); /*see above*/
105 	}
106 }
107 
108 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
109 {
110 	if (!test_tsk_trace_trace(current))
111 		return;
112 
113 	ftrace_pid_function(ip, parent_ip);
114 }
115 
116 static void set_ftrace_pid_function(ftrace_func_t func)
117 {
118 	/* do not set ftrace_pid_function to itself! */
119 	if (func != ftrace_pid_func)
120 		ftrace_pid_function = func;
121 }
122 
123 /**
124  * clear_ftrace_function - reset the ftrace function
125  *
126  * This NULLs the ftrace function and in essence stops
127  * tracing. There may be some lag before tracing actually stops.
128  */
129 void clear_ftrace_function(void)
130 {
131 	ftrace_trace_function = ftrace_stub;
132 	__ftrace_trace_function = ftrace_stub;
133 	ftrace_pid_function = ftrace_stub;
134 }
135 
136 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
137 /*
138  * For those archs that do not test function_trace_stop in their
139  * mcount call site, we need to do it from C.
140  */
141 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
142 {
143 	if (function_trace_stop)
144 		return;
145 
146 	__ftrace_trace_function(ip, parent_ip);
147 }
148 #endif
149 
150 static int __register_ftrace_function(struct ftrace_ops *ops)
151 {
152 	ops->next = ftrace_list;
153 	/*
154 	 * We are entering ops into the ftrace_list but another
155 	 * CPU might be walking that list. We need to make sure
156 	 * the ops->next pointer is valid before another CPU sees
157 	 * the ops pointer included into the ftrace_list.
158 	 */
159 	rcu_assign_pointer(ftrace_list, ops);
160 
161 	if (ftrace_enabled) {
162 		ftrace_func_t func;
163 
164 		if (ops->next == &ftrace_list_end)
165 			func = ops->func;
166 		else
167 			func = ftrace_list_func;
168 
169 		if (!list_empty(&ftrace_pids)) {
170 			set_ftrace_pid_function(func);
171 			func = ftrace_pid_func;
172 		}
173 
174 		/*
175 		 * For one func, simply call it directly.
176 		 * For more than one func, call the chain.
177 		 */
178 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
179 		ftrace_trace_function = func;
180 #else
181 		__ftrace_trace_function = func;
182 		ftrace_trace_function = ftrace_test_stop_func;
183 #endif
184 	}
185 
186 	return 0;
187 }
188 
189 static int __unregister_ftrace_function(struct ftrace_ops *ops)
190 {
191 	struct ftrace_ops **p;
192 
193 	/*
194 	 * If we are removing the last function, then simply point
195 	 * to the ftrace_stub.
196 	 */
197 	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
198 		ftrace_trace_function = ftrace_stub;
199 		ftrace_list = &ftrace_list_end;
200 		return 0;
201 	}
202 
203 	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
204 		if (*p == ops)
205 			break;
206 
207 	if (*p != ops)
208 		return -1;
209 
210 	*p = (*p)->next;
211 
212 	if (ftrace_enabled) {
213 		/* If we only have one func left, then call that directly */
214 		if (ftrace_list->next == &ftrace_list_end) {
215 			ftrace_func_t func = ftrace_list->func;
216 
217 			if (!list_empty(&ftrace_pids)) {
218 				set_ftrace_pid_function(func);
219 				func = ftrace_pid_func;
220 			}
221 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
222 			ftrace_trace_function = func;
223 #else
224 			__ftrace_trace_function = func;
225 #endif
226 		}
227 	}
228 
229 	return 0;
230 }
231 
232 static void ftrace_update_pid_func(void)
233 {
234 	ftrace_func_t func;
235 
236 	if (ftrace_trace_function == ftrace_stub)
237 		return;
238 
239 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
240 	func = ftrace_trace_function;
241 #else
242 	func = __ftrace_trace_function;
243 #endif
244 
245 	if (!list_empty(&ftrace_pids)) {
246 		set_ftrace_pid_function(func);
247 		func = ftrace_pid_func;
248 	} else {
249 		if (func == ftrace_pid_func)
250 			func = ftrace_pid_function;
251 	}
252 
253 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
254 	ftrace_trace_function = func;
255 #else
256 	__ftrace_trace_function = func;
257 #endif
258 }
259 
260 #ifdef CONFIG_FUNCTION_PROFILER
261 struct ftrace_profile {
262 	struct hlist_node		node;
263 	unsigned long			ip;
264 	unsigned long			counter;
265 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
266 	unsigned long long		time;
267 	unsigned long long		time_squared;
268 #endif
269 };
270 
271 struct ftrace_profile_page {
272 	struct ftrace_profile_page	*next;
273 	unsigned long			index;
274 	struct ftrace_profile		records[];
275 };
276 
277 struct ftrace_profile_stat {
278 	atomic_t			disabled;
279 	struct hlist_head		*hash;
280 	struct ftrace_profile_page	*pages;
281 	struct ftrace_profile_page	*start;
282 	struct tracer_stat		stat;
283 };
284 
285 #define PROFILE_RECORDS_SIZE						\
286 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
287 
288 #define PROFILES_PER_PAGE					\
289 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
290 
291 static int ftrace_profile_bits __read_mostly;
292 static int ftrace_profile_enabled __read_mostly;
293 
294 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
295 static DEFINE_MUTEX(ftrace_profile_lock);
296 
297 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
298 
299 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
300 
301 static void *
302 function_stat_next(void *v, int idx)
303 {
304 	struct ftrace_profile *rec = v;
305 	struct ftrace_profile_page *pg;
306 
307 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
308 
309  again:
310 	if (idx != 0)
311 		rec++;
312 
313 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
314 		pg = pg->next;
315 		if (!pg)
316 			return NULL;
317 		rec = &pg->records[0];
318 		if (!rec->counter)
319 			goto again;
320 	}
321 
322 	return rec;
323 }
324 
325 static void *function_stat_start(struct tracer_stat *trace)
326 {
327 	struct ftrace_profile_stat *stat =
328 		container_of(trace, struct ftrace_profile_stat, stat);
329 
330 	if (!stat || !stat->start)
331 		return NULL;
332 
333 	return function_stat_next(&stat->start->records[0], 0);
334 }
335 
336 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
337 /* function graph compares on total time */
338 static int function_stat_cmp(void *p1, void *p2)
339 {
340 	struct ftrace_profile *a = p1;
341 	struct ftrace_profile *b = p2;
342 
343 	if (a->time < b->time)
344 		return -1;
345 	if (a->time > b->time)
346 		return 1;
347 	else
348 		return 0;
349 }
350 #else
351 /* without function graph, compare against the hit counter */
352 static int function_stat_cmp(void *p1, void *p2)
353 {
354 	struct ftrace_profile *a = p1;
355 	struct ftrace_profile *b = p2;
356 
357 	if (a->counter < b->counter)
358 		return -1;
359 	if (a->counter > b->counter)
360 		return 1;
361 	else
362 		return 0;
363 }
364 #endif
365 
366 static int function_stat_headers(struct seq_file *m)
367 {
368 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
369 	seq_printf(m, "  Function                               "
370 		   "Hit    Time            Avg             s^2\n"
371 		      "  --------                               "
372 		   "---    ----            ---             ---\n");
373 #else
374 	seq_printf(m, "  Function                               Hit\n"
375 		      "  --------                               ---\n");
376 #endif
377 	return 0;
378 }
379 
380 static int function_stat_show(struct seq_file *m, void *v)
381 {
382 	struct ftrace_profile *rec = v;
383 	char str[KSYM_SYMBOL_LEN];
384 	int ret = 0;
385 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
386 	static struct trace_seq s;
387 	unsigned long long avg;
388 	unsigned long long stddev;
389 #endif
390 	mutex_lock(&ftrace_profile_lock);
391 
392 	/* we raced with function_profile_reset() */
393 	if (unlikely(rec->counter == 0)) {
394 		ret = -EBUSY;
395 		goto out;
396 	}
397 
398 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
399 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
400 
401 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
402 	seq_printf(m, "    ");
403 	avg = rec->time;
404 	do_div(avg, rec->counter);
405 
406 	/* Sample variance (s^2) */
407 	if (rec->counter <= 1)
408 		stddev = 0;
409 	else {
410 		stddev = rec->time_squared - rec->counter * avg * avg;
411 		/*
412 		 * Divide by only 1000 for the ns^2 -> us^2 conversion;
413 		 * trace_print_graph_duration() will divide by 1000 again.
414 		 */
415 		do_div(stddev, (rec->counter - 1) * 1000);
416 	}
417 
418 	trace_seq_init(&s);
419 	trace_print_graph_duration(rec->time, &s);
420 	trace_seq_puts(&s, "    ");
421 	trace_print_graph_duration(avg, &s);
422 	trace_seq_puts(&s, "    ");
423 	trace_print_graph_duration(stddev, &s);
424 	trace_print_seq(m, &s);
425 #endif
426 	seq_putc(m, '\n');
427 out:
428 	mutex_unlock(&ftrace_profile_lock);
429 
430 	return ret;
431 }
432 
433 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
434 {
435 	struct ftrace_profile_page *pg;
436 
437 	pg = stat->pages = stat->start;
438 
439 	while (pg) {
440 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
441 		pg->index = 0;
442 		pg = pg->next;
443 	}
444 
445 	memset(stat->hash, 0,
446 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
447 }
448 
449 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
450 {
451 	struct ftrace_profile_page *pg;
452 	int functions;
453 	int pages;
454 	int i;
455 
456 	/* If we already allocated, do nothing */
457 	if (stat->pages)
458 		return 0;
459 
460 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
461 	if (!stat->pages)
462 		return -ENOMEM;
463 
464 #ifdef CONFIG_DYNAMIC_FTRACE
465 	functions = ftrace_update_tot_cnt;
466 #else
467 	/*
468 	 * We do not know the number of functions that exist because
469 	 * dynamic tracing is what counts them. From past experience
470 	 * we know there are around 20K functions. That should be more than enough.
471 	 * It is highly unlikely we will execute every function in
472 	 * the kernel.
473 	 */
474 	functions = 20000;
475 #endif
476 
477 	pg = stat->start = stat->pages;
478 
479 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
480 
481 	for (i = 0; i < pages; i++) {
482 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
483 		if (!pg->next)
484 			goto out_free;
485 		pg = pg->next;
486 	}
487 
488 	return 0;
489 
490  out_free:
491 	pg = stat->start;
492 	while (pg) {
493 		unsigned long tmp = (unsigned long)pg;
494 
495 		pg = pg->next;
496 		free_page(tmp);
497 	}
498 
499 	free_page((unsigned long)stat->pages);
500 	stat->pages = NULL;
501 	stat->start = NULL;
502 
503 	return -ENOMEM;
504 }
505 
506 static int ftrace_profile_init_cpu(int cpu)
507 {
508 	struct ftrace_profile_stat *stat;
509 	int size;
510 
511 	stat = &per_cpu(ftrace_profile_stats, cpu);
512 
513 	if (stat->hash) {
514 		/* If the profile is already created, simply reset it */
515 		ftrace_profile_reset(stat);
516 		return 0;
517 	}
518 
519 	/*
520 	 * We are profiling all functions, but usually only a few thousand
521 	 * functions are hit. We'll make a hash of 1024 items.
522 	 */
523 	size = FTRACE_PROFILE_HASH_SIZE;
524 
525 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
526 
527 	if (!stat->hash)
528 		return -ENOMEM;
529 
530 	if (!ftrace_profile_bits) {
531 		size--;
532 
533 		for (; size; size >>= 1)
534 			ftrace_profile_bits++;
535 	}
536 
537 	/* Preallocate the function profiling pages */
538 	if (ftrace_profile_pages_init(stat) < 0) {
539 		kfree(stat->hash);
540 		stat->hash = NULL;
541 		return -ENOMEM;
542 	}
543 
544 	return 0;
545 }
546 
547 static int ftrace_profile_init(void)
548 {
549 	int cpu;
550 	int ret = 0;
551 
552 	for_each_online_cpu(cpu) {
553 		ret = ftrace_profile_init_cpu(cpu);
554 		if (ret)
555 			break;
556 	}
557 
558 	return ret;
559 }
560 
561 /* interrupts must be disabled */
562 static struct ftrace_profile *
563 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
564 {
565 	struct ftrace_profile *rec;
566 	struct hlist_head *hhd;
567 	struct hlist_node *n;
568 	unsigned long key;
569 
570 	key = hash_long(ip, ftrace_profile_bits);
571 	hhd = &stat->hash[key];
572 
573 	if (hlist_empty(hhd))
574 		return NULL;
575 
576 	hlist_for_each_entry_rcu(rec, n, hhd, node) {
577 		if (rec->ip == ip)
578 			return rec;
579 	}
580 
581 	return NULL;
582 }
583 
584 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
585 			       struct ftrace_profile *rec)
586 {
587 	unsigned long key;
588 
589 	key = hash_long(rec->ip, ftrace_profile_bits);
590 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
591 }
592 
593 /*
594  * The memory is already allocated; this simply finds a new record to use.
595  */
596 static struct ftrace_profile *
597 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
598 {
599 	struct ftrace_profile *rec = NULL;
600 
601 	/* prevent recursion (from NMIs) */
602 	if (atomic_inc_return(&stat->disabled) != 1)
603 		goto out;
604 
605 	/*
606 	 * Try to find the function again since an NMI
607 	 * could have added it
608 	 */
609 	rec = ftrace_find_profiled_func(stat, ip);
610 	if (rec)
611 		goto out;
612 
613 	if (stat->pages->index == PROFILES_PER_PAGE) {
614 		if (!stat->pages->next)
615 			goto out;
616 		stat->pages = stat->pages->next;
617 	}
618 
619 	rec = &stat->pages->records[stat->pages->index++];
620 	rec->ip = ip;
621 	ftrace_add_profile(stat, rec);
622 
623  out:
624 	atomic_dec(&stat->disabled);
625 
626 	return rec;
627 }
628 
629 static void
630 function_profile_call(unsigned long ip, unsigned long parent_ip)
631 {
632 	struct ftrace_profile_stat *stat;
633 	struct ftrace_profile *rec;
634 	unsigned long flags;
635 
636 	if (!ftrace_profile_enabled)
637 		return;
638 
639 	local_irq_save(flags);
640 
641 	stat = &__get_cpu_var(ftrace_profile_stats);
642 	if (!stat->hash || !ftrace_profile_enabled)
643 		goto out;
644 
645 	rec = ftrace_find_profiled_func(stat, ip);
646 	if (!rec) {
647 		rec = ftrace_profile_alloc(stat, ip);
648 		if (!rec)
649 			goto out;
650 	}
651 
652 	rec->counter++;
653  out:
654 	local_irq_restore(flags);
655 }
656 
657 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
658 static int profile_graph_entry(struct ftrace_graph_ent *trace)
659 {
660 	function_profile_call(trace->func, 0);
661 	return 1;
662 }
663 
664 static void profile_graph_return(struct ftrace_graph_ret *trace)
665 {
666 	struct ftrace_profile_stat *stat;
667 	unsigned long long calltime;
668 	struct ftrace_profile *rec;
669 	unsigned long flags;
670 
671 	local_irq_save(flags);
672 	stat = &__get_cpu_var(ftrace_profile_stats);
673 	if (!stat->hash || !ftrace_profile_enabled)
674 		goto out;
675 
676 	/* If the calltime was zeroed, ignore it */
677 	if (!trace->calltime)
678 		goto out;
679 
680 	calltime = trace->rettime - trace->calltime;
681 
682 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
683 		int index;
684 
685 		index = trace->depth;
686 
687 		/* Append this call time to the parent time to subtract */
688 		if (index)
689 			current->ret_stack[index - 1].subtime += calltime;
690 
691 		if (current->ret_stack[index].subtime < calltime)
692 			calltime -= current->ret_stack[index].subtime;
693 		else
694 			calltime = 0;
695 	}
696 
697 	rec = ftrace_find_profiled_func(stat, trace->func);
698 	if (rec) {
699 		rec->time += calltime;
700 		rec->time_squared += calltime * calltime;
701 	}
702 
703  out:
704 	local_irq_restore(flags);
705 }
706 
707 static int register_ftrace_profiler(void)
708 {
709 	return register_ftrace_graph(&profile_graph_return,
710 				     &profile_graph_entry);
711 }
712 
713 static void unregister_ftrace_profiler(void)
714 {
715 	unregister_ftrace_graph();
716 }
717 #else
718 static struct ftrace_ops ftrace_profile_ops __read_mostly =
719 {
720 	.func		= function_profile_call,
721 };
722 
723 static int register_ftrace_profiler(void)
724 {
725 	return register_ftrace_function(&ftrace_profile_ops);
726 }
727 
728 static void unregister_ftrace_profiler(void)
729 {
730 	unregister_ftrace_function(&ftrace_profile_ops);
731 }
732 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
733 
734 static ssize_t
735 ftrace_profile_write(struct file *filp, const char __user *ubuf,
736 		     size_t cnt, loff_t *ppos)
737 {
738 	unsigned long val;
739 	char buf[64];		/* big enough to hold a number */
740 	int ret;
741 
742 	if (cnt >= sizeof(buf))
743 		return -EINVAL;
744 
745 	if (copy_from_user(&buf, ubuf, cnt))
746 		return -EFAULT;
747 
748 	buf[cnt] = 0;
749 
750 	ret = strict_strtoul(buf, 10, &val);
751 	if (ret < 0)
752 		return ret;
753 
754 	val = !!val;
755 
756 	mutex_lock(&ftrace_profile_lock);
757 	if (ftrace_profile_enabled ^ val) {
758 		if (val) {
759 			ret = ftrace_profile_init();
760 			if (ret < 0) {
761 				cnt = ret;
762 				goto out;
763 			}
764 
765 			ret = register_ftrace_profiler();
766 			if (ret < 0) {
767 				cnt = ret;
768 				goto out;
769 			}
770 			ftrace_profile_enabled = 1;
771 		} else {
772 			ftrace_profile_enabled = 0;
773 			/*
774 			 * unregister_ftrace_profiler() calls stop_machine(),
775 			 * so this acts like a synchronize_sched().
776 			 */
777 			unregister_ftrace_profiler();
778 		}
779 	}
780  out:
781 	mutex_unlock(&ftrace_profile_lock);
782 
783 	*ppos += cnt;
784 
785 	return cnt;
786 }
787 
788 static ssize_t
789 ftrace_profile_read(struct file *filp, char __user *ubuf,
790 		     size_t cnt, loff_t *ppos)
791 {
792 	char buf[64];		/* big enough to hold a number */
793 	int r;
794 
795 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
796 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
797 }
798 
799 static const struct file_operations ftrace_profile_fops = {
800 	.open		= tracing_open_generic,
801 	.read		= ftrace_profile_read,
802 	.write		= ftrace_profile_write,
803 	.llseek		= default_llseek,
804 };
805 
806 /* used to initialize the real stat files */
807 static struct tracer_stat function_stats __initdata = {
808 	.name		= "functions",
809 	.stat_start	= function_stat_start,
810 	.stat_next	= function_stat_next,
811 	.stat_cmp	= function_stat_cmp,
812 	.stat_headers	= function_stat_headers,
813 	.stat_show	= function_stat_show
814 };
815 
816 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
817 {
818 	struct ftrace_profile_stat *stat;
819 	struct dentry *entry;
820 	char *name;
821 	int ret;
822 	int cpu;
823 
824 	for_each_possible_cpu(cpu) {
825 		stat = &per_cpu(ftrace_profile_stats, cpu);
826 
827 		/* allocate enough for function name + cpu number */
828 		name = kmalloc(32, GFP_KERNEL);
829 		if (!name) {
830 			/*
831 			 * The files created are permanent; if something goes wrong
832 			 * here we still do not free the memory.
833 			 */
834 			WARN(1,
835 			     "Could not allocate stat file for cpu %d\n",
836 			     cpu);
837 			return;
838 		}
839 		stat->stat = function_stats;
840 		snprintf(name, 32, "function%d", cpu);
841 		stat->stat.name = name;
842 		ret = register_stat_tracer(&stat->stat);
843 		if (ret) {
844 			WARN(1,
845 			     "Could not register function stat for cpu %d\n",
846 			     cpu);
847 			kfree(name);
848 			return;
849 		}
850 	}
851 
852 	entry = debugfs_create_file("function_profile_enabled", 0644,
853 				    d_tracer, NULL, &ftrace_profile_fops);
854 	if (!entry)
855 		pr_warning("Could not create debugfs "
856 			   "'function_profile_enabled' entry\n");
857 }
858 
859 #else /* CONFIG_FUNCTION_PROFILER */
860 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
861 {
862 }
863 #endif /* CONFIG_FUNCTION_PROFILER */
864 
865 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
866 
867 #ifdef CONFIG_DYNAMIC_FTRACE
868 
869 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
870 # error Dynamic ftrace depends on MCOUNT_RECORD
871 #endif
872 
873 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
874 
875 struct ftrace_func_probe {
876 	struct hlist_node	node;
877 	struct ftrace_probe_ops	*ops;
878 	unsigned long		flags;
879 	unsigned long		ip;
880 	void			*data;
881 	struct rcu_head		rcu;
882 };
883 
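/*
 * Command bits passed to ftrace_run_update_code(); __ftrace_modify_code()
 * tests them while running under stop_machine().
 */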
884 enum {
885 	FTRACE_ENABLE_CALLS		= (1 << 0),
886 	FTRACE_DISABLE_CALLS		= (1 << 1),
887 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
888 	FTRACE_START_FUNC_RET		= (1 << 3),
889 	FTRACE_STOP_FUNC_RET		= (1 << 4),
890 };
891 
892 static int ftrace_filtered;
893 
894 static struct dyn_ftrace *ftrace_new_addrs;
895 
896 static DEFINE_MUTEX(ftrace_regex_lock);
897 
898 struct ftrace_page {
899 	struct ftrace_page	*next;
900 	int			index;
901 	struct dyn_ftrace	records[];
902 };
903 
904 #define ENTRIES_PER_PAGE \
905   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
906 
907 /* estimate from running different kernels */
908 #define NR_TO_INIT		10000
909 
910 static struct ftrace_page	*ftrace_pages_start;
911 static struct ftrace_page	*ftrace_pages;
912 
913 static struct dyn_ftrace *ftrace_free_records;
914 
915 /*
916  * This is a double for loop. Do not use 'break' to break out of the
917  * loop; you must use a goto.
918  */
919 #define do_for_each_ftrace_rec(pg, rec)					\
920 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
921 		int _____i;						\
922 		for (_____i = 0; _____i < pg->index; _____i++) {	\
923 			rec = &pg->records[_____i];
924 
925 #define while_for_each_ftrace_rec()		\
926 		}				\
927 	}
928 
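/* Return a dyn_ftrace record to the free list and mark it FTRACE_FL_FREE. */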
929 static void ftrace_free_rec(struct dyn_ftrace *rec)
930 {
931 	rec->freelist = ftrace_free_records;
932 	ftrace_free_records = rec;
933 	rec->flags |= FTRACE_FL_FREE;
934 }
935 
936 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
937 {
938 	struct dyn_ftrace *rec;
939 
940 	/* First check for freed records */
941 	if (ftrace_free_records) {
942 		rec = ftrace_free_records;
943 
944 		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
945 			FTRACE_WARN_ON_ONCE(1);
946 			ftrace_free_records = NULL;
947 			return NULL;
948 		}
949 
950 		ftrace_free_records = rec->freelist;
951 		memset(rec, 0, sizeof(*rec));
952 		return rec;
953 	}
954 
955 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
956 		if (!ftrace_pages->next) {
957 			/* allocate another page */
958 			ftrace_pages->next =
959 				(void *)get_zeroed_page(GFP_KERNEL);
960 			if (!ftrace_pages->next)
961 				return NULL;
962 		}
963 		ftrace_pages = ftrace_pages->next;
964 	}
965 
966 	return &ftrace_pages->records[ftrace_pages->index++];
967 }
968 
969 static struct dyn_ftrace *
970 ftrace_record_ip(unsigned long ip)
971 {
972 	struct dyn_ftrace *rec;
973 
974 	if (ftrace_disabled)
975 		return NULL;
976 
977 	rec = ftrace_alloc_dyn_node(ip);
978 	if (!rec)
979 		return NULL;
980 
981 	rec->ip = ip;
982 	rec->newlist = ftrace_new_addrs;
983 	ftrace_new_addrs = rec;
984 
985 	return rec;
986 }
987 
988 static void print_ip_ins(const char *fmt, unsigned char *p)
989 {
990 	int i;
991 
992 	printk(KERN_CONT "%s", fmt);
993 
994 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
995 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
996 }
997 
998 static void ftrace_bug(int failed, unsigned long ip)
999 {
1000 	switch (failed) {
1001 	case -EFAULT:
1002 		FTRACE_WARN_ON_ONCE(1);
1003 		pr_info("ftrace faulted on modifying ");
1004 		print_ip_sym(ip);
1005 		break;
1006 	case -EINVAL:
1007 		FTRACE_WARN_ON_ONCE(1);
1008 		pr_info("ftrace failed to modify ");
1009 		print_ip_sym(ip);
1010 		print_ip_ins(" actual: ", (unsigned char *)ip);
1011 		printk(KERN_CONT "\n");
1012 		break;
1013 	case -EPERM:
1014 		FTRACE_WARN_ON_ONCE(1);
1015 		pr_info("ftrace faulted on writing ");
1016 		print_ip_sym(ip);
1017 		break;
1018 	default:
1019 		FTRACE_WARN_ON_ONCE(1);
1020 		pr_info("ftrace faulted on unknown error ");
1021 		print_ip_sym(ip);
1022 	}
1023 }
1024 
1025 
1026 /* Return 1 if the address range is reserved for ftrace */
1027 int ftrace_text_reserved(void *start, void *end)
1028 {
1029 	struct dyn_ftrace *rec;
1030 	struct ftrace_page *pg;
1031 
1032 	do_for_each_ftrace_rec(pg, rec) {
1033 		if (rec->ip <= (unsigned long)end &&
1034 		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1035 			return 1;
1036 	} while_for_each_ftrace_rec();
1037 	return 0;
1038 }
1039 
1040 
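/*
 * Update a single mcount call site: patch in a call to ftrace if the
 * record should be traced, or a NOP if it should not.  Returns 0 when
 * the record's state is already correct, otherwise the result of the
 * arch-specific patching.
 */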
1041 static int
1042 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1043 {
1044 	unsigned long ftrace_addr;
1045 	unsigned long flag = 0UL;
1046 
1047 	ftrace_addr = (unsigned long)FTRACE_ADDR;
1048 
1049 	/*
1050 	 * If this record is not to be traced or we want to disable it,
1051 	 * then disable it.
1052 	 *
1053 	 * If we want to enable it and filtering is off, then enable it.
1054 	 *
1055 	 * If we want to enable it and filtering is on, enable it only if
1056 	 * it's filtered
1057 	 */
1058 	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1059 		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1060 			flag = FTRACE_FL_ENABLED;
1061 	}
1062 
1063 	/* If the state of this record hasn't changed, then do nothing */
1064 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1065 		return 0;
1066 
1067 	if (flag) {
1068 		rec->flags |= FTRACE_FL_ENABLED;
1069 		return ftrace_make_call(rec, ftrace_addr);
1070 	}
1071 
1072 	rec->flags &= ~FTRACE_FL_ENABLED;
1073 	return ftrace_make_nop(NULL, rec, ftrace_addr);
1074 }
1075 
1076 static void ftrace_replace_code(int enable)
1077 {
1078 	struct dyn_ftrace *rec;
1079 	struct ftrace_page *pg;
1080 	int failed;
1081 
1082 	do_for_each_ftrace_rec(pg, rec) {
1083 		/*
1084 		 * Skip over free records, records that have failed,
1085 		 * and records that have not been converted.
1086 		 */
1087 		if (rec->flags & FTRACE_FL_FREE ||
1088 		    rec->flags & FTRACE_FL_FAILED ||
1089 		    !(rec->flags & FTRACE_FL_CONVERTED))
1090 			continue;
1091 
1092 		failed = __ftrace_replace_code(rec, enable);
1093 		if (failed) {
1094 			rec->flags |= FTRACE_FL_FAILED;
1095 			ftrace_bug(failed, rec->ip);
1096 			/* Stop processing */
1097 			return;
1098 		}
1099 	} while_for_each_ftrace_rec();
1100 }
1101 
1102 static int
1103 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1104 {
1105 	unsigned long ip;
1106 	int ret;
1107 
1108 	ip = rec->ip;
1109 
1110 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1111 	if (ret) {
1112 		ftrace_bug(ret, ip);
1113 		rec->flags |= FTRACE_FL_FAILED;
1114 		return 0;
1115 	}
1116 	return 1;
1117 }
1118 
1119 /*
1120  * archs can override this function if they must do something
1121  * before the code modification is performed.
1122  */
1123 int __weak ftrace_arch_code_modify_prepare(void)
1124 {
1125 	return 0;
1126 }
1127 
1128 /*
1129  * archs can override this function if they must do something
1130  * after the code modification is performed.
1131  */
1132 int __weak ftrace_arch_code_modify_post_process(void)
1133 {
1134 	return 0;
1135 }
1136 
1137 static int __ftrace_modify_code(void *data)
1138 {
1139 	int *command = data;
1140 
1141 	if (*command & FTRACE_ENABLE_CALLS)
1142 		ftrace_replace_code(1);
1143 	else if (*command & FTRACE_DISABLE_CALLS)
1144 		ftrace_replace_code(0);
1145 
1146 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
1147 		ftrace_update_ftrace_func(ftrace_trace_function);
1148 
1149 	if (*command & FTRACE_START_FUNC_RET)
1150 		ftrace_enable_ftrace_graph_caller();
1151 	else if (*command & FTRACE_STOP_FUNC_RET)
1152 		ftrace_disable_ftrace_graph_caller();
1153 
1154 	return 0;
1155 }
1156 
1157 static void ftrace_run_update_code(int command)
1158 {
1159 	int ret;
1160 
1161 	ret = ftrace_arch_code_modify_prepare();
1162 	FTRACE_WARN_ON(ret);
1163 	if (ret)
1164 		return;
1165 
1166 	stop_machine(__ftrace_modify_code, &command, NULL);
1167 
1168 	ret = ftrace_arch_code_modify_post_process();
1169 	FTRACE_WARN_ON(ret);
1170 }
1171 
1172 static ftrace_func_t saved_ftrace_func;
1173 static int ftrace_start_up;
1174 
1175 static void ftrace_startup_enable(int command)
1176 {
1177 	if (saved_ftrace_func != ftrace_trace_function) {
1178 		saved_ftrace_func = ftrace_trace_function;
1179 		command |= FTRACE_UPDATE_TRACE_FUNC;
1180 	}
1181 
1182 	if (!command || !ftrace_enabled)
1183 		return;
1184 
1185 	ftrace_run_update_code(command);
1186 }
1187 
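/*
 * Enable function tracing: bump the ftrace_start_up count and, if
 * ftrace is enabled, patch the mcount call sites via
 * ftrace_run_update_code().
 */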
1188 static void ftrace_startup(int command)
1189 {
1190 	if (unlikely(ftrace_disabled))
1191 		return;
1192 
1193 	ftrace_start_up++;
1194 	command |= FTRACE_ENABLE_CALLS;
1195 
1196 	ftrace_startup_enable(command);
1197 }
1198 
1199 static void ftrace_shutdown(int command)
1200 {
1201 	if (unlikely(ftrace_disabled))
1202 		return;
1203 
1204 	ftrace_start_up--;
1205 	/*
1206 	 * Just warn in case of an imbalance; no need to kill ftrace, it's not
1207 	 * critical, but the ftrace_call callers may never be nopped again after
1208 	 * further ftrace uses.
1209 	 */
1210 	WARN_ON_ONCE(ftrace_start_up < 0);
1211 
1212 	if (!ftrace_start_up)
1213 		command |= FTRACE_DISABLE_CALLS;
1214 
1215 	if (saved_ftrace_func != ftrace_trace_function) {
1216 		saved_ftrace_func = ftrace_trace_function;
1217 		command |= FTRACE_UPDATE_TRACE_FUNC;
1218 	}
1219 
1220 	if (!command || !ftrace_enabled)
1221 		return;
1222 
1223 	ftrace_run_update_code(command);
1224 }
1225 
1226 static void ftrace_startup_sysctl(void)
1227 {
1228 	if (unlikely(ftrace_disabled))
1229 		return;
1230 
1231 	/* Force update next time */
1232 	saved_ftrace_func = NULL;
1233 	/* ftrace_start_up is true if we want ftrace running */
1234 	if (ftrace_start_up)
1235 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1236 }
1237 
1238 static void ftrace_shutdown_sysctl(void)
1239 {
1240 	if (unlikely(ftrace_disabled))
1241 		return;
1242 
1243 	/* ftrace_start_up is true if ftrace is running */
1244 	if (ftrace_start_up)
1245 		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1246 }
1247 
1248 static cycle_t		ftrace_update_time;
1249 static unsigned long	ftrace_update_cnt;
1250 unsigned long		ftrace_update_tot_cnt;
1251 
1252 static int ftrace_update_code(struct module *mod)
1253 {
1254 	struct dyn_ftrace *p;
1255 	cycle_t start, stop;
1256 
1257 	start = ftrace_now(raw_smp_processor_id());
1258 	ftrace_update_cnt = 0;
1259 
1260 	while (ftrace_new_addrs) {
1261 
1262 		/* If something went wrong, bail without enabling anything */
1263 		if (unlikely(ftrace_disabled))
1264 			return -1;
1265 
1266 		p = ftrace_new_addrs;
1267 		ftrace_new_addrs = p->newlist;
1268 		p->flags = 0L;
1269 
1270 		/*
1271 		 * Do the initial record conversion from mcount jump
1272 		 * to the NOP instructions.
1273 		 */
1274 		if (!ftrace_code_disable(mod, p)) {
1275 			ftrace_free_rec(p);
1276 			continue;
1277 		}
1278 
1279 		p->flags |= FTRACE_FL_CONVERTED;
1280 		ftrace_update_cnt++;
1281 
1282 		/*
1283 		 * If the tracing is enabled, go ahead and enable the record.
1284 		 *
1285 		 * The reason not to enable the record immediately is the
1286 		 * inherent check of ftrace_make_nop/ftrace_make_call for
1287 		 * correct previous instructions.  Making first the NOP
1288 		 * conversion puts the module to the correct state, thus
1289 		 * passing the ftrace_make_call check.
1290 		 */
1291 		if (ftrace_start_up) {
1292 			int failed = __ftrace_replace_code(p, 1);
1293 			if (failed) {
1294 				ftrace_bug(failed, p->ip);
1295 				ftrace_free_rec(p);
1296 			}
1297 		}
1298 	}
1299 
1300 	stop = ftrace_now(raw_smp_processor_id());
1301 	ftrace_update_time = stop - start;
1302 	ftrace_update_tot_cnt += ftrace_update_cnt;
1303 
1304 	return 0;
1305 }
1306 
1307 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1308 {
1309 	struct ftrace_page *pg;
1310 	int cnt;
1311 	int i;
1312 
1313 	/* allocate a few pages */
1314 	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1315 	if (!ftrace_pages_start)
1316 		return -1;
1317 
1318 	/*
1319 	 * Allocate a few more pages.
1320 	 *
1321 	 * TODO: have some parser search vmlinux before
1322 	 *   final linking to find all calls to ftrace.
1323 	 *   Then we can:
1324 	 *    a) know how many pages to allocate.
1325 	 *     and/or
1326 	 *    b) set up the table then.
1327 	 *
1328 	 *  The dynamic code is still necessary for
1329 	 *  modules.
1330 	 */
1331 
1332 	pg = ftrace_pages = ftrace_pages_start;
1333 
1334 	cnt = num_to_init / ENTRIES_PER_PAGE;
1335 	pr_info("ftrace: allocating %ld entries in %d pages\n",
1336 		num_to_init, cnt + 1);
1337 
1338 	for (i = 0; i < cnt; i++) {
1339 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1340 
1341 		/* If we fail, we'll try later anyway */
1342 		if (!pg->next)
1343 			break;
1344 
1345 		pg = pg->next;
1346 	}
1347 
1348 	return 0;
1349 }
1350 
1351 enum {
1352 	FTRACE_ITER_FILTER	= (1 << 0),
1353 	FTRACE_ITER_NOTRACE	= (1 << 1),
1354 	FTRACE_ITER_FAILURES	= (1 << 2),
1355 	FTRACE_ITER_PRINTALL	= (1 << 3),
1356 	FTRACE_ITER_HASH	= (1 << 4),
1357 };
1358 
1359 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1360 
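/* Iterator state shared by the available/filter/notrace seq_file readers. */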
1361 struct ftrace_iterator {
1362 	loff_t				pos;
1363 	loff_t				func_pos;
1364 	struct ftrace_page		*pg;
1365 	struct dyn_ftrace		*func;
1366 	struct ftrace_func_probe	*probe;
1367 	struct trace_parser		parser;
1368 	int				hidx;
1369 	int				idx;
1370 	unsigned			flags;
1371 };
1372 
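/* Advance the iterator through the ftrace_func_hash of registered probes. */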
1373 static void *
1374 t_hash_next(struct seq_file *m, loff_t *pos)
1375 {
1376 	struct ftrace_iterator *iter = m->private;
1377 	struct hlist_node *hnd = NULL;
1378 	struct hlist_head *hhd;
1379 
1380 	(*pos)++;
1381 	iter->pos = *pos;
1382 
1383 	if (iter->probe)
1384 		hnd = &iter->probe->node;
1385  retry:
1386 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1387 		return NULL;
1388 
1389 	hhd = &ftrace_func_hash[iter->hidx];
1390 
1391 	if (hlist_empty(hhd)) {
1392 		iter->hidx++;
1393 		hnd = NULL;
1394 		goto retry;
1395 	}
1396 
1397 	if (!hnd)
1398 		hnd = hhd->first;
1399 	else {
1400 		hnd = hnd->next;
1401 		if (!hnd) {
1402 			iter->hidx++;
1403 			goto retry;
1404 		}
1405 	}
1406 
1407 	if (WARN_ON_ONCE(!hnd))
1408 		return NULL;
1409 
1410 	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1411 
1412 	return iter;
1413 }
1414 
1415 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1416 {
1417 	struct ftrace_iterator *iter = m->private;
1418 	void *p = NULL;
1419 	loff_t l;
1420 
1421 	if (iter->func_pos > *pos)
1422 		return NULL;
1423 
1424 	iter->hidx = 0;
1425 	for (l = 0; l <= (*pos - iter->func_pos); ) {
1426 		p = t_hash_next(m, &l);
1427 		if (!p)
1428 			break;
1429 	}
1430 	if (!p)
1431 		return NULL;
1432 
1433 	/* Only set this if we have an item */
1434 	iter->flags |= FTRACE_ITER_HASH;
1435 
1436 	return iter;
1437 }
1438 
1439 static int
1440 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1441 {
1442 	struct ftrace_func_probe *rec;
1443 
1444 	rec = iter->probe;
1445 	if (WARN_ON_ONCE(!rec))
1446 		return -EIO;
1447 
1448 	if (rec->ops->print)
1449 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1450 
1451 	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1452 
1453 	if (rec->data)
1454 		seq_printf(m, ":%p", rec->data);
1455 	seq_putc(m, '\n');
1456 
1457 	return 0;
1458 }
1459 
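/*
 * Step to the next dyn_ftrace record that matches the iterator flags
 * (filter, notrace, failures); once the records run out, continue with
 * the probe hash.
 */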
1460 static void *
1461 t_next(struct seq_file *m, void *v, loff_t *pos)
1462 {
1463 	struct ftrace_iterator *iter = m->private;
1464 	struct dyn_ftrace *rec = NULL;
1465 
1466 	if (iter->flags & FTRACE_ITER_HASH)
1467 		return t_hash_next(m, pos);
1468 
1469 	(*pos)++;
1470 	iter->pos = iter->func_pos = *pos;
1471 
1472 	if (iter->flags & FTRACE_ITER_PRINTALL)
1473 		return t_hash_start(m, pos);
1474 
1475  retry:
1476 	if (iter->idx >= iter->pg->index) {
1477 		if (iter->pg->next) {
1478 			iter->pg = iter->pg->next;
1479 			iter->idx = 0;
1480 			goto retry;
1481 		}
1482 	} else {
1483 		rec = &iter->pg->records[iter->idx++];
1484 		if ((rec->flags & FTRACE_FL_FREE) ||
1485 
1486 		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
1487 		     (rec->flags & FTRACE_FL_FAILED)) ||
1488 
1489 		    ((iter->flags & FTRACE_ITER_FAILURES) &&
1490 		     !(rec->flags & FTRACE_FL_FAILED)) ||
1491 
1492 		    ((iter->flags & FTRACE_ITER_FILTER) &&
1493 		     !(rec->flags & FTRACE_FL_FILTER)) ||
1494 
1495 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
1496 		     !(rec->flags & FTRACE_FL_NOTRACE))) {
1497 			rec = NULL;
1498 			goto retry;
1499 		}
1500 	}
1501 
1502 	if (!rec)
1503 		return t_hash_start(m, pos);
1504 
1505 	iter->func = rec;
1506 
1507 	return iter;
1508 }
1509 
1510 static void reset_iter_read(struct ftrace_iterator *iter)
1511 {
1512 	iter->pos = 0;
1513 	iter->func_pos = 0;
1514 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
1515 }
1516 
1517 static void *t_start(struct seq_file *m, loff_t *pos)
1518 {
1519 	struct ftrace_iterator *iter = m->private;
1520 	void *p = NULL;
1521 	loff_t l;
1522 
1523 	mutex_lock(&ftrace_lock);
1524 	/*
1525 	 * If an lseek was done, then reset and start from beginning.
1526 	 */
1527 	if (*pos < iter->pos)
1528 		reset_iter_read(iter);
1529 
1530 	/*
1531 	 * For set_ftrace_filter reading, if we have the filter
1532 	 * off, we can short cut and just print out that all
1533 	 * functions are enabled.
1534 	 */
1535 	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1536 		if (*pos > 0)
1537 			return t_hash_start(m, pos);
1538 		iter->flags |= FTRACE_ITER_PRINTALL;
1539 		/* reset in case of seek/pread */
1540 		iter->flags &= ~FTRACE_ITER_HASH;
1541 		return iter;
1542 	}
1543 
1544 	if (iter->flags & FTRACE_ITER_HASH)
1545 		return t_hash_start(m, pos);
1546 
1547 	/*
1548 	 * Unfortunately, we need to restart at ftrace_pages_start
1549 	 * every time we let go of the ftrace_lock. This is because
1550 	 * those pointers can change without the lock.
1551 	 */
1552 	iter->pg = ftrace_pages_start;
1553 	iter->idx = 0;
1554 	for (l = 0; l <= *pos; ) {
1555 		p = t_next(m, p, &l);
1556 		if (!p)
1557 			break;
1558 	}
1559 
1560 	if (!p) {
1561 		if (iter->flags & FTRACE_ITER_FILTER)
1562 			return t_hash_start(m, pos);
1563 
1564 		return NULL;
1565 	}
1566 
1567 	return iter;
1568 }
1569 
1570 static void t_stop(struct seq_file *m, void *p)
1571 {
1572 	mutex_unlock(&ftrace_lock);
1573 }
1574 
1575 static int t_show(struct seq_file *m, void *v)
1576 {
1577 	struct ftrace_iterator *iter = m->private;
1578 	struct dyn_ftrace *rec;
1579 
1580 	if (iter->flags & FTRACE_ITER_HASH)
1581 		return t_hash_show(m, iter);
1582 
1583 	if (iter->flags & FTRACE_ITER_PRINTALL) {
1584 		seq_printf(m, "#### all functions enabled ####\n");
1585 		return 0;
1586 	}
1587 
1588 	rec = iter->func;
1589 
1590 	if (!rec)
1591 		return 0;
1592 
1593 	seq_printf(m, "%ps\n", (void *)rec->ip);
1594 
1595 	return 0;
1596 }
1597 
1598 static const struct seq_operations show_ftrace_seq_ops = {
1599 	.start = t_start,
1600 	.next = t_next,
1601 	.stop = t_stop,
1602 	.show = t_show,
1603 };
1604 
1605 static int
1606 ftrace_avail_open(struct inode *inode, struct file *file)
1607 {
1608 	struct ftrace_iterator *iter;
1609 	int ret;
1610 
1611 	if (unlikely(ftrace_disabled))
1612 		return -ENODEV;
1613 
1614 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1615 	if (!iter)
1616 		return -ENOMEM;
1617 
1618 	iter->pg = ftrace_pages_start;
1619 
1620 	ret = seq_open(file, &show_ftrace_seq_ops);
1621 	if (!ret) {
1622 		struct seq_file *m = file->private_data;
1623 
1624 		m->private = iter;
1625 	} else {
1626 		kfree(iter);
1627 	}
1628 
1629 	return ret;
1630 }
1631 
1632 static int
1633 ftrace_failures_open(struct inode *inode, struct file *file)
1634 {
1635 	int ret;
1636 	struct seq_file *m;
1637 	struct ftrace_iterator *iter;
1638 
1639 	ret = ftrace_avail_open(inode, file);
1640 	if (!ret) {
1641 		m = file->private_data;
1642 		iter = m->private;
1643 		iter->flags = FTRACE_ITER_FAILURES;
1644 	}
1645 
1646 	return ret;
1647 }
1648 
1649 
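/* Clear the FILTER (enable) or NOTRACE flag from every non-failed record. */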
1650 static void ftrace_filter_reset(int enable)
1651 {
1652 	struct ftrace_page *pg;
1653 	struct dyn_ftrace *rec;
1654 	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1655 
1656 	mutex_lock(&ftrace_lock);
1657 	if (enable)
1658 		ftrace_filtered = 0;
1659 	do_for_each_ftrace_rec(pg, rec) {
1660 		if (rec->flags & FTRACE_FL_FAILED)
1661 			continue;
1662 		rec->flags &= ~type;
1663 	} while_for_each_ftrace_rec();
1664 	mutex_unlock(&ftrace_lock);
1665 }
1666 
1667 static int
1668 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1669 {
1670 	struct ftrace_iterator *iter;
1671 	int ret = 0;
1672 
1673 	if (unlikely(ftrace_disabled))
1674 		return -ENODEV;
1675 
1676 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1677 	if (!iter)
1678 		return -ENOMEM;
1679 
1680 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
1681 		kfree(iter);
1682 		return -ENOMEM;
1683 	}
1684 
1685 	mutex_lock(&ftrace_regex_lock);
1686 	if ((file->f_mode & FMODE_WRITE) &&
1687 	    (file->f_flags & O_TRUNC))
1688 		ftrace_filter_reset(enable);
1689 
1690 	if (file->f_mode & FMODE_READ) {
1691 		iter->pg = ftrace_pages_start;
1692 		iter->flags = enable ? FTRACE_ITER_FILTER :
1693 			FTRACE_ITER_NOTRACE;
1694 
1695 		ret = seq_open(file, &show_ftrace_seq_ops);
1696 		if (!ret) {
1697 			struct seq_file *m = file->private_data;
1698 			m->private = iter;
1699 		} else {
1700 			trace_parser_put(&iter->parser);
1701 			kfree(iter);
1702 		}
1703 	} else
1704 		file->private_data = iter;
1705 	mutex_unlock(&ftrace_regex_lock);
1706 
1707 	return ret;
1708 }
1709 
1710 static int
1711 ftrace_filter_open(struct inode *inode, struct file *file)
1712 {
1713 	return ftrace_regex_open(inode, file, 1);
1714 }
1715 
1716 static int
1717 ftrace_notrace_open(struct inode *inode, struct file *file)
1718 {
1719 	return ftrace_regex_open(inode, file, 0);
1720 }
1721 
1722 static loff_t
1723 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1724 {
1725 	loff_t ret;
1726 
1727 	if (file->f_mode & FMODE_READ)
1728 		ret = seq_lseek(file, offset, origin);
1729 	else
1730 		file->f_pos = ret = 1;
1731 
1732 	return ret;
1733 }
1734 
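/*
 * Match a symbol name against a parsed glob: full string, prefix,
 * substring or suffix compare, as selected by 'type'.
 */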
1735 static int ftrace_match(char *str, char *regex, int len, int type)
1736 {
1737 	int matched = 0;
1738 	int slen;
1739 
1740 	switch (type) {
1741 	case MATCH_FULL:
1742 		if (strcmp(str, regex) == 0)
1743 			matched = 1;
1744 		break;
1745 	case MATCH_FRONT_ONLY:
1746 		if (strncmp(str, regex, len) == 0)
1747 			matched = 1;
1748 		break;
1749 	case MATCH_MIDDLE_ONLY:
1750 		if (strstr(str, regex))
1751 			matched = 1;
1752 		break;
1753 	case MATCH_END_ONLY:
1754 		slen = strlen(str);
1755 		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1756 			matched = 1;
1757 		break;
1758 	}
1759 
1760 	return matched;
1761 }
1762 
1763 static int
1764 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1765 {
1766 	char str[KSYM_SYMBOL_LEN];
1767 
1768 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1769 	return ftrace_match(str, regex, len, type);
1770 }
1771 
1772 static int ftrace_match_records(char *buff, int len, int enable)
1773 {
1774 	unsigned int search_len;
1775 	struct ftrace_page *pg;
1776 	struct dyn_ftrace *rec;
1777 	unsigned long flag;
1778 	char *search;
1779 	int type;
1780 	int not;
1781 	int found = 0;
1782 
1783 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1784 	type = filter_parse_regex(buff, len, &search, &not);
1785 
1786 	search_len = strlen(search);
1787 
1788 	mutex_lock(&ftrace_lock);
1789 	do_for_each_ftrace_rec(pg, rec) {
1790 
1791 		if (rec->flags & FTRACE_FL_FAILED)
1792 			continue;
1793 
1794 		if (ftrace_match_record(rec, search, search_len, type)) {
1795 			if (not)
1796 				rec->flags &= ~flag;
1797 			else
1798 				rec->flags |= flag;
1799 			found = 1;
1800 		}
1801 		/*
1802 		 * Only enable filtering if we have a function that
1803 		 * is filtered on.
1804 		 */
1805 		if (enable && (rec->flags & FTRACE_FL_FILTER))
1806 			ftrace_filtered = 1;
1807 	} while_for_each_ftrace_rec();
1808 	mutex_unlock(&ftrace_lock);
1809 
1810 	return found;
1811 }
1812 
1813 static int
1814 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1815 			   char *regex, int len, int type)
1816 {
1817 	char str[KSYM_SYMBOL_LEN];
1818 	char *modname;
1819 
1820 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1821 
1822 	if (!modname || strcmp(modname, mod))
1823 		return 0;
1824 
1825 	/* blank search means to match all funcs in the mod */
1826 	if (len)
1827 		return ftrace_match(str, regex, len, type);
1828 	else
1829 		return 1;
1830 }
1831 
1832 static int ftrace_match_module_records(char *buff, char *mod, int enable)
1833 {
1834 	unsigned search_len = 0;
1835 	struct ftrace_page *pg;
1836 	struct dyn_ftrace *rec;
1837 	int type = MATCH_FULL;
1838 	char *search = buff;
1839 	unsigned long flag;
1840 	int not = 0;
1841 	int found = 0;
1842 
1843 	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1844 
1845 	/* blank or '*' mean the same */
1846 	if (strcmp(buff, "*") == 0)
1847 		buff[0] = 0;
1848 
1849 	/* handle the case of 'don't filter this module' */
1850 	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1851 		buff[0] = 0;
1852 		not = 1;
1853 	}
1854 
1855 	if (strlen(buff)) {
1856 		type = filter_parse_regex(buff, strlen(buff), &search, &not);
1857 		search_len = strlen(search);
1858 	}
1859 
1860 	mutex_lock(&ftrace_lock);
1861 	do_for_each_ftrace_rec(pg, rec) {
1862 
1863 		if (rec->flags & FTRACE_FL_FAILED)
1864 			continue;
1865 
1866 		if (ftrace_match_module_record(rec, mod,
1867 					       search, search_len, type)) {
1868 			if (not)
1869 				rec->flags &= ~flag;
1870 			else
1871 				rec->flags |= flag;
1872 			found = 1;
1873 		}
1874 		if (enable && (rec->flags & FTRACE_FL_FILTER))
1875 			ftrace_filtered = 1;
1876 
1877 	} while_for_each_ftrace_rec();
1878 	mutex_unlock(&ftrace_lock);
1879 
1880 	return found;
1881 }
1882 
1883 /*
1884  * We register the module command as a template to show others how
1885  * to register a command as well.
1886  */
1887 
1888 static int
1889 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1890 {
1891 	char *mod;
1892 
1893 	/*
1894 	 * cmd == 'mod' because we only registered this func
1895 	 * for the 'mod' ftrace_func_command.
1896 	 * But if you register one func with multiple commands,
1897 	 * you can tell which command was used by the cmd
1898 	 * parameter.
1899 	 */
1900 
1901 	/* we must have a module name */
1902 	if (!param)
1903 		return -EINVAL;
1904 
1905 	mod = strsep(&param, ":");
1906 	if (!strlen(mod))
1907 		return -EINVAL;
1908 
1909 	if (ftrace_match_module_records(func, mod, enable))
1910 		return 0;
1911 	return -EINVAL;
1912 }
1913 
1914 static struct ftrace_func_command ftrace_mod_cmd = {
1915 	.name			= "mod",
1916 	.func			= ftrace_mod_callback,
1917 };
1918 
1919 static int __init ftrace_mod_cmd_init(void)
1920 {
1921 	return register_ftrace_command(&ftrace_mod_cmd);
1922 }
1923 device_initcall(ftrace_mod_cmd_init);
1924 
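/*
 * Called for every traced function: look up the ip in ftrace_func_hash
 * and invoke any probes registered on it.
 */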
1925 static void
1926 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1927 {
1928 	struct ftrace_func_probe *entry;
1929 	struct hlist_head *hhd;
1930 	struct hlist_node *n;
1931 	unsigned long key;
1932 
1933 	key = hash_long(ip, FTRACE_HASH_BITS);
1934 
1935 	hhd = &ftrace_func_hash[key];
1936 
1937 	if (hlist_empty(hhd))
1938 		return;
1939 
1940 	/*
1941 	 * Disable preemption for these calls to prevent a RCU grace
1942 	 * period. This syncs the hash iteration and freeing of items
1943 	 * on the hash. rcu_read_lock is too dangerous here.
1944 	 */
1945 	preempt_disable_notrace();
1946 	hlist_for_each_entry_rcu(entry, n, hhd, node) {
1947 		if (entry->ip == ip)
1948 			entry->ops->func(ip, parent_ip, &entry->data);
1949 	}
1950 	preempt_enable_notrace();
1951 }
1952 
1953 static struct ftrace_ops trace_probe_ops __read_mostly =
1954 {
1955 	.func		= function_trace_probe_call,
1956 };
1957 
1958 static int ftrace_probe_registered;
1959 
1960 static void __enable_ftrace_function_probe(void)
1961 {
1962 	int i;
1963 
1964 	if (ftrace_probe_registered)
1965 		return;
1966 
1967 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1968 		struct hlist_head *hhd = &ftrace_func_hash[i];
1969 		if (hhd->first)
1970 			break;
1971 	}
1972 	/* Nothing registered? */
1973 	if (i == FTRACE_FUNC_HASHSIZE)
1974 		return;
1975 
1976 	__register_ftrace_function(&trace_probe_ops);
1977 	ftrace_startup(0);
1978 	ftrace_probe_registered = 1;
1979 }
1980 
1981 static void __disable_ftrace_function_probe(void)
1982 {
1983 	int i;
1984 
1985 	if (!ftrace_probe_registered)
1986 		return;
1987 
1988 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1989 		struct hlist_head *hhd = &ftrace_func_hash[i];
1990 		if (hhd->first)
1991 			return;
1992 	}
1993 
1994 	/* no more funcs left */
1995 	__unregister_ftrace_function(&trace_probe_ops);
1996 	ftrace_shutdown(0);
1997 	ftrace_probe_registered = 0;
1998 }
1999 
2000 
2001 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2002 {
2003 	struct ftrace_func_probe *entry =
2004 		container_of(rhp, struct ftrace_func_probe, rcu);
2005 
2006 	if (entry->ops->free)
2007 		entry->ops->free(&entry->data);
2008 	kfree(entry);
2009 }
2010 
2011 
2012 int
2013 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2014 			      void *data)
2015 {
2016 	struct ftrace_func_probe *entry;
2017 	struct ftrace_page *pg;
2018 	struct dyn_ftrace *rec;
2019 	int type, len, not;
2020 	unsigned long key;
2021 	int count = 0;
2022 	char *search;
2023 
2024 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
2025 	len = strlen(search);
2026 
2027 	/* we do not support '!' for function probes */
2028 	if (WARN_ON(not))
2029 		return -EINVAL;
2030 
2031 	mutex_lock(&ftrace_lock);
2032 	do_for_each_ftrace_rec(pg, rec) {
2033 
2034 		if (rec->flags & FTRACE_FL_FAILED)
2035 			continue;
2036 
2037 		if (!ftrace_match_record(rec, search, len, type))
2038 			continue;
2039 
2040 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2041 		if (!entry) {
2042 			/* If we did not process any, then return error */
2043 			if (!count)
2044 				count = -ENOMEM;
2045 			goto out_unlock;
2046 		}
2047 
2048 		count++;
2049 
2050 		entry->data = data;
2051 
2052 		/*
2053 		 * The caller might want to do something special
2054 		 * for each function we find. We call the callback
2055 		 * to give the caller an opportunity to do so.
2056 		 */
2057 		if (ops->callback) {
2058 			if (ops->callback(rec->ip, &entry->data) < 0) {
2059 				/* caller does not like this func */
2060 				kfree(entry);
2061 				continue;
2062 			}
2063 		}
2064 
2065 		entry->ops = ops;
2066 		entry->ip = rec->ip;
2067 
2068 		key = hash_long(entry->ip, FTRACE_HASH_BITS);
2069 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2070 
2071 	} while_for_each_ftrace_rec();
2072 	__enable_ftrace_function_probe();
2073 
2074  out_unlock:
2075 	mutex_unlock(&ftrace_lock);
2076 
2077 	return count;
2078 }
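/*
 * Illustrative sketch, not part of this file: a caller attaches a probe
 * to every function matching a glob by filling in a struct
 * ftrace_probe_ops.  The names my_probe_func/my_probe_ops are
 * hypothetical; the handler signature follows the entry->ops->func()
 * call in function_trace_probe_call() above.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 */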
2079 
2080 enum {
2081 	PROBE_TEST_FUNC		= 1,
2082 	PROBE_TEST_DATA		= 2
2083 };
2084 
2085 static void
2086 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2087 				  void *data, int flags)
2088 {
2089 	struct ftrace_func_probe *entry;
2090 	struct hlist_node *n, *tmp;
2091 	char str[KSYM_SYMBOL_LEN];
2092 	int type = MATCH_FULL;
2093 	int i, len = 0;
2094 	char *search;
2095 
2096 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2097 		glob = NULL;
2098 	else if (glob) {
2099 		int not;
2100 
2101 		type = filter_parse_regex(glob, strlen(glob), &search, &not);
2102 		len = strlen(search);
2103 
2104 		/* we do not support '!' for function probes */
2105 		if (WARN_ON(not))
2106 			return;
2107 	}
2108 
2109 	mutex_lock(&ftrace_lock);
2110 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2111 		struct hlist_head *hhd = &ftrace_func_hash[i];
2112 
2113 		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2114 
2115 			/* break up if statements for readability */
2116 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2117 				continue;
2118 
2119 			if ((flags & PROBE_TEST_DATA) && entry->data != data)
2120 				continue;
2121 
2122 			/* do this last, since it is the most expensive */
2123 			if (glob) {
2124 				kallsyms_lookup(entry->ip, NULL, NULL,
2125 						NULL, str);
2126 				if (!ftrace_match(str, glob, len, type))
2127 					continue;
2128 			}
2129 
2130 			hlist_del(&entry->node);
2131 			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2132 		}
2133 	}
2134 	__disable_ftrace_function_probe();
2135 	mutex_unlock(&ftrace_lock);
2136 }
2137 
2138 void
2139 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2140 				void *data)
2141 {
2142 	__unregister_ftrace_function_probe(glob, ops, data,
2143 					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
2144 }
2145 
2146 void
2147 void unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2148 {
2149 	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2150 }
2151 
2152 void unregister_ftrace_function_probe_all(char *glob)
2153 {
2154 	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2155 }
2156 
2157 static LIST_HEAD(ftrace_commands);
2158 static DEFINE_MUTEX(ftrace_cmd_mutex);
2159 
2160 int register_ftrace_command(struct ftrace_func_command *cmd)
2161 {
2162 	struct ftrace_func_command *p;
2163 	int ret = 0;
2164 
2165 	mutex_lock(&ftrace_cmd_mutex);
2166 	list_for_each_entry(p, &ftrace_commands, list) {
2167 		if (strcmp(cmd->name, p->name) == 0) {
2168 			ret = -EBUSY;
2169 			goto out_unlock;
2170 		}
2171 	}
2172 	list_add(&cmd->list, &ftrace_commands);
2173  out_unlock:
2174 	mutex_unlock(&ftrace_cmd_mutex);
2175 
2176 	return ret;
2177 }
2178 
2179 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2180 {
2181 	struct ftrace_func_command *p, *n;
2182 	int ret = -ENODEV;
2183 
2184 	mutex_lock(&ftrace_cmd_mutex);
2185 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2186 		if (strcmp(cmd->name, p->name) == 0) {
2187 			ret = 0;
2188 			list_del_init(&p->list);
2189 			goto out_unlock;
2190 		}
2191 	}
2192  out_unlock:
2193 	mutex_unlock(&ftrace_cmd_mutex);
2194 
2195 	return ret;
2196 }
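/*
 * Illustrative sketch with hypothetical names: a tracer can hook its own
 * "<func>:<command>" handler into set_ftrace_filter by registering a
 * struct ftrace_func_command.  The handler signature mirrors the
 * p->func() invocation in ftrace_process_regex() below.
 *
 *	static int my_cmd_func(char *func, char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 *	...
 *	unregister_ftrace_command(&my_cmd);
 */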
2197 
2198 static int ftrace_process_regex(char *buff, int len, int enable)
2199 {
2200 	char *func, *command, *next = buff;
2201 	struct ftrace_func_command *p;
2202 	int ret = -EINVAL;
2203 
2204 	func = strsep(&next, ":");
2205 
2206 	if (!next) {
2207 		if (ftrace_match_records(func, len, enable))
2208 			return 0;
2209 		return ret;
2210 	}
2211 
2212 	/* command found */
2213 
2214 	command = strsep(&next, ":");
2215 
2216 	mutex_lock(&ftrace_cmd_mutex);
2217 	list_for_each_entry(p, &ftrace_commands, list) {
2218 		if (strcmp(p->name, command) == 0) {
2219 			ret = p->func(func, command, next, enable);
2220 			goto out_unlock;
2221 		}
2222 	}
2223  out_unlock:
2224 	mutex_unlock(&ftrace_cmd_mutex);
2225 
2226 	return ret;
2227 }
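/*
 * For reference: input written to set_ftrace_filter is either a plain
 * pattern ("schedule", "*lock*") or "<pattern>:<command>[:<param>]".
 * For example, "echo 'schedule:traceoff' > set_ftrace_filter" works if a
 * "traceoff" command has been registered elsewhere via
 * register_ftrace_command().
 */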
2228 
2229 static ssize_t
2230 ftrace_regex_write(struct file *file, const char __user *ubuf,
2231 		   size_t cnt, loff_t *ppos, int enable)
2232 {
2233 	struct ftrace_iterator *iter;
2234 	struct trace_parser *parser;
2235 	ssize_t ret, read;
2236 
2237 	if (!cnt)
2238 		return 0;
2239 
2240 	mutex_lock(&ftrace_regex_lock);
2241 
2242 	if (file->f_mode & FMODE_READ) {
2243 		struct seq_file *m = file->private_data;
2244 		iter = m->private;
2245 	} else
2246 		iter = file->private_data;
2247 
2248 	parser = &iter->parser;
2249 	read = trace_get_user(parser, ubuf, cnt, ppos);
2250 
2251 	if (read >= 0 && trace_parser_loaded(parser) &&
2252 	    !trace_parser_cont(parser)) {
2253 		ret = ftrace_process_regex(parser->buffer,
2254 					   parser->idx, enable);
2255 		trace_parser_clear(parser);
2256 		if (ret)
2257 			goto out_unlock;
2258 	}
2259 
2260 	ret = read;
2261 out_unlock:
2262 	mutex_unlock(&ftrace_regex_lock);
2263 
2264 	return ret;
2265 }
2266 
2267 static ssize_t
2268 ftrace_filter_write(struct file *file, const char __user *ubuf,
2269 		    size_t cnt, loff_t *ppos)
2270 {
2271 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2272 }
2273 
2274 static ssize_t
2275 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2276 		     size_t cnt, loff_t *ppos)
2277 {
2278 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2279 }
2280 
2281 static void
2282 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2283 {
2284 	if (unlikely(ftrace_disabled))
2285 		return;
2286 
2287 	mutex_lock(&ftrace_regex_lock);
2288 	if (reset)
2289 		ftrace_filter_reset(enable);
2290 	if (buf)
2291 		ftrace_match_records(buf, len, enable);
2292 	mutex_unlock(&ftrace_regex_lock);
2293 }
2294 
2295 /**
2296  * ftrace_set_filter - set a function to filter on in ftrace
2297  * @buf - the string that holds the function filter text.
2298  * @len - the length of the string.
2299  * @reset - non zero to reset all filters before applying this filter.
2300  *
2301  * Filters denote which functions should be enabled when tracing is enabled.
2302  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2303  */
2304 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2305 {
2306 	ftrace_set_regex(buf, len, reset, 1);
2307 }
2308 
2309 /**
2310  * ftrace_set_notrace - set a function to not trace in ftrace
2311  * @buf - the string that holds the function notrace text.
2312  * @len - the length of the string.
2313  * @reset - non zero to reset all filters before applying this filter.
2314  *
2315  * Notrace Filters denote which functions should not be enabled when tracing
2316  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2317  * for tracing.
2318  */
2319 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2320 {
2321 	ftrace_set_regex(buf, len, reset, 0);
2322 }
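/*
 * Illustrative in-kernel usage (hypothetical caller): a tracer that only
 * cares about scheduler functions might set up its filters with:
 *
 *	ftrace_set_filter((unsigned char *)"sched_*", strlen("sched_*"), 1);
 *	ftrace_set_notrace((unsigned char *)"*_clock*", strlen("*_clock*"), 0);
 *
 * Passing a NULL @buf with @reset set simply clears the existing filters.
 */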
2323 
2324 /*
2325  * command line interface to allow users to set filters on boot up.
2326  */
2327 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
2328 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2329 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2330 
2331 static int __init set_ftrace_notrace(char *str)
2332 {
2333 	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2334 	return 1;
2335 }
2336 __setup("ftrace_notrace=", set_ftrace_notrace);
2337 
2338 static int __init set_ftrace_filter(char *str)
2339 {
2340 	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2341 	return 1;
2342 }
2343 __setup("ftrace_filter=", set_ftrace_filter);
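/*
 * Example boot-time usage of the parameters above (kernel command line);
 * entries are comma separated, as parsed by set_ftrace_early_filter():
 *
 *	ftrace_filter=sched_*,schedule ftrace_notrace=*spin_lock*
 *
 * The buffers are applied from set_ftrace_early_filters() during
 * ftrace_init().
 */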
2344 
2345 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2346 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2347 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2348 
2349 static int __init set_graph_function(char *str)
2350 {
2351 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2352 	return 1;
2353 }
2354 __setup("ftrace_graph_filter=", set_graph_function);
2355 
2356 static void __init set_ftrace_early_graph(char *buf)
2357 {
2358 	int ret;
2359 	char *func;
2360 
2361 	while (buf) {
2362 		func = strsep(&buf, ",");
2363 		/* we allow only one expression at a time */
2364 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2365 				      func);
2366 		if (ret)
2367 			printk(KERN_DEBUG "ftrace: function %s not "
2368 					  "traceable\n", func);
2369 	}
2370 }
2371 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2372 
2373 static void __init set_ftrace_early_filter(char *buf, int enable)
2374 {
2375 	char *func;
2376 
2377 	while (buf) {
2378 		func = strsep(&buf, ",");
2379 		ftrace_set_regex(func, strlen(func), 0, enable);
2380 	}
2381 }
2382 
2383 static void __init set_ftrace_early_filters(void)
2384 {
2385 	if (ftrace_filter_buf[0])
2386 		set_ftrace_early_filter(ftrace_filter_buf, 1);
2387 	if (ftrace_notrace_buf[0])
2388 		set_ftrace_early_filter(ftrace_notrace_buf, 0);
2389 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2390 	if (ftrace_graph_buf[0])
2391 		set_ftrace_early_graph(ftrace_graph_buf);
2392 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2393 }
2394 
2395 static int
2396 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2397 {
2398 	struct seq_file *m = (struct seq_file *)file->private_data;
2399 	struct ftrace_iterator *iter;
2400 	struct trace_parser *parser;
2401 
2402 	mutex_lock(&ftrace_regex_lock);
2403 	if (file->f_mode & FMODE_READ) {
2404 		iter = m->private;
2405 
2406 		seq_release(inode, file);
2407 	} else
2408 		iter = file->private_data;
2409 
2410 	parser = &iter->parser;
2411 	if (trace_parser_loaded(parser)) {
2412 		parser->buffer[parser->idx] = 0;
2413 		ftrace_match_records(parser->buffer, parser->idx, enable);
2414 	}
2415 
2416 	mutex_lock(&ftrace_lock);
2417 	if (ftrace_start_up && ftrace_enabled)
2418 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2419 	mutex_unlock(&ftrace_lock);
2420 
2421 	trace_parser_put(parser);
2422 	kfree(iter);
2423 
2424 	mutex_unlock(&ftrace_regex_lock);
2425 	return 0;
2426 }
2427 
2428 static int
2429 ftrace_filter_release(struct inode *inode, struct file *file)
2430 {
2431 	return ftrace_regex_release(inode, file, 1);
2432 }
2433 
2434 static int
2435 ftrace_notrace_release(struct inode *inode, struct file *file)
2436 {
2437 	return ftrace_regex_release(inode, file, 0);
2438 }
2439 
2440 static const struct file_operations ftrace_avail_fops = {
2441 	.open = ftrace_avail_open,
2442 	.read = seq_read,
2443 	.llseek = seq_lseek,
2444 	.release = seq_release_private,
2445 };
2446 
2447 static const struct file_operations ftrace_failures_fops = {
2448 	.open = ftrace_failures_open,
2449 	.read = seq_read,
2450 	.llseek = seq_lseek,
2451 	.release = seq_release_private,
2452 };
2453 
2454 static const struct file_operations ftrace_filter_fops = {
2455 	.open = ftrace_filter_open,
2456 	.read = seq_read,
2457 	.write = ftrace_filter_write,
2458 	.llseek = ftrace_regex_lseek,
2459 	.release = ftrace_filter_release,
2460 };
2461 
2462 static const struct file_operations ftrace_notrace_fops = {
2463 	.open = ftrace_notrace_open,
2464 	.read = seq_read,
2465 	.write = ftrace_notrace_write,
2466 	.llseek = ftrace_regex_lseek,
2467 	.release = ftrace_notrace_release,
2468 };
2469 
2470 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2471 
2472 static DEFINE_MUTEX(graph_lock);
2473 
2474 int ftrace_graph_count;
2475 int ftrace_graph_filter_enabled;
2476 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2477 
2478 static void *
2479 __g_next(struct seq_file *m, loff_t *pos)
2480 {
2481 	if (*pos >= ftrace_graph_count)
2482 		return NULL;
2483 	return &ftrace_graph_funcs[*pos];
2484 }
2485 
2486 static void *
2487 g_next(struct seq_file *m, void *v, loff_t *pos)
2488 {
2489 	(*pos)++;
2490 	return __g_next(m, pos);
2491 }
2492 
2493 static void *g_start(struct seq_file *m, loff_t *pos)
2494 {
2495 	mutex_lock(&graph_lock);
2496 
2497 	/* If nothing is filtered, tell g_show to print that all functions are enabled */
2498 	if (!ftrace_graph_filter_enabled && !*pos)
2499 		return (void *)1;
2500 
2501 	return __g_next(m, pos);
2502 }
2503 
2504 static void g_stop(struct seq_file *m, void *p)
2505 {
2506 	mutex_unlock(&graph_lock);
2507 }
2508 
2509 static int g_show(struct seq_file *m, void *v)
2510 {
2511 	unsigned long *ptr = v;
2512 
2513 	if (!ptr)
2514 		return 0;
2515 
2516 	if (ptr == (unsigned long *)1) {
2517 		seq_printf(m, "#### all functions enabled ####\n");
2518 		return 0;
2519 	}
2520 
2521 	seq_printf(m, "%ps\n", (void *)*ptr);
2522 
2523 	return 0;
2524 }
2525 
2526 static const struct seq_operations ftrace_graph_seq_ops = {
2527 	.start = g_start,
2528 	.next = g_next,
2529 	.stop = g_stop,
2530 	.show = g_show,
2531 };
2532 
2533 static int
2534 ftrace_graph_open(struct inode *inode, struct file *file)
2535 {
2536 	int ret = 0;
2537 
2538 	if (unlikely(ftrace_disabled))
2539 		return -ENODEV;
2540 
2541 	mutex_lock(&graph_lock);
2542 	if ((file->f_mode & FMODE_WRITE) &&
2543 	    (file->f_flags & O_TRUNC)) {
2544 		ftrace_graph_filter_enabled = 0;
2545 		ftrace_graph_count = 0;
2546 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2547 	}
2548 	mutex_unlock(&graph_lock);
2549 
2550 	if (file->f_mode & FMODE_READ)
2551 		ret = seq_open(file, &ftrace_graph_seq_ops);
2552 
2553 	return ret;
2554 }
2555 
2556 static int
2557 ftrace_graph_release(struct inode *inode, struct file *file)
2558 {
2559 	if (file->f_mode & FMODE_READ)
2560 		seq_release(inode, file);
2561 	return 0;
2562 }
2563 
2564 static int
2565 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2566 {
2567 	struct dyn_ftrace *rec;
2568 	struct ftrace_page *pg;
2569 	int search_len;
2570 	int fail = 1;
2571 	int type, not;
2572 	char *search;
2573 	bool exists;
2574 	int i;
2575 
2576 	if (ftrace_disabled)
2577 		return -ENODEV;
2578 
2579 	/* decode regex */
2580 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2581 	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
2582 		return -EBUSY;
2583 
2584 	search_len = strlen(search);
2585 
2586 	mutex_lock(&ftrace_lock);
2587 	do_for_each_ftrace_rec(pg, rec) {
2588 
2589 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2590 			continue;
2591 
2592 		if (ftrace_match_record(rec, search, search_len, type)) {
2593 			/* if it is in the array */
2594 			exists = false;
2595 			for (i = 0; i < *idx; i++) {
2596 				if (array[i] == rec->ip) {
2597 					exists = true;
2598 					break;
2599 				}
2600 			}
2601 
2602 			if (!not) {
2603 				fail = 0;
2604 				if (!exists) {
2605 					array[(*idx)++] = rec->ip;
2606 					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2607 						goto out;
2608 				}
2609 			} else {
2610 				if (exists) {
2611 					array[i] = array[--(*idx)];
2612 					array[*idx] = 0;
2613 					fail = 0;
2614 				}
2615 			}
2616 		}
2617 	} while_for_each_ftrace_rec();
2618 out:
2619 	mutex_unlock(&ftrace_lock);
2620 
2621 	if (fail)
2622 		return -EINVAL;
2623 
2624 	ftrace_graph_filter_enabled = 1;
2625 	return 0;
2626 }
2627 
2628 static ssize_t
2629 ftrace_graph_write(struct file *file, const char __user *ubuf,
2630 		   size_t cnt, loff_t *ppos)
2631 {
2632 	struct trace_parser parser;
2633 	ssize_t read, ret;
2634 
2635 	if (!cnt)
2636 		return 0;
2637 
2638 	mutex_lock(&graph_lock);
2639 
2640 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2641 		ret = -ENOMEM;
2642 		goto out_unlock;
2643 	}
2644 
2645 	read = trace_get_user(&parser, ubuf, cnt, ppos);
2646 
2647 	if (read >= 0 && trace_parser_loaded((&parser))) {
2648 		parser.buffer[parser.idx] = 0;
2649 
2650 		/* we allow only one expression at a time */
2651 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2652 					parser.buffer);
2653 		if (ret)
2654 			goto out_free;
2655 	}
2656 
2657 	ret = read;
2658 
2659 out_free:
2660 	trace_parser_put(&parser);
2661 out_unlock:
2662 	mutex_unlock(&graph_lock);
2663 
2664 	return ret;
2665 }
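/*
 * For reference: the graph filter is set from user space, one expression
 * per write, e.g. "echo sys_open > set_graph_function" followed by
 * "echo 'sched_*' >> set_graph_function".  Opening the file with O_TRUNC
 * (a plain ">" redirect) clears the filter first, see ftrace_graph_open().
 */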
2666 
2667 static const struct file_operations ftrace_graph_fops = {
2668 	.open		= ftrace_graph_open,
2669 	.read		= seq_read,
2670 	.write		= ftrace_graph_write,
2671 	.release	= ftrace_graph_release,
2672 	.llseek		= seq_lseek,
2673 };
2674 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2675 
2676 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2677 {
2678 
2679 	trace_create_file("available_filter_functions", 0444,
2680 			d_tracer, NULL, &ftrace_avail_fops);
2681 
2682 	trace_create_file("failures", 0444,
2683 			d_tracer, NULL, &ftrace_failures_fops);
2684 
2685 	trace_create_file("set_ftrace_filter", 0644, d_tracer,
2686 			NULL, &ftrace_filter_fops);
2687 
2688 	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2689 				    NULL, &ftrace_notrace_fops);
2690 
2691 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2692 	trace_create_file("set_graph_function", 0444, d_tracer,
2693 				    NULL,
2694 				    &ftrace_graph_fops);
2695 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2696 
2697 	return 0;
2698 }
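/*
 * The files created above live under the tracing debugfs directory,
 * typically /sys/kernel/debug/tracing.  A minimal interactive example,
 * assuming debugfs is mounted there:
 *
 *	echo 'hrtimer_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	cat /sys/kernel/debug/tracing/set_ftrace_filter
 */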
2699 
2700 static int ftrace_process_locs(struct module *mod,
2701 			       unsigned long *start,
2702 			       unsigned long *end)
2703 {
2704 	unsigned long *p;
2705 	unsigned long addr;
2706 	unsigned long flags;
2707 
2708 	mutex_lock(&ftrace_lock);
2709 	p = start;
2710 	while (p < end) {
2711 		addr = ftrace_call_adjust(*p++);
2712 		/*
2713 		 * Some architecture linkers will pad between
2714 		 * the different mcount_loc sections of different
2715 		 * object files to satisfy alignments.
2716 		 * Skip any NULL pointers.
2717 		 */
2718 		if (!addr)
2719 			continue;
2720 		ftrace_record_ip(addr);
2721 	}
2722 
2723 	/* disable interrupts to prevent kstop machine */
2724 	local_irq_save(flags);
2725 	ftrace_update_code(mod);
2726 	local_irq_restore(flags);
2727 	mutex_unlock(&ftrace_lock);
2728 
2729 	return 0;
2730 }
2731 
2732 #ifdef CONFIG_MODULES
2733 void ftrace_release_mod(struct module *mod)
2734 {
2735 	struct dyn_ftrace *rec;
2736 	struct ftrace_page *pg;
2737 
2738 	if (ftrace_disabled)
2739 		return;
2740 
2741 	mutex_lock(&ftrace_lock);
2742 	do_for_each_ftrace_rec(pg, rec) {
2743 		if (within_module_core(rec->ip, mod)) {
2744 			/*
2745 			 * rec->ip is changed in ftrace_free_rec(), so a freed
2746 			 * record should no longer fall within the module's core range.
2747 			 */
2748 			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2749 			ftrace_free_rec(rec);
2750 		}
2751 	} while_for_each_ftrace_rec();
2752 	mutex_unlock(&ftrace_lock);
2753 }
2754 
2755 static void ftrace_init_module(struct module *mod,
2756 			       unsigned long *start, unsigned long *end)
2757 {
2758 	if (ftrace_disabled || start == end)
2759 		return;
2760 	ftrace_process_locs(mod, start, end);
2761 }
2762 
2763 static int ftrace_module_notify(struct notifier_block *self,
2764 				unsigned long val, void *data)
2765 {
2766 	struct module *mod = data;
2767 
2768 	switch (val) {
2769 	case MODULE_STATE_COMING:
2770 		ftrace_init_module(mod, mod->ftrace_callsites,
2771 				   mod->ftrace_callsites +
2772 				   mod->num_ftrace_callsites);
2773 		break;
2774 	case MODULE_STATE_GOING:
2775 		ftrace_release_mod(mod);
2776 		break;
2777 	}
2778 
2779 	return 0;
2780 }
2781 #else
2782 static int ftrace_module_notify(struct notifier_block *self,
2783 				unsigned long val, void *data)
2784 {
2785 	return 0;
2786 }
2787 #endif /* CONFIG_MODULES */
2788 
2789 struct notifier_block ftrace_module_nb = {
2790 	.notifier_call = ftrace_module_notify,
2791 	.priority = 0,
2792 };
2793 
2794 extern unsigned long __start_mcount_loc[];
2795 extern unsigned long __stop_mcount_loc[];
2796 
2797 void __init ftrace_init(void)
2798 {
2799 	unsigned long count, addr, flags;
2800 	int ret;
2801 
2802 	/* Keep the ftrace pointer to the stub */
2803 	addr = (unsigned long)ftrace_stub;
2804 
2805 	local_irq_save(flags);
2806 	ftrace_dyn_arch_init(&addr);
2807 	local_irq_restore(flags);
2808 
2809 	/* ftrace_dyn_arch_init places the return code in addr */
2810 	if (addr)
2811 		goto failed;
2812 
2813 	count = __stop_mcount_loc - __start_mcount_loc;
2814 
2815 	ret = ftrace_dyn_table_alloc(count);
2816 	if (ret)
2817 		goto failed;
2818 
2819 	last_ftrace_enabled = ftrace_enabled = 1;
2820 
2821 	ret = ftrace_process_locs(NULL,
2822 				  __start_mcount_loc,
2823 				  __stop_mcount_loc);
2824 
2825 	ret = register_module_notifier(&ftrace_module_nb);
2826 	if (ret)
2827 		pr_warning("Failed to register trace ftrace module notifier\n");
2828 
2829 	set_ftrace_early_filters();
2830 
2831 	return;
2832  failed:
2833 	ftrace_disabled = 1;
2834 }
2835 
2836 #else
2837 
2838 static int __init ftrace_nodyn_init(void)
2839 {
2840 	ftrace_enabled = 1;
2841 	return 0;
2842 }
2843 device_initcall(ftrace_nodyn_init);
2844 
2845 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2846 static inline void ftrace_startup_enable(int command) { }
2847 /* Keep as macros so we do not need to define the commands */
2848 # define ftrace_startup(command)	do { } while (0)
2849 # define ftrace_shutdown(command)	do { } while (0)
2850 # define ftrace_startup_sysctl()	do { } while (0)
2851 # define ftrace_shutdown_sysctl()	do { } while (0)
2852 #endif /* CONFIG_DYNAMIC_FTRACE */
2853 
2854 static void clear_ftrace_swapper(void)
2855 {
2856 	struct task_struct *p;
2857 	int cpu;
2858 
2859 	get_online_cpus();
2860 	for_each_online_cpu(cpu) {
2861 		p = idle_task(cpu);
2862 		clear_tsk_trace_trace(p);
2863 	}
2864 	put_online_cpus();
2865 }
2866 
2867 static void set_ftrace_swapper(void)
2868 {
2869 	struct task_struct *p;
2870 	int cpu;
2871 
2872 	get_online_cpus();
2873 	for_each_online_cpu(cpu) {
2874 		p = idle_task(cpu);
2875 		set_tsk_trace_trace(p);
2876 	}
2877 	put_online_cpus();
2878 }
2879 
2880 static void clear_ftrace_pid(struct pid *pid)
2881 {
2882 	struct task_struct *p;
2883 
2884 	rcu_read_lock();
2885 	do_each_pid_task(pid, PIDTYPE_PID, p) {
2886 		clear_tsk_trace_trace(p);
2887 	} while_each_pid_task(pid, PIDTYPE_PID, p);
2888 	rcu_read_unlock();
2889 
2890 	put_pid(pid);
2891 }
2892 
2893 static void set_ftrace_pid(struct pid *pid)
2894 {
2895 	struct task_struct *p;
2896 
2897 	rcu_read_lock();
2898 	do_each_pid_task(pid, PIDTYPE_PID, p) {
2899 		set_tsk_trace_trace(p);
2900 	} while_each_pid_task(pid, PIDTYPE_PID, p);
2901 	rcu_read_unlock();
2902 }
2903 
2904 static void clear_ftrace_pid_task(struct pid *pid)
2905 {
2906 	if (pid == ftrace_swapper_pid)
2907 		clear_ftrace_swapper();
2908 	else
2909 		clear_ftrace_pid(pid);
2910 }
2911 
2912 static void set_ftrace_pid_task(struct pid *pid)
2913 {
2914 	if (pid == ftrace_swapper_pid)
2915 		set_ftrace_swapper();
2916 	else
2917 		set_ftrace_pid(pid);
2918 }
2919 
2920 static int ftrace_pid_add(int p)
2921 {
2922 	struct pid *pid;
2923 	struct ftrace_pid *fpid;
2924 	int ret = -EINVAL;
2925 
2926 	mutex_lock(&ftrace_lock);
2927 
2928 	if (!p)
2929 		pid = ftrace_swapper_pid;
2930 	else
2931 		pid = find_get_pid(p);
2932 
2933 	if (!pid)
2934 		goto out;
2935 
2936 	ret = 0;
2937 
2938 	list_for_each_entry(fpid, &ftrace_pids, list)
2939 		if (fpid->pid == pid)
2940 			goto out_put;
2941 
2942 	ret = -ENOMEM;
2943 
2944 	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2945 	if (!fpid)
2946 		goto out_put;
2947 
2948 	list_add(&fpid->list, &ftrace_pids);
2949 	fpid->pid = pid;
2950 
2951 	set_ftrace_pid_task(pid);
2952 
2953 	ftrace_update_pid_func();
2954 	ftrace_startup_enable(0);
2955 
2956 	mutex_unlock(&ftrace_lock);
2957 	return 0;
2958 
2959 out_put:
2960 	if (pid != ftrace_swapper_pid)
2961 		put_pid(pid);
2962 
2963 out:
2964 	mutex_unlock(&ftrace_lock);
2965 	return ret;
2966 }
2967 
2968 static void ftrace_pid_reset(void)
2969 {
2970 	struct ftrace_pid *fpid, *safe;
2971 
2972 	mutex_lock(&ftrace_lock);
2973 	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2974 		struct pid *pid = fpid->pid;
2975 
2976 		clear_ftrace_pid_task(pid);
2977 
2978 		list_del(&fpid->list);
2979 		kfree(fpid);
2980 	}
2981 
2982 	ftrace_update_pid_func();
2983 	ftrace_startup_enable(0);
2984 
2985 	mutex_unlock(&ftrace_lock);
2986 }
2987 
2988 static void *fpid_start(struct seq_file *m, loff_t *pos)
2989 {
2990 	mutex_lock(&ftrace_lock);
2991 
2992 	if (list_empty(&ftrace_pids) && (!*pos))
2993 		return (void *) 1;
2994 
2995 	return seq_list_start(&ftrace_pids, *pos);
2996 }
2997 
2998 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2999 {
3000 	if (v == (void *)1)
3001 		return NULL;
3002 
3003 	return seq_list_next(v, &ftrace_pids, pos);
3004 }
3005 
3006 static void fpid_stop(struct seq_file *m, void *p)
3007 {
3008 	mutex_unlock(&ftrace_lock);
3009 }
3010 
3011 static int fpid_show(struct seq_file *m, void *v)
3012 {
3013 	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3014 
3015 	if (v == (void *)1) {
3016 		seq_printf(m, "no pid\n");
3017 		return 0;
3018 	}
3019 
3020 	if (fpid->pid == ftrace_swapper_pid)
3021 		seq_printf(m, "swapper tasks\n");
3022 	else
3023 		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3024 
3025 	return 0;
3026 }
3027 
3028 static const struct seq_operations ftrace_pid_sops = {
3029 	.start = fpid_start,
3030 	.next = fpid_next,
3031 	.stop = fpid_stop,
3032 	.show = fpid_show,
3033 };
3034 
3035 static int
3036 ftrace_pid_open(struct inode *inode, struct file *file)
3037 {
3038 	int ret = 0;
3039 
3040 	if ((file->f_mode & FMODE_WRITE) &&
3041 	    (file->f_flags & O_TRUNC))
3042 		ftrace_pid_reset();
3043 
3044 	if (file->f_mode & FMODE_READ)
3045 		ret = seq_open(file, &ftrace_pid_sops);
3046 
3047 	return ret;
3048 }
3049 
3050 static ssize_t
3051 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3052 		   size_t cnt, loff_t *ppos)
3053 {
3054 	char buf[64], *tmp;
3055 	long val;
3056 	int ret;
3057 
3058 	if (cnt >= sizeof(buf))
3059 		return -EINVAL;
3060 
3061 	if (copy_from_user(&buf, ubuf, cnt))
3062 		return -EFAULT;
3063 
3064 	buf[cnt] = 0;
3065 
3066 	/*
3067 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3068 	 * to clean the filter quietly.
3069 	 */
3070 	tmp = strstrip(buf);
3071 	if (strlen(tmp) == 0)
3072 		return 1;
3073 
3074 	ret = strict_strtol(tmp, 10, &val);
3075 	if (ret < 0)
3076 		return ret;
3077 
3078 	ret = ftrace_pid_add(val);
3079 
3080 	return ret ? ret : cnt;
3081 }
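/*
 * For reference: limiting function tracing to a single task from user
 * space, then clearing the filter again:
 *
 *	echo 1234 > set_ftrace_pid
 *	echo > set_ftrace_pid
 *
 * Writing "0" selects the per-cpu idle (swapper) tasks, see
 * ftrace_pid_add() above.
 */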
3082 
3083 static int
3084 ftrace_pid_release(struct inode *inode, struct file *file)
3085 {
3086 	if (file->f_mode & FMODE_READ)
3087 		seq_release(inode, file);
3088 
3089 	return 0;
3090 }
3091 
3092 static const struct file_operations ftrace_pid_fops = {
3093 	.open		= ftrace_pid_open,
3094 	.write		= ftrace_pid_write,
3095 	.read		= seq_read,
3096 	.llseek		= seq_lseek,
3097 	.release	= ftrace_pid_release,
3098 };
3099 
3100 static __init int ftrace_init_debugfs(void)
3101 {
3102 	struct dentry *d_tracer;
3103 
3104 	d_tracer = tracing_init_dentry();
3105 	if (!d_tracer)
3106 		return 0;
3107 
3108 	ftrace_init_dyn_debugfs(d_tracer);
3109 
3110 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
3111 			    NULL, &ftrace_pid_fops);
3112 
3113 	ftrace_profile_debugfs(d_tracer);
3114 
3115 	return 0;
3116 }
3117 fs_initcall(ftrace_init_debugfs);
3118 
3119 /**
3120  * ftrace_kill - kill ftrace
3121  *
3122  * This function should be used by panic code. It stops ftrace
3123  * but in a not so nice way: tracing is disabled immediately,
3124  * without any synchronization or cleanup.
3125  */
3126 void ftrace_kill(void)
3127 {
3128 	ftrace_disabled = 1;
3129 	ftrace_enabled = 0;
3130 	clear_ftrace_function();
3131 }
3132 
3133 /**
3134  * register_ftrace_function - register a function for profiling
3135  * @ops - ops structure that holds the function for profiling.
3136  *
3137  * Register a function to be called by all functions in the
3138  * kernel.
3139  *
3140  * Note: @ops->func and all the functions it calls must be labeled
3141  *       with "notrace", otherwise it will go into a
3142  *       recursive loop.
3143  */
3144 int register_ftrace_function(struct ftrace_ops *ops)
3145 {
3146 	int ret;
3147 
3148 	if (unlikely(ftrace_disabled))
3149 		return -1;
3150 
3151 	mutex_lock(&ftrace_lock);
3152 
3153 	ret = __register_ftrace_function(ops);
3154 	ftrace_startup(0);
3155 
3156 	mutex_unlock(&ftrace_lock);
3157 	return ret;
3158 }
3159 
3160 /**
3161  * unregister_ftrace_function - unregister a function for profiling.
3162  * @ops - ops structure that holds the function to unregister
3163  *
3164  * Unregister a function that was added to be called by ftrace profiling.
3165  */
3166 int unregister_ftrace_function(struct ftrace_ops *ops)
3167 {
3168 	int ret;
3169 
3170 	mutex_lock(&ftrace_lock);
3171 	ret = __unregister_ftrace_function(ops);
3172 	ftrace_shutdown(0);
3173 	mutex_unlock(&ftrace_lock);
3174 
3175 	return ret;
3176 }
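/*
 * Illustrative sketch with hypothetical names: registering a function
 * trace callback.  As the kernel-doc above warns, the callback must be
 * marked notrace to avoid recursion.
 *
 *	static void notrace my_trace_call(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_trace_call,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */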
3177 
3178 int
3179 ftrace_enable_sysctl(struct ctl_table *table, int write,
3180 		     void __user *buffer, size_t *lenp,
3181 		     loff_t *ppos)
3182 {
3183 	int ret;
3184 
3185 	if (unlikely(ftrace_disabled))
3186 		return -ENODEV;
3187 
3188 	mutex_lock(&ftrace_lock);
3189 
3190 	ret  = proc_dointvec(table, write, buffer, lenp, ppos);
3191 
3192 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3193 		goto out;
3194 
3195 	last_ftrace_enabled = !!ftrace_enabled;
3196 
3197 	if (ftrace_enabled) {
3198 
3199 		ftrace_startup_sysctl();
3200 
3201 		/* we are starting ftrace again */
3202 		if (ftrace_list != &ftrace_list_end) {
3203 			if (ftrace_list->next == &ftrace_list_end)
3204 				ftrace_trace_function = ftrace_list->func;
3205 			else
3206 				ftrace_trace_function = ftrace_list_func;
3207 		}
3208 
3209 	} else {
3210 		/* stopping ftrace calls (just send to ftrace_stub) */
3211 		ftrace_trace_function = ftrace_stub;
3212 
3213 		ftrace_shutdown_sysctl();
3214 	}
3215 
3216  out:
3217 	mutex_unlock(&ftrace_lock);
3218 	return ret;
3219 }
3220 
3221 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3222 
3223 static int ftrace_graph_active;
3224 static struct notifier_block ftrace_suspend_notifier;
3225 
3226 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3227 {
3228 	return 0;
3229 }
3230 
3231 /* The callbacks that hook a function */
3232 trace_func_graph_ret_t ftrace_graph_return =
3233 			(trace_func_graph_ret_t)ftrace_stub;
3234 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3235 
3236 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3237 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3238 {
3239 	int i;
3240 	int ret = 0;
3241 	unsigned long flags;
3242 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3243 	struct task_struct *g, *t;
3244 
3245 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3246 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3247 					* sizeof(struct ftrace_ret_stack),
3248 					GFP_KERNEL);
3249 		if (!ret_stack_list[i]) {
3250 			start = 0;
3251 			end = i;
3252 			ret = -ENOMEM;
3253 			goto free;
3254 		}
3255 	}
3256 
3257 	read_lock_irqsave(&tasklist_lock, flags);
3258 	do_each_thread(g, t) {
3259 		if (start == end) {
3260 			ret = -EAGAIN;
3261 			goto unlock;
3262 		}
3263 
3264 		if (t->ret_stack == NULL) {
3265 			atomic_set(&t->tracing_graph_pause, 0);
3266 			atomic_set(&t->trace_overrun, 0);
3267 			t->curr_ret_stack = -1;
3268 			/* Make sure the tasks see the -1 first: */
3269 			smp_wmb();
3270 			t->ret_stack = ret_stack_list[start++];
3271 		}
3272 	} while_each_thread(g, t);
3273 
3274 unlock:
3275 	read_unlock_irqrestore(&tasklist_lock, flags);
3276 free:
3277 	for (i = start; i < end; i++)
3278 		kfree(ret_stack_list[i]);
3279 	return ret;
3280 }
3281 
3282 static void
3283 ftrace_graph_probe_sched_switch(void *ignore,
3284 			struct task_struct *prev, struct task_struct *next)
3285 {
3286 	unsigned long long timestamp;
3287 	int index;
3288 
3289 	/*
3290 	 * If the user wants to count the time a function was asleep,
3291 	 * do not update the time stamps.
3292 	 */
3293 	if (trace_flags & TRACE_ITER_SLEEP_TIME)
3294 		return;
3295 
3296 	timestamp = trace_clock_local();
3297 
3298 	prev->ftrace_timestamp = timestamp;
3299 
3300 	/* only process tasks that we timestamped */
3301 	if (!next->ftrace_timestamp)
3302 		return;
3303 
3304 	/*
3305 	 * Update all the counters in next to make up for the
3306 	 * time next was sleeping.
3307 	 */
3308 	timestamp -= next->ftrace_timestamp;
3309 
3310 	for (index = next->curr_ret_stack; index >= 0; index--)
3311 		next->ret_stack[index].calltime += timestamp;
3312 }
3313 
3314 /* Allocate a return stack for each task */
3315 static int start_graph_tracing(void)
3316 {
3317 	struct ftrace_ret_stack **ret_stack_list;
3318 	int ret, cpu;
3319 
3320 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3321 				sizeof(struct ftrace_ret_stack *),
3322 				GFP_KERNEL);
3323 
3324 	if (!ret_stack_list)
3325 		return -ENOMEM;
3326 
3327 	/* The cpu_boot init_task->ret_stack will never be freed */
3328 	for_each_online_cpu(cpu) {
3329 		if (!idle_task(cpu)->ret_stack)
3330 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3331 	}
3332 
3333 	do {
3334 		ret = alloc_retstack_tasklist(ret_stack_list);
3335 	} while (ret == -EAGAIN);
3336 
3337 	if (!ret) {
3338 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3339 		if (ret)
3340 			pr_info("ftrace_graph: Couldn't activate tracepoint"
3341 				" probe to kernel_sched_switch\n");
3342 	}
3343 
3344 	kfree(ret_stack_list);
3345 	return ret;
3346 }
3347 
3348 /*
3349  * Hibernation protection.
3350  * The state of the current task is too unstable during
3351  * suspend/resume to disk. We want to protect against that.
3352  */
3353 static int
3354 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3355 							void *unused)
3356 {
3357 	switch (state) {
3358 	case PM_HIBERNATION_PREPARE:
3359 		pause_graph_tracing();
3360 		break;
3361 
3362 	case PM_POST_HIBERNATION:
3363 		unpause_graph_tracing();
3364 		break;
3365 	}
3366 	return NOTIFY_DONE;
3367 }
3368 
3369 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3370 			trace_func_graph_ent_t entryfunc)
3371 {
3372 	int ret = 0;
3373 
3374 	mutex_lock(&ftrace_lock);
3375 
3376 	/* we currently allow only one tracer registered at a time */
3377 	if (ftrace_graph_active) {
3378 		ret = -EBUSY;
3379 		goto out;
3380 	}
3381 
3382 	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3383 	register_pm_notifier(&ftrace_suspend_notifier);
3384 
3385 	ftrace_graph_active++;
3386 	ret = start_graph_tracing();
3387 	if (ret) {
3388 		ftrace_graph_active--;
3389 		goto out;
3390 	}
3391 
3392 	ftrace_graph_return = retfunc;
3393 	ftrace_graph_entry = entryfunc;
3394 
3395 	ftrace_startup(FTRACE_START_FUNC_RET);
3396 
3397 out:
3398 	mutex_unlock(&ftrace_lock);
3399 	return ret;
3400 }
3401 
3402 void unregister_ftrace_graph(void)
3403 {
3404 	mutex_lock(&ftrace_lock);
3405 
3406 	if (unlikely(!ftrace_graph_active))
3407 		goto out;
3408 
3409 	ftrace_graph_active--;
3410 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3411 	ftrace_graph_entry = ftrace_graph_entry_stub;
3412 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3413 	unregister_pm_notifier(&ftrace_suspend_notifier);
3414 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3415 
3416  out:
3417 	mutex_unlock(&ftrace_lock);
3418 }
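/*
 * Illustrative sketch with hypothetical names: a graph tracer registers
 * an entry handler and a return handler.  The entry handler returns
 * nonzero to trace the function and zero to skip it; the callback
 * typedefs live in linux/ftrace.h.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */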
3419 
3420 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3421 
3422 static void
3423 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3424 {
3425 	atomic_set(&t->tracing_graph_pause, 0);
3426 	atomic_set(&t->trace_overrun, 0);
3427 	t->ftrace_timestamp = 0;
3428 	/* make curr_ret_stack visible before we add the ret_stack */
3429 	smp_wmb();
3430 	t->ret_stack = ret_stack;
3431 }
3432 
3433 /*
3434  * Allocate a return stack for the idle task. May be the first
3435  * time through, or it may be done by CPU hotplug online.
3436  */
3437 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
3438 {
3439 	t->curr_ret_stack = -1;
3440 	/*
3441 	 * The idle task has no parent, it either has its own
3442 	 * stack or no stack at all.
3443 	 */
3444 	if (t->ret_stack)
3445 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
3446 
3447 	if (ftrace_graph_active) {
3448 		struct ftrace_ret_stack *ret_stack;
3449 
3450 		ret_stack = per_cpu(idle_ret_stack, cpu);
3451 		if (!ret_stack) {
3452 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3453 					    * sizeof(struct ftrace_ret_stack),
3454 					    GFP_KERNEL);
3455 			if (!ret_stack)
3456 				return;
3457 			per_cpu(idle_ret_stack, cpu) = ret_stack;
3458 		}
3459 		graph_init_task(t, ret_stack);
3460 	}
3461 }
3462 
3463 /* Allocate a return stack for newly created task */
3464 void ftrace_graph_init_task(struct task_struct *t)
3465 {
3466 	/* Make sure we do not use the parent ret_stack */
3467 	t->ret_stack = NULL;
3468 	t->curr_ret_stack = -1;
3469 
3470 	if (ftrace_graph_active) {
3471 		struct ftrace_ret_stack *ret_stack;
3472 
3473 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3474 				* sizeof(struct ftrace_ret_stack),
3475 				GFP_KERNEL);
3476 		if (!ret_stack)
3477 			return;
3478 		graph_init_task(t, ret_stack);
3479 	}
3480 }
3481 
3482 void ftrace_graph_exit_task(struct task_struct *t)
3483 {
3484 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
3485 
3486 	t->ret_stack = NULL;
3487 	/* NULL must become visible to IRQs before we free it: */
3488 	barrier();
3489 
3490 	kfree(ret_stack);
3491 }
3492 
3493 void ftrace_graph_stop(void)
3494 {
3495 	ftrace_stop();
3496 }
3497 #endif
3498