/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

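/*
 * Check that an entry pulled from the ring buffer has a type the
 * selftests know how to produce; anything else means the buffer
 * contents are corrupted.
 */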
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

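/*
 * Consume every event queued on one CPU's ring buffer and make sure
 * each one is a valid trace entry. On any sign of corruption, disable
 * tracing and return an error.
 */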
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

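/* Print a consistent warning when a tracer's init() callback fails */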
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest,
 * from which this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
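/*
 * Verify the irqsoff tracer: disable interrupts for a short, known
 * window and check that a max-latency trace was recorded.
 */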
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
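/*
 * Verify the preemptoff tracer: disable preemption for a short, known
 * window and check that a max-latency trace was recorded.
 */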
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
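/*
 * Verify the preemptirqsoff tracer: run the irqs+preempt off window
 * twice, releasing preemption and interrupts in opposite orders, and
 * check that a max-latency trace was recorded each time.
 */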
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
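/*
 * Helper kthread for the wakeup tracer test: raise itself to an RT
 * priority, signal the test that the priority is set, then sleep
 * until the test wakes it.
 */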
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we now have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

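/*
 * Verify the wakeup tracer: create an RT thread, wake it, and check
 * that the wakeup latency was recorded in the max-latency buffer.
 */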
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we
	 * do a wakeup on a task that already is awake. But that
	 * is extremely unlikely, and the worst thing that happens
	 * in such a case is that we disable tracing. Honestly, if
	 * this race does happen, something is horribly wrong with
	 * the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
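/*
 * Verify the sched_switch tracer: let the system run briefly and
 * check that context-switch events landed in the trace buffer.
 */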
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
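/*
 * Verify the branch tracer: let the system run briefly and check
 * that likely/unlikely branch events landed in the trace buffer.
 */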
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */