/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

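	/*
	 * ring_buffer_consume() pops one event at a time until the per-cpu
	 * buffer is empty; the NULL arguments mean we ignore the event
	 * timestamp and the lost-events count here.
	 */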
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer has a size of trace_buf_size; if we
		 * loop more times than that, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken and is constantly filling
	 * the buffer, this will run forever and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip)
{
	trace_selftest_test_dyn_cnt++;
}

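/*
 * Four ftrace_ops for the dynamic ops selftest below. Each one invokes its
 * probe function for every traced function that matches its filter;
 * test_global is marked FTRACE_OPS_FL_GLOBAL, so it is attached to the
 * global ops list shared with the function tracer itself.
 */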
static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func			= trace_selftest_test_global_func,
	.flags			= FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/*
	 * Handle the PPC64 '.' symbol prefix: the leading '*' glob
	 * also matches the dot-prefixed names that arch uses.
	 */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
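	/*
	 * The last argument to ftrace_set_filter() is 'reset': non-zero
	 * clears any previously set filter before adding the new match,
	 * zero appends to it. Probe 3's second call passes 0 so that it
	 * keeps function 1 and adds function 2.
	 */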
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
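	/*
	 * test_global has no filter set, so it traces every function;
	 * only check that it fired at all, not an exact count.
	 */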
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

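	/*
	 * Reset the global count so the check after the next call verifies
	 * that the global ops keeps firing alongside the new dynamic ops.
	 */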
	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

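/* Bumped on every traced function entry; checked against the threshold above */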
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only want to roughly detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
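	/* Point the graph tracer at this trace_array before hooking the callbacks */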
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing; the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, making the flip
	 * impossible if a max irqs-off latency is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
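	/* max_tr holds the snapshot saved when the maximum latency was hit */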
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, making the flip
	 * impossible if a max preempt-off latency is being recorded in
	 * parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip:
	 * tracing_stop() disables the tr and max buffers, making the flip
	 * impossible if a max irqs/preempt-off latency is being recorded
	 * in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the waiting parent know we now have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
