1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
45 #include <linux/fs.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
52
53 #include "trace.h"
54 #include "trace_output.h"
55
56 /*
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
59 */
60 bool ring_buffer_expanded;
61
62 /*
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring-buffer to count the
65 * entries inserted during the selftest, although concurrent
66 * insertions into the ring-buffer, such as trace_printk, could occur
67 * at the same time, giving false positive or negative results.
68 */
69 static bool __read_mostly tracing_selftest_running;
70
71 /*
72 * If boot-time tracing including tracers/events via kernel cmdline
73 * is running, we do not want to run SELFTEST.
74 */
75 bool __read_mostly tracing_selftest_disabled;
76
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
79 {
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
83 }
84 }
85 #endif
86
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
95 { }
96 };
97
98 static int
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
100 {
101 return 0;
102 }
103
104 /*
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event
107 * occurred.
108 */
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
110
111 /*
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will turn to zero if the initialization
114 * of the tracer is successful. But that is the only place that sets
115 * this back to zero.
116 */
117 static int tracing_disabled = 1;
118
119 cpumask_var_t __read_mostly tracing_buffer_mask;
120
121 /*
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
123 *
124 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
125 * is set, then ftrace_dump is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
128 * serial console.
129 *
130 * It is off by default, but you can enable it either by specifying
131 * "ftrace_dump_on_oops" on the kernel command line, or by setting
132 * /proc/sys/kernel/ftrace_dump_on_oops.
133 * Set it to 1 to dump the buffers of all CPUs.
134 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
135 */
136
137 enum ftrace_dump_mode ftrace_dump_on_oops;
138
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
141
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
145 struct module *mod;
146 unsigned long length;
147 };
148
149 union trace_eval_map_item;
150
151 struct trace_eval_map_tail {
152 /*
153 * "end" is first and points to NULL as it must be different
154 * than "mod" or "eval_string"
155 */
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
158 };
159
160 static DEFINE_MUTEX(trace_eval_mutex);
161
162 /*
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
168 */
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
173 };
174
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
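
/*
 * Illustrative sketch only (not a definition used by the code): per the
 * description above, one saved block of N maps is laid out as
 *
 *	[ head: .mod, .length = N ][ map 0 ] ... [ map N-1 ][ tail: .next ]
 *
 * so a walker reads the head, steps over N map entries, and then follows
 * tail.next to the next saved block (or stops at the NULL "end").
 */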
177
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
182
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
186
187 static bool allocate_snapshot;
188 static bool snapshot_at_boot;
189
190 static int __init set_cmdline_ftrace(char *str)
191 {
192 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
193 default_bootup_tracer = bootup_tracer_buf;
194 /* We are using ftrace early, expand it */
195 ring_buffer_expanded = true;
196 return 1;
197 }
198 __setup("ftrace=", set_cmdline_ftrace);
199
200 static int __init set_ftrace_dump_on_oops(char *str)
201 {
202 if (*str++ != '=' || !*str || !strcmp("1", str)) {
203 ftrace_dump_on_oops = DUMP_ALL;
204 return 1;
205 }
206
207 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
208 ftrace_dump_on_oops = DUMP_ORIG;
209 return 1;
210 }
211
212 return 0;
213 }
214 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
215
216 static int __init stop_trace_on_warning(char *str)
217 {
218 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
219 __disable_trace_on_warning = 1;
220 return 1;
221 }
222 __setup("traceoff_on_warning", stop_trace_on_warning);
223
224 static int __init boot_alloc_snapshot(char *str)
225 {
226 allocate_snapshot = true;
227 /* We also need the main ring buffer expanded */
228 ring_buffer_expanded = true;
229 return 1;
230 }
231 __setup("alloc_snapshot", boot_alloc_snapshot);
232
233
234 static int __init boot_snapshot(char *str)
235 {
236 snapshot_at_boot = true;
237 boot_alloc_snapshot(str);
238 return 1;
239 }
240 __setup("ftrace_boot_snapshot", boot_snapshot);
241
242
243 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
244
245 static int __init set_trace_boot_options(char *str)
246 {
247 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
248 return 1;
249 }
250 __setup("trace_options=", set_trace_boot_options);
251
252 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
253 static char *trace_boot_clock __initdata;
254
255 static int __init set_trace_boot_clock(char *str)
256 {
257 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
258 trace_boot_clock = trace_boot_clock_buf;
259 return 1;
260 }
261 __setup("trace_clock=", set_trace_boot_clock);
262
263 static int __init set_tracepoint_printk(char *str)
264 {
265 /* Ignore the "tp_printk_stop_on_boot" param */
266 if (*str == '_')
267 return 0;
268
269 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
270 tracepoint_printk = 1;
271 return 1;
272 }
273 __setup("tp_printk", set_tracepoint_printk);
274
275 static int __init set_tracepoint_printk_stop(char *str)
276 {
277 tracepoint_printk_stop_on_boot = true;
278 return 1;
279 }
280 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
281
282 unsigned long long ns2usecs(u64 nsec)
283 {
284 nsec += 500;
285 do_div(nsec, 1000);
286 return nsec;
287 }
288
289 static void
290 trace_process_export(struct trace_export *export,
291 struct ring_buffer_event *event, int flag)
292 {
293 struct trace_entry *entry;
294 unsigned int size = 0;
295
296 if (export->flags & flag) {
297 entry = ring_buffer_event_data(event);
298 size = ring_buffer_event_length(event);
299 export->write(export, entry, size);
300 }
301 }
302
303 static DEFINE_MUTEX(ftrace_export_lock);
304
305 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
306
307 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
308 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
309 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
310
311 static inline void ftrace_exports_enable(struct trace_export *export)
312 {
313 if (export->flags & TRACE_EXPORT_FUNCTION)
314 static_branch_inc(&trace_function_exports_enabled);
315
316 if (export->flags & TRACE_EXPORT_EVENT)
317 static_branch_inc(&trace_event_exports_enabled);
318
319 if (export->flags & TRACE_EXPORT_MARKER)
320 static_branch_inc(&trace_marker_exports_enabled);
321 }
322
323 static inline void ftrace_exports_disable(struct trace_export *export)
324 {
325 if (export->flags & TRACE_EXPORT_FUNCTION)
326 static_branch_dec(&trace_function_exports_enabled);
327
328 if (export->flags & TRACE_EXPORT_EVENT)
329 static_branch_dec(&trace_event_exports_enabled);
330
331 if (export->flags & TRACE_EXPORT_MARKER)
332 static_branch_dec(&trace_marker_exports_enabled);
333 }
334
335 static void ftrace_exports(struct ring_buffer_event *event, int flag)
336 {
337 struct trace_export *export;
338
339 preempt_disable_notrace();
340
341 export = rcu_dereference_raw_check(ftrace_exports_list);
342 while (export) {
343 trace_process_export(export, event, flag);
344 export = rcu_dereference_raw_check(export->next);
345 }
346
347 preempt_enable_notrace();
348 }
349
350 static inline void
351 add_trace_export(struct trace_export **list, struct trace_export *export)
352 {
353 rcu_assign_pointer(export->next, *list);
354 /*
355 * We are adding the export to the list, but another
356 * CPU might be walking that list. We need to make sure
357 * the export->next pointer is valid before another CPU sees
358 * the export pointer included in the list.
359 */
360 rcu_assign_pointer(*list, export);
361 }
362
363 static inline int
364 rm_trace_export(struct trace_export **list, struct trace_export *export)
365 {
366 struct trace_export **p;
367
368 for (p = list; *p != NULL; p = &(*p)->next)
369 if (*p == export)
370 break;
371
372 if (*p != export)
373 return -1;
374
375 rcu_assign_pointer(*p, (*p)->next);
376
377 return 0;
378 }
379
380 static inline void
381 add_ftrace_export(struct trace_export **list, struct trace_export *export)
382 {
383 ftrace_exports_enable(export);
384
385 add_trace_export(list, export);
386 }
387
388 static inline int
389 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
390 {
391 int ret;
392
393 ret = rm_trace_export(list, export);
394 ftrace_exports_disable(export);
395
396 return ret;
397 }
398
399 int register_ftrace_export(struct trace_export *export)
400 {
401 if (WARN_ON_ONCE(!export->write))
402 return -1;
403
404 mutex_lock(&ftrace_export_lock);
405
406 add_ftrace_export(&ftrace_exports_list, export);
407
408 mutex_unlock(&ftrace_export_lock);
409
410 return 0;
411 }
412 EXPORT_SYMBOL_GPL(register_ftrace_export);
413
414 int unregister_ftrace_export(struct trace_export *export)
415 {
416 int ret;
417
418 mutex_lock(&ftrace_export_lock);
419
420 ret = rm_ftrace_export(&ftrace_exports_list, export);
421
422 mutex_unlock(&ftrace_export_lock);
423
424 return ret;
425 }
426 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
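
/*
 * Illustrative usage only (a sketch; the callback and variable names are
 * made up for the example, and the write() prototype is assumed to match
 * struct trace_export in <linux/trace.h>):
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		... push the raw trace entry to some transport ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */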
427
428 /* trace_flags holds trace_options default values */
429 #define TRACE_DEFAULT_FLAGS \
430 (FUNCTION_DEFAULT_FLAGS | \
431 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
432 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
433 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
434 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
435 TRACE_ITER_HASH_PTR)
436
437 /* trace_options that are only supported by global_trace */
438 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
439 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
440
441 /* trace_flags that are default zero for instances */
442 #define ZEROED_TRACE_FLAGS \
443 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
444
445 /*
446 * The global_trace is the descriptor that holds the top-level tracing
447 * buffers for the live tracing.
448 */
449 static struct trace_array global_trace = {
450 .trace_flags = TRACE_DEFAULT_FLAGS,
451 };
452
453 LIST_HEAD(ftrace_trace_arrays);
454
455 int trace_array_get(struct trace_array *this_tr)
456 {
457 struct trace_array *tr;
458 int ret = -ENODEV;
459
460 mutex_lock(&trace_types_lock);
461 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
462 if (tr == this_tr) {
463 tr->ref++;
464 ret = 0;
465 break;
466 }
467 }
468 mutex_unlock(&trace_types_lock);
469
470 return ret;
471 }
472
473 static void __trace_array_put(struct trace_array *this_tr)
474 {
475 WARN_ON(!this_tr->ref);
476 this_tr->ref--;
477 }
478
479 /**
480 * trace_array_put - Decrement the reference counter for this trace array.
481 * @this_tr : pointer to the trace array
482 *
483 * NOTE: Use this when we no longer need the trace array returned by
484 * trace_array_get_by_name(). This ensures the trace array can be later
485 * destroyed.
486 *
487 */
488 void trace_array_put(struct trace_array *this_tr)
489 {
490 if (!this_tr)
491 return;
492
493 mutex_lock(&trace_types_lock);
494 __trace_array_put(this_tr);
495 mutex_unlock(&trace_types_lock);
496 }
497 EXPORT_SYMBOL_GPL(trace_array_put);
498
499 int tracing_check_open_get_tr(struct trace_array *tr)
500 {
501 int ret;
502
503 ret = security_locked_down(LOCKDOWN_TRACEFS);
504 if (ret)
505 return ret;
506
507 if (tracing_disabled)
508 return -ENODEV;
509
510 if (tr && trace_array_get(tr) < 0)
511 return -ENODEV;
512
513 return 0;
514 }
515
516 int call_filter_check_discard(struct trace_event_call *call, void *rec,
517 struct trace_buffer *buffer,
518 struct ring_buffer_event *event)
519 {
520 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
521 !filter_match_preds(call->filter, rec)) {
522 __trace_event_discard_commit(buffer, event);
523 return 1;
524 }
525
526 return 0;
527 }
528
529 /**
530 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
531 * @filtered_pids: The list of pids to check
532 * @search_pid: The PID to find in @filtered_pids
533 *
534 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
535 */
536 bool
537 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
538 {
539 return trace_pid_list_is_set(filtered_pids, search_pid);
540 }
541
542 /**
543 * trace_ignore_this_task - should a task be ignored for tracing
544 * @filtered_pids: The list of pids to check
545 * @filtered_no_pids: The list of pids not to be traced
546 * @task: The task that should be ignored if not filtered
547 *
548 * Checks if @task should be traced or not from @filtered_pids.
549 * Returns true if @task should *NOT* be traced.
550 * Returns false if @task should be traced.
551 */
552 bool
553 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
554 struct trace_pid_list *filtered_no_pids,
555 struct task_struct *task)
556 {
557 /*
558 * If filtered_no_pids is not empty, and the task's pid is listed
559 * in filtered_no_pids, then return true.
560 * Otherwise, if filtered_pids is empty, that means we can
561 * trace all tasks. If it has content, then only trace pids
562 * within filtered_pids.
563 */
564
565 return (filtered_pids &&
566 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
567 (filtered_no_pids &&
568 trace_find_filtered_pid(filtered_no_pids, task->pid));
569 }
570
571 /**
572 * trace_filter_add_remove_task - Add or remove a task from a pid_list
573 * @pid_list: The list to modify
574 * @self: The current task for fork or NULL for exit
575 * @task: The task to add or remove
576 *
577 * If adding a task, if @self is defined, the task is only added if @self
578 * is also included in @pid_list. This happens on fork and tasks should
579 * only be added when the parent is listed. If @self is NULL, then the
580 * @task pid will be removed from the list, which would happen on exit
581 * of a task.
582 */
583 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
584 struct task_struct *self,
585 struct task_struct *task)
586 {
587 if (!pid_list)
588 return;
589
590 /* For forks, we only add if the forking task is listed */
591 if (self) {
592 if (!trace_find_filtered_pid(pid_list, self->pid))
593 return;
594 }
595
596 /* "self" is set for forks, and NULL for exits */
597 if (self)
598 trace_pid_list_set(pid_list, task->pid);
599 else
600 trace_pid_list_clear(pid_list, task->pid);
601 }
602
603 /**
604 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
605 * @pid_list: The pid list to show
606 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
607 * @pos: The position of the file
608 *
609 * This is used by the seq_file "next" operation to iterate the pids
610 * listed in a trace_pid_list structure.
611 *
612 * Returns the pid+1 as we want to display pid of zero, but NULL would
613 * stop the iteration.
614 */
615 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
616 {
617 long pid = (unsigned long)v;
618 unsigned int next;
619
620 (*pos)++;
621
622 /* pid already is +1 of the actual previous bit */
623 if (trace_pid_list_next(pid_list, pid, &next) < 0)
624 return NULL;
625
626 pid = next;
627
628 /* Return pid + 1 to allow zero to be represented */
629 return (void *)(pid + 1);
630 }
631
632 /**
633 * trace_pid_start - Used for seq_file to start reading pid lists
634 * @pid_list: The pid list to show
635 * @pos: The position of the file
636 *
637 * This is used by seq_file "start" operation to start the iteration
638 * of listing pids.
639 *
640 * Returns the pid+1 as we want to display pid of zero, but NULL would
641 * stop the iteration.
642 */
643 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
644 {
645 unsigned long pid;
646 unsigned int first;
647 loff_t l = 0;
648
649 if (trace_pid_list_first(pid_list, &first) < 0)
650 return NULL;
651
652 pid = first;
653
654 /* Return pid + 1 so that zero can be the exit value */
655 for (pid++; pid && l < *pos;
656 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
657 ;
658 return (void *)pid;
659 }
660
661 /**
662 * trace_pid_show - show the current pid in seq_file processing
663 * @m: The seq_file structure to write into
664 * @v: A void pointer of the pid (+1) value to display
665 *
666 * Can be directly used by seq_file operations to display the current
667 * pid value.
668 */
669 int trace_pid_show(struct seq_file *m, void *v)
670 {
671 unsigned long pid = (unsigned long)v - 1;
672
673 seq_printf(m, "%lu\n", pid);
674 return 0;
675 }
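
/*
 * Illustrative wiring only (a sketch; the wrapper and seq_operations
 * names are made up): callers typically look up their pid_list in thin
 * seq_file callbacks and delegate to the helpers above, e.g.
 *
 *	static void *my_pids_start(struct seq_file *m, loff_t *pos)
 *	{
 *		struct trace_pid_list *pid_list = m->private;
 *
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static const struct seq_operations my_pids_seq_ops = {
 *		.start	= my_pids_start,
 *		.next	= my_pids_next,		(wrapper around trace_pid_next)
 *		.stop	= my_pids_stop,
 *		.show	= trace_pid_show,
 *	};
 */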
676
677 /* 128 should be much more than enough */
678 #define PID_BUF_SIZE 127
679
680 int trace_pid_write(struct trace_pid_list *filtered_pids,
681 struct trace_pid_list **new_pid_list,
682 const char __user *ubuf, size_t cnt)
683 {
684 struct trace_pid_list *pid_list;
685 struct trace_parser parser;
686 unsigned long val;
687 int nr_pids = 0;
688 ssize_t read = 0;
689 ssize_t ret;
690 loff_t pos;
691 pid_t pid;
692
693 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
694 return -ENOMEM;
695
696 /*
697 * Always create a new list. The write is an all-or-nothing
698 * operation: a new list is built whenever the user adds new
699 * pids, and if the operation fails, the current list is
700 * not modified.
701 */
702 pid_list = trace_pid_list_alloc();
703 if (!pid_list) {
704 trace_parser_put(&parser);
705 return -ENOMEM;
706 }
707
708 if (filtered_pids) {
709 /* copy the current bits to the new max */
710 ret = trace_pid_list_first(filtered_pids, &pid);
711 while (!ret) {
712 trace_pid_list_set(pid_list, pid);
713 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
714 nr_pids++;
715 }
716 }
717
718 ret = 0;
719 while (cnt > 0) {
720
721 pos = 0;
722
723 ret = trace_get_user(&parser, ubuf, cnt, &pos);
724 if (ret < 0)
725 break;
726
727 read += ret;
728 ubuf += ret;
729 cnt -= ret;
730
731 if (!trace_parser_loaded(&parser))
732 break;
733
734 ret = -EINVAL;
735 if (kstrtoul(parser.buffer, 0, &val))
736 break;
737
738 pid = (pid_t)val;
739
740 if (trace_pid_list_set(pid_list, pid) < 0) {
741 ret = -1;
742 break;
743 }
744 nr_pids++;
745
746 trace_parser_clear(&parser);
747 ret = 0;
748 }
749 trace_parser_put(&parser);
750
751 if (ret < 0) {
752 trace_pid_list_free(pid_list);
753 return ret;
754 }
755
756 if (!nr_pids) {
757 /* Cleared the list of pids */
758 trace_pid_list_free(pid_list);
759 pid_list = NULL;
760 }
761
762 *new_pid_list = pid_list;
763
764 return read;
765 }
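
/*
 * Illustrative use from user space (assuming the usual tracefs mount
 * point): writing a whitespace-separated list of pids to a file backed
 * by this helper, such as set_event_pid, builds a new filter list, and
 * writing only whitespace clears it (the !nr_pids case above), e.g.
 *
 *	echo 123 456 > /sys/kernel/tracing/set_event_pid
 *	echo > /sys/kernel/tracing/set_event_pid
 */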
766
767 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
768 {
769 u64 ts;
770
771 /* Early boot up does not have a buffer yet */
772 if (!buf->buffer)
773 return trace_clock_local();
774
775 ts = ring_buffer_time_stamp(buf->buffer);
776 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
777
778 return ts;
779 }
780
781 u64 ftrace_now(int cpu)
782 {
783 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
784 }
785
786 /**
787 * tracing_is_enabled - Show if global_trace has been enabled
788 *
789 * Shows if the global trace has been enabled or not. It uses the
790 * mirror flag "buffer_disabled" to be used in fast paths such as for
791 * the irqsoff tracer. But it may be inaccurate due to races. If you
792 * need to know the accurate state, use tracing_is_on() which is a little
793 * slower, but accurate.
794 */
795 int tracing_is_enabled(void)
796 {
797 /*
798 * For quick access (irqsoff uses this in fast path), just
799 * return the mirror variable of the state of the ring buffer.
800 * It's a little racy, but we don't really care.
801 */
802 smp_rmb();
803 return !global_trace.buffer_disabled;
804 }
805
806 /*
807 * trace_buf_size is the size in bytes that is allocated
808 * for a buffer. Note, the number of bytes is always rounded
809 * to page size.
810 *
811 * This number is purposely set to a low value of 16384 so that,
812 * if a dump on oops happens, we do not have to wait for an
813 * excessive amount of output. It is configurable at both boot
814 * time and run time anyway.
815 */
816 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
817
818 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
819
820 /* trace_types holds a link list of available tracers. */
821 static struct tracer *trace_types __read_mostly;
822
823 /*
824 * trace_types_lock is used to protect the trace_types list.
825 */
826 DEFINE_MUTEX(trace_types_lock);
827
828 /*
829 * serialize the access of the ring buffer
830 *
831 * The ring buffer serializes readers, but that is only low level protection.
832 * The validity of the events (returned by ring_buffer_peek() etc.)
833 * is not protected by the ring buffer.
834 *
835 * The content of events may become garbage if we allow other processes to
836 * consume these events concurrently:
837 * A) the page of the consumed events may become a normal page
838 * (not a reader page) in the ring buffer, and this page will be rewritten
839 * by the events producer.
840 * B) the page of the consumed events may become a page for splice_read,
841 * and this page will be returned to the system.
842 *
843 * These primitives allow multiple processes to access different per-CPU
844 * ring buffers concurrently.
845 *
846 * These primitives don't distinguish read-only and read-consume access.
847 * Multiple read-only accesses are also serialized.
848 */
849
850 #ifdef CONFIG_SMP
851 static DECLARE_RWSEM(all_cpu_access_lock);
852 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
853
854 static inline void trace_access_lock(int cpu)
855 {
856 if (cpu == RING_BUFFER_ALL_CPUS) {
857 /* gain it for accessing the whole ring buffer. */
858 down_write(&all_cpu_access_lock);
859 } else {
860 /* gain it for accessing a cpu ring buffer. */
861
862 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
863 down_read(&all_cpu_access_lock);
864
865 /* Secondly block other access to this @cpu ring buffer. */
866 mutex_lock(&per_cpu(cpu_access_lock, cpu));
867 }
868 }
869
870 static inline void trace_access_unlock(int cpu)
871 {
872 if (cpu == RING_BUFFER_ALL_CPUS) {
873 up_write(&all_cpu_access_lock);
874 } else {
875 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
876 up_read(&all_cpu_access_lock);
877 }
878 }
879
880 static inline void trace_access_lock_init(void)
881 {
882 int cpu;
883
884 for_each_possible_cpu(cpu)
885 mutex_init(&per_cpu(cpu_access_lock, cpu));
886 }
887
888 #else
889
890 static DEFINE_MUTEX(access_lock);
891
892 static inline void trace_access_lock(int cpu)
893 {
894 (void)cpu;
895 mutex_lock(&access_lock);
896 }
897
898 static inline void trace_access_unlock(int cpu)
899 {
900 (void)cpu;
901 mutex_unlock(&access_lock);
902 }
903
904 static inline void trace_access_lock_init(void)
905 {
906 }
907
908 #endif
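
/*
 * Illustrative read-side pattern only (a sketch of how consumers in this
 * file use the primitives above):
 *
 *	trace_access_lock(cpu);		(RING_BUFFER_ALL_CPUS for every CPU)
 *	... peek at or consume events from the @cpu ring buffer ...
 *	trace_access_unlock(cpu);
 */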
909
910 #ifdef CONFIG_STACKTRACE
911 static void __ftrace_trace_stack(struct trace_buffer *buffer,
912 unsigned int trace_ctx,
913 int skip, struct pt_regs *regs);
914 static inline void ftrace_trace_stack(struct trace_array *tr,
915 struct trace_buffer *buffer,
916 unsigned int trace_ctx,
917 int skip, struct pt_regs *regs);
918
919 #else
920 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
921 unsigned int trace_ctx,
922 int skip, struct pt_regs *regs)
923 {
924 }
925 static inline void ftrace_trace_stack(struct trace_array *tr,
926 struct trace_buffer *buffer,
927 unsigned long trace_ctx,
928 int skip, struct pt_regs *regs)
929 {
930 }
931
932 #endif
933
934 static __always_inline void
935 trace_event_setup(struct ring_buffer_event *event,
936 int type, unsigned int trace_ctx)
937 {
938 struct trace_entry *ent = ring_buffer_event_data(event);
939
940 tracing_generic_entry_update(ent, type, trace_ctx);
941 }
942
943 static __always_inline struct ring_buffer_event *
944 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
945 int type,
946 unsigned long len,
947 unsigned int trace_ctx)
948 {
949 struct ring_buffer_event *event;
950
951 event = ring_buffer_lock_reserve(buffer, len);
952 if (event != NULL)
953 trace_event_setup(event, type, trace_ctx);
954
955 return event;
956 }
957
958 void tracer_tracing_on(struct trace_array *tr)
959 {
960 if (tr->array_buffer.buffer)
961 ring_buffer_record_on(tr->array_buffer.buffer);
962 /*
963 * This flag is looked at when buffers haven't been allocated
964 * yet, or by some tracers (like irqsoff), that just want to
965 * know if the ring buffer has been disabled, but it can handle
966 * races where it gets disabled while we still do a record.
967 * As the check is in the fast path of the tracers, it is more
968 * important to be fast than accurate.
969 */
970 tr->buffer_disabled = 0;
971 /* Make the flag seen by readers */
972 smp_wmb();
973 }
974
975 /**
976 * tracing_on - enable tracing buffers
977 *
978 * This function enables tracing buffers that may have been
979 * disabled with tracing_off.
980 */
981 void tracing_on(void)
982 {
983 tracer_tracing_on(&global_trace);
984 }
985 EXPORT_SYMBOL_GPL(tracing_on);
986
987
988 static __always_inline void
989 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
990 {
991 __this_cpu_write(trace_taskinfo_save, true);
992
993 /* If this is the temp buffer, we need to commit fully */
994 if (this_cpu_read(trace_buffered_event) == event) {
995 /* Length is in event->array[0] */
996 ring_buffer_write(buffer, event->array[0], &event->array[1]);
997 /* Release the temp buffer */
998 this_cpu_dec(trace_buffered_event_cnt);
999 /* ring_buffer_unlock_commit() enables preemption */
1000 preempt_enable_notrace();
1001 } else
1002 ring_buffer_unlock_commit(buffer, event);
1003 }
1004
1005 /**
1006 * __trace_puts - write a constant string into the trace buffer.
1007 * @ip: The address of the caller
1008 * @str: The constant string to write
1009 * @size: The size of the string.
1010 */
1011 int __trace_puts(unsigned long ip, const char *str, int size)
1012 {
1013 struct ring_buffer_event *event;
1014 struct trace_buffer *buffer;
1015 struct print_entry *entry;
1016 unsigned int trace_ctx;
1017 int alloc;
1018
1019 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1020 return 0;
1021
1022 if (unlikely(tracing_selftest_running || tracing_disabled))
1023 return 0;
1024
1025 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1026
1027 trace_ctx = tracing_gen_ctx();
1028 buffer = global_trace.array_buffer.buffer;
1029 ring_buffer_nest_start(buffer);
1030 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1031 trace_ctx);
1032 if (!event) {
1033 size = 0;
1034 goto out;
1035 }
1036
1037 entry = ring_buffer_event_data(event);
1038 entry->ip = ip;
1039
1040 memcpy(&entry->buf, str, size);
1041
1042 /* Add a newline if necessary */
1043 if (entry->buf[size - 1] != '\n') {
1044 entry->buf[size] = '\n';
1045 entry->buf[size + 1] = '\0';
1046 } else
1047 entry->buf[size] = '\0';
1048
1049 __buffer_unlock_commit(buffer, event);
1050 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1051 out:
1052 ring_buffer_nest_end(buffer);
1053 return size;
1054 }
1055 EXPORT_SYMBOL_GPL(__trace_puts);
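
/*
 * Illustrative usage only: callers normally go through the trace_puts()
 * macro rather than calling this directly, e.g.
 *
 *	trace_puts("reached the slow path\n");
 *
 * which passes the caller's _THIS_IP_ down here (or to __trace_bputs()
 * below when the string is a compile-time constant).
 */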
1056
1057 /**
1058 * __trace_bputs - write the pointer to a constant string into trace buffer
1059 * @ip: The address of the caller
1060 * @str: The constant string to write to the buffer to
1061 */
1062 int __trace_bputs(unsigned long ip, const char *str)
1063 {
1064 struct ring_buffer_event *event;
1065 struct trace_buffer *buffer;
1066 struct bputs_entry *entry;
1067 unsigned int trace_ctx;
1068 int size = sizeof(struct bputs_entry);
1069 int ret = 0;
1070
1071 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1072 return 0;
1073
1074 if (unlikely(tracing_selftest_running || tracing_disabled))
1075 return 0;
1076
1077 trace_ctx = tracing_gen_ctx();
1078 buffer = global_trace.array_buffer.buffer;
1079
1080 ring_buffer_nest_start(buffer);
1081 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1082 trace_ctx);
1083 if (!event)
1084 goto out;
1085
1086 entry = ring_buffer_event_data(event);
1087 entry->ip = ip;
1088 entry->str = str;
1089
1090 __buffer_unlock_commit(buffer, event);
1091 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1092
1093 ret = 1;
1094 out:
1095 ring_buffer_nest_end(buffer);
1096 return ret;
1097 }
1098 EXPORT_SYMBOL_GPL(__trace_bputs);
1099
1100 #ifdef CONFIG_TRACER_SNAPSHOT
1101 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1102 void *cond_data)
1103 {
1104 struct tracer *tracer = tr->current_trace;
1105 unsigned long flags;
1106
1107 if (in_nmi()) {
1108 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1109 internal_trace_puts("*** snapshot is being ignored ***\n");
1110 return;
1111 }
1112
1113 if (!tr->allocated_snapshot) {
1114 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1115 internal_trace_puts("*** stopping trace here! ***\n");
1116 tracing_off();
1117 return;
1118 }
1119
1120 /* Note, snapshot can not be used when the tracer uses it */
1121 if (tracer->use_max_tr) {
1122 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1123 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1124 return;
1125 }
1126
1127 local_irq_save(flags);
1128 update_max_tr(tr, current, smp_processor_id(), cond_data);
1129 local_irq_restore(flags);
1130 }
1131
1132 void tracing_snapshot_instance(struct trace_array *tr)
1133 {
1134 tracing_snapshot_instance_cond(tr, NULL);
1135 }
1136
1137 /**
1138 * tracing_snapshot - take a snapshot of the current buffer.
1139 *
1140 * This causes a swap between the snapshot buffer and the current live
1141 * tracing buffer. You can use this to take snapshots of the live
1142 * trace when some condition is triggered, but continue to trace.
1143 *
1144 * Note, make sure to allocate the snapshot either with
1145 * tracing_snapshot_alloc(), or manually with:
1146 * echo 1 > /sys/kernel/debug/tracing/snapshot
1147 *
1148 * If the snapshot buffer is not allocated, it will stop tracing.
1149 * Basically making a permanent snapshot.
1150 */
1151 void tracing_snapshot(void)
1152 {
1153 struct trace_array *tr = &global_trace;
1154
1155 tracing_snapshot_instance(tr);
1156 }
1157 EXPORT_SYMBOL_GPL(tracing_snapshot);
1158
1159 /**
1160 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1161 * @tr: The tracing instance to snapshot
1162 * @cond_data: The data to be tested conditionally, and possibly saved
1163 *
1164 * This is the same as tracing_snapshot() except that the snapshot is
1165 * conditional - the snapshot will only happen if the
1166 * cond_snapshot.update() implementation receiving the cond_data
1167 * returns true, which means that the trace array's cond_snapshot
1168 * update() operation used the cond_data to determine whether the
1169 * snapshot should be taken, and if it was, presumably saved it along
1170 * with the snapshot.
1171 */
1172 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1173 {
1174 tracing_snapshot_instance_cond(tr, cond_data);
1175 }
1176 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1177
1178 /**
1179 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1180 * @tr: The tracing instance
1181 *
1182 * When the user enables a conditional snapshot using
1183 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1184 * with the snapshot. This accessor is used to retrieve it.
1185 *
1186 * Should not be called from cond_snapshot.update(), since it takes
1187 * the tr->max_lock lock, which the code calling
1188 * cond_snapshot.update() has already taken.
1189 *
1190 * Returns the cond_data associated with the trace array's snapshot.
1191 */
1192 void *tracing_cond_snapshot_data(struct trace_array *tr)
1193 {
1194 void *cond_data = NULL;
1195
1196 local_irq_disable();
1197 arch_spin_lock(&tr->max_lock);
1198
1199 if (tr->cond_snapshot)
1200 cond_data = tr->cond_snapshot->cond_data;
1201
1202 arch_spin_unlock(&tr->max_lock);
1203 local_irq_enable();
1204
1205 return cond_data;
1206 }
1207 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1208
1209 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1210 struct array_buffer *size_buf, int cpu_id);
1211 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1212
1213 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1214 {
1215 int ret;
1216
1217 if (!tr->allocated_snapshot) {
1218
1219 /* allocate spare buffer */
1220 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1221 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1222 if (ret < 0)
1223 return ret;
1224
1225 tr->allocated_snapshot = true;
1226 }
1227
1228 return 0;
1229 }
1230
1231 static void free_snapshot(struct trace_array *tr)
1232 {
1233 /*
1234 * We don't free the ring buffer; instead, we resize it because
1235 * the max_tr ring buffer has some state (e.g. ring->clock) and
1236 * we want to preserve it.
1237 */
1238 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1239 set_buffer_entries(&tr->max_buffer, 1);
1240 tracing_reset_online_cpus(&tr->max_buffer);
1241 tr->allocated_snapshot = false;
1242 }
1243
1244 /**
1245 * tracing_alloc_snapshot - allocate snapshot buffer.
1246 *
1247 * This only allocates the snapshot buffer if it isn't already
1248 * allocated - it doesn't also take a snapshot.
1249 *
1250 * This is meant to be used in cases where the snapshot buffer needs
1251 * to be set up for events that can't sleep but need to be able to
1252 * trigger a snapshot.
1253 */
1254 int tracing_alloc_snapshot(void)
1255 {
1256 struct trace_array *tr = &global_trace;
1257 int ret;
1258
1259 ret = tracing_alloc_snapshot_instance(tr);
1260 WARN_ON(ret < 0);
1261
1262 return ret;
1263 }
1264 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1265
1266 /**
1267 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1268 *
1269 * This is similar to tracing_snapshot(), but it will allocate the
1270 * snapshot buffer if it isn't already allocated. Use this only
1271 * where it is safe to sleep, as the allocation may sleep.
1272 *
1273 * This causes a swap between the snapshot buffer and the current live
1274 * tracing buffer. You can use this to take snapshots of the live
1275 * trace when some condition is triggered, but continue to trace.
1276 */
1277 void tracing_snapshot_alloc(void)
1278 {
1279 int ret;
1280
1281 ret = tracing_alloc_snapshot();
1282 if (ret < 0)
1283 return;
1284
1285 tracing_snapshot();
1286 }
1287 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
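
/*
 * Illustrative usage only (a sketch; the trigger condition is made up):
 *
 *	tracing_snapshot_alloc();		once, from sleepable context
 *	...
 *	if (unlikely(hit_rare_condition))
 *		tracing_snapshot();		swap live buffer into snapshot
 *
 * The result can then be read from the "snapshot" file in tracefs.
 */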
1288
1289 /**
1290 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1291 * @tr: The tracing instance
1292 * @cond_data: User data to associate with the snapshot
1293 * @update: Implementation of the cond_snapshot update function
1294 *
1295 * Check whether the conditional snapshot for the given instance has
1296 * already been enabled, or if the current tracer is already using a
1297 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1298 * save the cond_data and update function inside.
1299 *
1300 * Returns 0 if successful, error otherwise.
1301 */
1302 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1303 cond_update_fn_t update)
1304 {
1305 struct cond_snapshot *cond_snapshot;
1306 int ret = 0;
1307
1308 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1309 if (!cond_snapshot)
1310 return -ENOMEM;
1311
1312 cond_snapshot->cond_data = cond_data;
1313 cond_snapshot->update = update;
1314
1315 mutex_lock(&trace_types_lock);
1316
1317 ret = tracing_alloc_snapshot_instance(tr);
1318 if (ret)
1319 goto fail_unlock;
1320
1321 if (tr->current_trace->use_max_tr) {
1322 ret = -EBUSY;
1323 goto fail_unlock;
1324 }
1325
1326 /*
1327 * The cond_snapshot can only change to NULL without the
1328 * trace_types_lock. We don't care if we race with it going
1329 * to NULL, but we want to make sure that it's not set to
1330 * something other than NULL when we get here, which we can
1331 * do safely with only holding the trace_types_lock and not
1332 * having to take the max_lock.
1333 */
1334 if (tr->cond_snapshot) {
1335 ret = -EBUSY;
1336 goto fail_unlock;
1337 }
1338
1339 local_irq_disable();
1340 arch_spin_lock(&tr->max_lock);
1341 tr->cond_snapshot = cond_snapshot;
1342 arch_spin_unlock(&tr->max_lock);
1343 local_irq_enable();
1344
1345 mutex_unlock(&trace_types_lock);
1346
1347 return ret;
1348
1349 fail_unlock:
1350 mutex_unlock(&trace_types_lock);
1351 kfree(cond_snapshot);
1352 return ret;
1353 }
1354 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
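
/*
 * Illustrative callback only (a sketch; the names are made up). The
 * update function decides, on each tracing_snapshot_cond() call, whether
 * the snapshot should actually be taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->value > s->threshold;
 *	}
 *
 *	static struct my_state state;
 *	...
 *	tracing_snapshot_cond_enable(tr, &state, my_update);
 */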
1355
1356 /**
1357 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1358 * @tr: The tracing instance
1359 *
1360 * Check whether the conditional snapshot for the given instance is
1361 * enabled; if so, free the cond_snapshot associated with it,
1362 * otherwise return -EINVAL.
1363 *
1364 * Returns 0 if successful, error otherwise.
1365 */
1366 int tracing_snapshot_cond_disable(struct trace_array *tr)
1367 {
1368 int ret = 0;
1369
1370 local_irq_disable();
1371 arch_spin_lock(&tr->max_lock);
1372
1373 if (!tr->cond_snapshot)
1374 ret = -EINVAL;
1375 else {
1376 kfree(tr->cond_snapshot);
1377 tr->cond_snapshot = NULL;
1378 }
1379
1380 arch_spin_unlock(&tr->max_lock);
1381 local_irq_enable();
1382
1383 return ret;
1384 }
1385 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1386 #else
1387 void tracing_snapshot(void)
1388 {
1389 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1390 }
1391 EXPORT_SYMBOL_GPL(tracing_snapshot);
1392 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1393 {
1394 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1395 }
1396 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1397 int tracing_alloc_snapshot(void)
1398 {
1399 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1400 return -ENODEV;
1401 }
1402 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1403 void tracing_snapshot_alloc(void)
1404 {
1405 /* Give warning */
1406 tracing_snapshot();
1407 }
1408 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1409 void *tracing_cond_snapshot_data(struct trace_array *tr)
1410 {
1411 return NULL;
1412 }
1413 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1414 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1415 {
1416 return -ENODEV;
1417 }
1418 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1419 int tracing_snapshot_cond_disable(struct trace_array *tr)
1420 {
1421 return false;
1422 }
1423 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1424 #define free_snapshot(tr) do { } while (0)
1425 #endif /* CONFIG_TRACER_SNAPSHOT */
1426
1427 void tracer_tracing_off(struct trace_array *tr)
1428 {
1429 if (tr->array_buffer.buffer)
1430 ring_buffer_record_off(tr->array_buffer.buffer);
1431 /*
1432 * This flag is looked at when buffers haven't been allocated
1433 * yet, or by some tracers (like irqsoff), that just want to
1434 * know if the ring buffer has been disabled, but it can handle
1435 * races where it gets disabled while we still do a record.
1436 * As the check is in the fast path of the tracers, it is more
1437 * important to be fast than accurate.
1438 */
1439 tr->buffer_disabled = 1;
1440 /* Make the flag seen by readers */
1441 smp_wmb();
1442 }
1443
1444 /**
1445 * tracing_off - turn off tracing buffers
1446 *
1447 * This function stops the tracing buffers from recording data.
1448 * It does not disable any overhead the tracers themselves may
1449 * be causing. This function simply causes all recording to
1450 * the ring buffers to fail.
1451 */
1452 void tracing_off(void)
1453 {
1454 tracer_tracing_off(&global_trace);
1455 }
1456 EXPORT_SYMBOL_GPL(tracing_off);
1457
1458 void disable_trace_on_warning(void)
1459 {
1460 if (__disable_trace_on_warning) {
1461 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1462 "Disabling tracing due to warning\n");
1463 tracing_off();
1464 }
1465 }
1466
1467 /**
1468 * tracer_tracing_is_on - show real state of ring buffer enabled
1469 * @tr : the trace array to know if ring buffer is enabled
1470 *
1471 * Shows real state of the ring buffer if it is enabled or not.
1472 */
1473 bool tracer_tracing_is_on(struct trace_array *tr)
1474 {
1475 if (tr->array_buffer.buffer)
1476 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1477 return !tr->buffer_disabled;
1478 }
1479
1480 /**
1481 * tracing_is_on - show state of ring buffers enabled
1482 */
1483 int tracing_is_on(void)
1484 {
1485 return tracer_tracing_is_on(&global_trace);
1486 }
1487 EXPORT_SYMBOL_GPL(tracing_is_on);
1488
1489 static int __init set_buf_size(char *str)
1490 {
1491 unsigned long buf_size;
1492
1493 if (!str)
1494 return 0;
1495 buf_size = memparse(str, &str);
1496 /*
1497 * nr_entries can not be zero and the startup
1498 * tests require some buffer space. Therefore
1499 * ensure we have at least 4096 bytes of buffer.
1500 */
1501 trace_buf_size = max(4096UL, buf_size);
1502 return 1;
1503 }
1504 __setup("trace_buf_size=", set_buf_size);
1505
1506 static int __init set_tracing_thresh(char *str)
1507 {
1508 unsigned long threshold;
1509 int ret;
1510
1511 if (!str)
1512 return 0;
1513 ret = kstrtoul(str, 0, &threshold);
1514 if (ret < 0)
1515 return 0;
1516 tracing_thresh = threshold * 1000;
1517 return 1;
1518 }
1519 __setup("tracing_thresh=", set_tracing_thresh);
1520
1521 unsigned long nsecs_to_usecs(unsigned long nsecs)
1522 {
1523 return nsecs / 1000;
1524 }
1525
1526 /*
1527 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1528 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1529 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1530 * of strings in the order that the evals (enum) were defined.
1531 */
1532 #undef C
1533 #define C(a, b) b
1534
1535 /* These must match the bit positions in trace_iterator_flags */
1536 static const char *trace_options[] = {
1537 TRACE_FLAGS
1538 NULL
1539 };
1540
1541 static struct {
1542 u64 (*func)(void);
1543 const char *name;
1544 int in_ns; /* is this clock in nanoseconds? */
1545 } trace_clocks[] = {
1546 { trace_clock_local, "local", 1 },
1547 { trace_clock_global, "global", 1 },
1548 { trace_clock_counter, "counter", 0 },
1549 { trace_clock_jiffies, "uptime", 0 },
1550 { trace_clock, "perf", 1 },
1551 { ktime_get_mono_fast_ns, "mono", 1 },
1552 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1553 { ktime_get_boot_fast_ns, "boot", 1 },
1554 { ktime_get_tai_fast_ns, "tai", 1 },
1555 ARCH_TRACE_CLOCKS
1556 };
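
/*
 * The names in this table are what user space writes to the trace_clock
 * file to select a clock, e.g. (illustrative shell usage, assuming the
 * usual tracefs mount point):
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo mono > /sys/kernel/tracing/trace_clock
 */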
1557
1558 bool trace_clock_in_ns(struct trace_array *tr)
1559 {
1560 if (trace_clocks[tr->clock_id].in_ns)
1561 return true;
1562
1563 return false;
1564 }
1565
1566 /*
1567 * trace_parser_get_init - gets the buffer for trace parser
1568 */
1569 int trace_parser_get_init(struct trace_parser *parser, int size)
1570 {
1571 memset(parser, 0, sizeof(*parser));
1572
1573 parser->buffer = kmalloc(size, GFP_KERNEL);
1574 if (!parser->buffer)
1575 return 1;
1576
1577 parser->size = size;
1578 return 0;
1579 }
1580
1581 /*
1582 * trace_parser_put - frees the buffer for trace parser
1583 */
1584 void trace_parser_put(struct trace_parser *parser)
1585 {
1586 kfree(parser->buffer);
1587 parser->buffer = NULL;
1588 }
1589
1590 /*
1591 * trace_get_user - reads the user input string separated by space
1592 * (matched by isspace(ch))
1593 *
1594 * For each string found the 'struct trace_parser' is updated,
1595 * and the function returns.
1596 *
1597 * Returns number of bytes read.
1598 *
1599 * See kernel/trace/trace.h for 'struct trace_parser' details.
1600 */
1601 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1602 size_t cnt, loff_t *ppos)
1603 {
1604 char ch;
1605 size_t read = 0;
1606 ssize_t ret;
1607
1608 if (!*ppos)
1609 trace_parser_clear(parser);
1610
1611 ret = get_user(ch, ubuf++);
1612 if (ret)
1613 goto out;
1614
1615 read++;
1616 cnt--;
1617
1618 /*
1619 * The parser is not finished with the last write,
1620 * continue reading the user input without skipping spaces.
1621 */
1622 if (!parser->cont) {
1623 /* skip white space */
1624 while (cnt && isspace(ch)) {
1625 ret = get_user(ch, ubuf++);
1626 if (ret)
1627 goto out;
1628 read++;
1629 cnt--;
1630 }
1631
1632 parser->idx = 0;
1633
1634 /* only spaces were written */
1635 if (isspace(ch) || !ch) {
1636 *ppos += read;
1637 ret = read;
1638 goto out;
1639 }
1640 }
1641
1642 /* read the non-space input */
1643 while (cnt && !isspace(ch) && ch) {
1644 if (parser->idx < parser->size - 1)
1645 parser->buffer[parser->idx++] = ch;
1646 else {
1647 ret = -EINVAL;
1648 goto out;
1649 }
1650 ret = get_user(ch, ubuf++);
1651 if (ret)
1652 goto out;
1653 read++;
1654 cnt--;
1655 }
1656
1657 /* We either got finished input or we have to wait for another call. */
1658 if (isspace(ch) || !ch) {
1659 parser->buffer[parser->idx] = 0;
1660 parser->cont = false;
1661 } else if (parser->idx < parser->size - 1) {
1662 parser->cont = true;
1663 parser->buffer[parser->idx++] = ch;
1664 /* Make sure the parsed string always terminates with '\0'. */
1665 parser->buffer[parser->idx] = 0;
1666 } else {
1667 ret = -EINVAL;
1668 goto out;
1669 }
1670
1671 *ppos += read;
1672 ret = read;
1673
1674 out:
1675 return ret;
1676 }
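
/*
 * Illustrative behaviour only: for the user input "foo bar", the first
 * call fills parser->buffer with "foo" and returns 4 (the bytes consumed,
 * including the separating space that was read ahead); the caller then
 * advances ubuf and a later call yields "bar". A token that is split
 * across writes is carried over via parser->cont and finished on the
 * next call.
 */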
1677
1678 /* TODO add a seq_buf_to_buffer() */
1679 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1680 {
1681 int len;
1682
1683 if (trace_seq_used(s) <= s->seq.readpos)
1684 return -EBUSY;
1685
1686 len = trace_seq_used(s) - s->seq.readpos;
1687 if (cnt > len)
1688 cnt = len;
1689 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1690
1691 s->seq.readpos += cnt;
1692 return cnt;
1693 }
1694
1695 unsigned long __read_mostly tracing_thresh;
1696
1697 #ifdef CONFIG_TRACER_MAX_TRACE
1698 static const struct file_operations tracing_max_lat_fops;
1699
1700 #ifdef LATENCY_FS_NOTIFY
1701
1702 static struct workqueue_struct *fsnotify_wq;
1703
1704 static void latency_fsnotify_workfn(struct work_struct *work)
1705 {
1706 struct trace_array *tr = container_of(work, struct trace_array,
1707 fsnotify_work);
1708 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1709 }
1710
1711 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1712 {
1713 struct trace_array *tr = container_of(iwork, struct trace_array,
1714 fsnotify_irqwork);
1715 queue_work(fsnotify_wq, &tr->fsnotify_work);
1716 }
1717
1718 static void trace_create_maxlat_file(struct trace_array *tr,
1719 struct dentry *d_tracer)
1720 {
1721 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1722 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1723 tr->d_max_latency = trace_create_file("tracing_max_latency",
1724 TRACE_MODE_WRITE,
1725 d_tracer, &tr->max_latency,
1726 &tracing_max_lat_fops);
1727 }
1728
1729 __init static int latency_fsnotify_init(void)
1730 {
1731 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1732 WQ_UNBOUND | WQ_HIGHPRI, 0);
1733 if (!fsnotify_wq) {
1734 pr_err("Unable to allocate tr_max_lat_wq\n");
1735 return -ENOMEM;
1736 }
1737 return 0;
1738 }
1739
1740 late_initcall_sync(latency_fsnotify_init);
1741
1742 void latency_fsnotify(struct trace_array *tr)
1743 {
1744 if (!fsnotify_wq)
1745 return;
1746 /*
1747 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1748 * possible that we are called from __schedule() or do_idle(), which
1749 * could cause a deadlock.
1750 */
1751 irq_work_queue(&tr->fsnotify_irqwork);
1752 }
1753
1754 #else /* !LATENCY_FS_NOTIFY */
1755
1756 #define trace_create_maxlat_file(tr, d_tracer) \
1757 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1758 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1759
1760 #endif
1761
1762 /*
1763 * Copy the new maximum trace into the separate maximum-trace
1764 * structure. (this way the maximum trace is permanently saved,
1765 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1766 */
1767 static void
1768 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1769 {
1770 struct array_buffer *trace_buf = &tr->array_buffer;
1771 struct array_buffer *max_buf = &tr->max_buffer;
1772 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1773 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1774
1775 max_buf->cpu = cpu;
1776 max_buf->time_start = data->preempt_timestamp;
1777
1778 max_data->saved_latency = tr->max_latency;
1779 max_data->critical_start = data->critical_start;
1780 max_data->critical_end = data->critical_end;
1781
1782 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1783 max_data->pid = tsk->pid;
1784 /*
1785 * If tsk == current, then use current_uid(), as that does not use
1786 * RCU. The irq tracer can be called out of RCU scope.
1787 */
1788 if (tsk == current)
1789 max_data->uid = current_uid();
1790 else
1791 max_data->uid = task_uid(tsk);
1792
1793 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1794 max_data->policy = tsk->policy;
1795 max_data->rt_priority = tsk->rt_priority;
1796
1797 /* record this task's comm */
1798 tracing_record_cmdline(tsk);
1799 latency_fsnotify(tr);
1800 }
1801
1802 /**
1803 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1804 * @tr: tracer
1805 * @tsk: the task with the latency
1806 * @cpu: The cpu that initiated the trace.
1807 * @cond_data: User data associated with a conditional snapshot
1808 *
1809 * Flip the buffers between the @tr and the max_tr and record information
1810 * about which task was the cause of this latency.
1811 */
1812 void
1813 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1814 void *cond_data)
1815 {
1816 if (tr->stop_count)
1817 return;
1818
1819 WARN_ON_ONCE(!irqs_disabled());
1820
1821 if (!tr->allocated_snapshot) {
1822 /* Only the nop tracer should hit this when disabling */
1823 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1824 return;
1825 }
1826
1827 arch_spin_lock(&tr->max_lock);
1828
1829 /* Inherit the recordable setting from array_buffer */
1830 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1831 ring_buffer_record_on(tr->max_buffer.buffer);
1832 else
1833 ring_buffer_record_off(tr->max_buffer.buffer);
1834
1835 #ifdef CONFIG_TRACER_SNAPSHOT
1836 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1837 arch_spin_unlock(&tr->max_lock);
1838 return;
1839 }
1840 #endif
1841 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1842
1843 __update_max_tr(tr, tsk, cpu);
1844
1845 arch_spin_unlock(&tr->max_lock);
1846 }
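/*
 * Sketch of how a latency tracer typically drives this (simplified and
 * hypothetical; the real callers are e.g. the wakeup and irqsoff tracers,
 * and "delta" stands for the latency they just measured): once the new
 * delta beats the recorded maximum, update it and flip the snapshot
 * buffers so the maximum trace is preserved for later reading.
 */
#if 0
	if (delta > tr->max_latency) {
		tr->max_latency = delta;
		update_max_tr(tr, current, smp_processor_id(), NULL);
	}
#endif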
1847
1848 /**
1849 * update_max_tr_single - only copy one trace over, and reset the rest
1850 * @tr: tracer
1851 * @tsk: task with the latency
1852 * @cpu: the cpu of the buffer to copy.
1853 *
1854 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1855 */
1856 void
1857 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1858 {
1859 int ret;
1860
1861 if (tr->stop_count)
1862 return;
1863
1864 WARN_ON_ONCE(!irqs_disabled());
1865 if (!tr->allocated_snapshot) {
1866 /* Only the nop tracer should hit this when disabling */
1867 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1868 return;
1869 }
1870
1871 arch_spin_lock(&tr->max_lock);
1872
1873 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1874
1875 if (ret == -EBUSY) {
1876 /*
1877 * We failed to swap the buffer due to a commit taking
1878 * place on this CPU. We fail to record, but we reset
1879 * the max trace buffer (no one writes directly to it)
1880 * and flag that it failed.
1881 */
1882 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1883 "Failed to swap buffers due to commit in progress\n");
1884 }
1885
1886 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1887
1888 __update_max_tr(tr, tsk, cpu);
1889 arch_spin_unlock(&tr->max_lock);
1890 }
1891
1892 #endif /* CONFIG_TRACER_MAX_TRACE */
1893
1894 static int wait_on_pipe(struct trace_iterator *iter, int full)
1895 {
1896 /* Iterators are static, they should be filled or empty */
1897 if (trace_buffer_iter(iter, iter->cpu_file))
1898 return 0;
1899
1900 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1901 full);
1902 }
1903
1904 #ifdef CONFIG_FTRACE_STARTUP_TEST
1905 static bool selftests_can_run;
1906
1907 struct trace_selftests {
1908 struct list_head list;
1909 struct tracer *type;
1910 };
1911
1912 static LIST_HEAD(postponed_selftests);
1913
1914 static int save_selftest(struct tracer *type)
1915 {
1916 struct trace_selftests *selftest;
1917
1918 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1919 if (!selftest)
1920 return -ENOMEM;
1921
1922 selftest->type = type;
1923 list_add(&selftest->list, &postponed_selftests);
1924 return 0;
1925 }
1926
1927 static int run_tracer_selftest(struct tracer *type)
1928 {
1929 struct trace_array *tr = &global_trace;
1930 struct tracer *saved_tracer = tr->current_trace;
1931 int ret;
1932
1933 if (!type->selftest || tracing_selftest_disabled)
1934 return 0;
1935
1936 /*
1937 * If a tracer registers early in boot up (before scheduling is
1938 * initialized and such), then do not run its selftests yet.
1939 * Instead, run it a little later in the boot process.
1940 */
1941 if (!selftests_can_run)
1942 return save_selftest(type);
1943
1944 if (!tracing_is_on()) {
1945 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1946 type->name);
1947 return 0;
1948 }
1949
1950 /*
1951 * Run a selftest on this tracer.
1952 * Here we reset the trace buffer, and set the current
1953 * tracer to be this tracer. The tracer can then run some
1954 * internal tracing to verify that everything is in order.
1955 * If we fail, we do not register this tracer.
1956 */
1957 tracing_reset_online_cpus(&tr->array_buffer);
1958
1959 tr->current_trace = type;
1960
1961 #ifdef CONFIG_TRACER_MAX_TRACE
1962 if (type->use_max_tr) {
1963 /* If we expanded the buffers, make sure the max is expanded too */
1964 if (ring_buffer_expanded)
1965 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1966 RING_BUFFER_ALL_CPUS);
1967 tr->allocated_snapshot = true;
1968 }
1969 #endif
1970
1971 /* the test is responsible for initializing and enabling */
1972 pr_info("Testing tracer %s: ", type->name);
1973 ret = type->selftest(type, tr);
1974 /* the test is responsible for resetting too */
1975 tr->current_trace = saved_tracer;
1976 if (ret) {
1977 printk(KERN_CONT "FAILED!\n");
1978 /* Add the warning after printing 'FAILED' */
1979 WARN_ON(1);
1980 return -1;
1981 }
1982 /* Only reset on passing, to avoid touching corrupted buffers */
1983 tracing_reset_online_cpus(&tr->array_buffer);
1984
1985 #ifdef CONFIG_TRACER_MAX_TRACE
1986 if (type->use_max_tr) {
1987 tr->allocated_snapshot = false;
1988
1989 /* Shrink the max buffer again */
1990 if (ring_buffer_expanded)
1991 ring_buffer_resize(tr->max_buffer.buffer, 1,
1992 RING_BUFFER_ALL_CPUS);
1993 }
1994 #endif
1995
1996 printk(KERN_CONT "PASSED\n");
1997 return 0;
1998 }
1999
2000 static __init int init_trace_selftests(void)
2001 {
2002 struct trace_selftests *p, *n;
2003 struct tracer *t, **last;
2004 int ret;
2005
2006 selftests_can_run = true;
2007
2008 mutex_lock(&trace_types_lock);
2009
2010 if (list_empty(&postponed_selftests))
2011 goto out;
2012
2013 pr_info("Running postponed tracer tests:\n");
2014
2015 tracing_selftest_running = true;
2016 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2017 /* This loop can take minutes when sanitizers are enabled, so
2018 * let's make sure we allow RCU processing.
2019 */
2020 cond_resched();
2021 ret = run_tracer_selftest(p->type);
2022 /* If the test fails, then warn and remove from available_tracers */
2023 if (ret < 0) {
2024 WARN(1, "tracer: %s failed selftest, disabling\n",
2025 p->type->name);
2026 last = &trace_types;
2027 for (t = trace_types; t; t = t->next) {
2028 if (t == p->type) {
2029 *last = t->next;
2030 break;
2031 }
2032 last = &t->next;
2033 }
2034 }
2035 list_del(&p->list);
2036 kfree(p);
2037 }
2038 tracing_selftest_running = false;
2039
2040 out:
2041 mutex_unlock(&trace_types_lock);
2042
2043 return 0;
2044 }
2045 core_initcall(init_trace_selftests);
2046 #else
2047 static inline int run_tracer_selftest(struct tracer *type)
2048 {
2049 return 0;
2050 }
2051 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2052
2053 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2054
2055 static void __init apply_trace_boot_options(void);
2056
2057 /**
2058 * register_tracer - register a tracer with the ftrace system.
2059 * @type: the plugin for the tracer
2060 *
2061 * Register a new plugin tracer.
2062 */
2063 int __init register_tracer(struct tracer *type)
2064 {
2065 struct tracer *t;
2066 int ret = 0;
2067
2068 if (!type->name) {
2069 pr_info("Tracer must have a name\n");
2070 return -1;
2071 }
2072
2073 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2074 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2075 return -1;
2076 }
2077
2078 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2079 pr_warn("Can not register tracer %s due to lockdown\n",
2080 type->name);
2081 return -EPERM;
2082 }
2083
2084 mutex_lock(&trace_types_lock);
2085
2086 tracing_selftest_running = true;
2087
2088 for (t = trace_types; t; t = t->next) {
2089 if (strcmp(type->name, t->name) == 0) {
2090 /* already found */
2091 pr_info("Tracer %s already registered\n",
2092 type->name);
2093 ret = -1;
2094 goto out;
2095 }
2096 }
2097
2098 if (!type->set_flag)
2099 type->set_flag = &dummy_set_flag;
2100 if (!type->flags) {
2101 /* allocate a dummy tracer_flags */
2102 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2103 if (!type->flags) {
2104 ret = -ENOMEM;
2105 goto out;
2106 }
2107 type->flags->val = 0;
2108 type->flags->opts = dummy_tracer_opt;
2109 } else
2110 if (!type->flags->opts)
2111 type->flags->opts = dummy_tracer_opt;
2112
2113 /* store the tracer for __set_tracer_option */
2114 type->flags->trace = type;
2115
2116 ret = run_tracer_selftest(type);
2117 if (ret < 0)
2118 goto out;
2119
2120 type->next = trace_types;
2121 trace_types = type;
2122 add_tracer_options(&global_trace, type);
2123
2124 out:
2125 tracing_selftest_running = false;
2126 mutex_unlock(&trace_types_lock);
2127
2128 if (ret || !default_bootup_tracer)
2129 goto out_unlock;
2130
2131 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2132 goto out_unlock;
2133
2134 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2135 /* Do we want this tracer to start on bootup? */
2136 tracing_set_tracer(&global_trace, type->name);
2137 default_bootup_tracer = NULL;
2138
2139 apply_trace_boot_options();
2140
2141 /* disable other selftests, since this will break it. */
2142 disable_tracing_selftest("running a tracer");
2143
2144 out_unlock:
2145 return ret;
2146 }
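/*
 * What registration looks like from a tracer plugin's side (a minimal,
 * hypothetical tracer; real tracers also provide .init, .reset, .start,
 * .stop and usually a selftest):
 */
#if 0
static struct tracer example_trace __read_mostly = {
	.name		= "example",
};

static __init int init_example_trace(void)
{
	return register_tracer(&example_trace);
}
core_initcall(init_example_trace);
#endif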
2147
2148 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2149 {
2150 struct trace_buffer *buffer = buf->buffer;
2151
2152 if (!buffer)
2153 return;
2154
2155 ring_buffer_record_disable(buffer);
2156
2157 /* Make sure all commits have finished */
2158 synchronize_rcu();
2159 ring_buffer_reset_cpu(buffer, cpu);
2160
2161 ring_buffer_record_enable(buffer);
2162 }
2163
2164 void tracing_reset_online_cpus(struct array_buffer *buf)
2165 {
2166 struct trace_buffer *buffer = buf->buffer;
2167
2168 if (!buffer)
2169 return;
2170
2171 ring_buffer_record_disable(buffer);
2172
2173 /* Make sure all commits have finished */
2174 synchronize_rcu();
2175
2176 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2177
2178 ring_buffer_reset_online_cpus(buffer);
2179
2180 ring_buffer_record_enable(buffer);
2181 }
2182
2183 /* Must have trace_types_lock held */
2184 void tracing_reset_all_online_cpus_unlocked(void)
2185 {
2186 struct trace_array *tr;
2187
2188 lockdep_assert_held(&trace_types_lock);
2189
2190 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2191 if (!tr->clear_trace)
2192 continue;
2193 tr->clear_trace = false;
2194 tracing_reset_online_cpus(&tr->array_buffer);
2195 #ifdef CONFIG_TRACER_MAX_TRACE
2196 tracing_reset_online_cpus(&tr->max_buffer);
2197 #endif
2198 }
2199 }
2200
2201 void tracing_reset_all_online_cpus(void)
2202 {
2203 mutex_lock(&trace_types_lock);
2204 tracing_reset_all_online_cpus_unlocked();
2205 mutex_unlock(&trace_types_lock);
2206 }
2207
2208 /*
2209 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2210 * is the tgid last observed corresponding to pid=i.
2211 */
2212 static int *tgid_map;
2213
2214 /* The maximum valid index into tgid_map. */
2215 static size_t tgid_map_max;
2216
2217 #define SAVED_CMDLINES_DEFAULT 128
2218 #define NO_CMDLINE_MAP UINT_MAX
2219 /*
2220 * Preemption must be disabled before acquiring trace_cmdline_lock.
2221 * The various trace_arrays' max_lock must be acquired in a context
2222 * where interrupts are disabled.
2223 */
2224 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2225 struct saved_cmdlines_buffer {
2226 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2227 unsigned *map_cmdline_to_pid;
2228 unsigned cmdline_num;
2229 int cmdline_idx;
2230 char *saved_cmdlines;
2231 };
2232 static struct saved_cmdlines_buffer *savedcmd;
2233
2234 static inline char *get_saved_cmdlines(int idx)
2235 {
2236 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2237 }
2238
2239 static inline void set_cmdline(int idx, const char *cmdline)
2240 {
2241 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2242 }
2243
2244 static int allocate_cmdlines_buffer(unsigned int val,
2245 struct saved_cmdlines_buffer *s)
2246 {
2247 s->map_cmdline_to_pid = kmalloc_array(val,
2248 sizeof(*s->map_cmdline_to_pid),
2249 GFP_KERNEL);
2250 if (!s->map_cmdline_to_pid)
2251 return -ENOMEM;
2252
2253 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2254 if (!s->saved_cmdlines) {
2255 kfree(s->map_cmdline_to_pid);
2256 return -ENOMEM;
2257 }
2258
2259 s->cmdline_idx = 0;
2260 s->cmdline_num = val;
2261 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2262 sizeof(s->map_pid_to_cmdline));
2263 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2264 val * sizeof(*s->map_cmdline_to_pid));
2265
2266 return 0;
2267 }
2268
2269 static int trace_create_savedcmd(void)
2270 {
2271 int ret;
2272
2273 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2274 if (!savedcmd)
2275 return -ENOMEM;
2276
2277 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2278 if (ret < 0) {
2279 kfree(savedcmd);
2280 savedcmd = NULL;
2281 return -ENOMEM;
2282 }
2283
2284 return 0;
2285 }
2286
2287 int is_tracing_stopped(void)
2288 {
2289 return global_trace.stop_count;
2290 }
2291
2292 /**
2293 * tracing_start - quick start of the tracer
2294 *
2295 * If tracing is enabled but was stopped by tracing_stop,
2296 * this will start the tracer back up.
2297 */
2298 void tracing_start(void)
2299 {
2300 struct trace_buffer *buffer;
2301 unsigned long flags;
2302
2303 if (tracing_disabled)
2304 return;
2305
2306 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2307 if (--global_trace.stop_count) {
2308 if (global_trace.stop_count < 0) {
2309 /* Someone screwed up their debugging */
2310 WARN_ON_ONCE(1);
2311 global_trace.stop_count = 0;
2312 }
2313 goto out;
2314 }
2315
2316 /* Prevent the buffers from switching */
2317 arch_spin_lock(&global_trace.max_lock);
2318
2319 buffer = global_trace.array_buffer.buffer;
2320 if (buffer)
2321 ring_buffer_record_enable(buffer);
2322
2323 #ifdef CONFIG_TRACER_MAX_TRACE
2324 buffer = global_trace.max_buffer.buffer;
2325 if (buffer)
2326 ring_buffer_record_enable(buffer);
2327 #endif
2328
2329 arch_spin_unlock(&global_trace.max_lock);
2330
2331 out:
2332 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2333 }
2334
2335 static void tracing_start_tr(struct trace_array *tr)
2336 {
2337 struct trace_buffer *buffer;
2338 unsigned long flags;
2339
2340 if (tracing_disabled)
2341 return;
2342
2343 /* If global, we need to also start the max tracer */
2344 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2345 return tracing_start();
2346
2347 raw_spin_lock_irqsave(&tr->start_lock, flags);
2348
2349 if (--tr->stop_count) {
2350 if (tr->stop_count < 0) {
2351 /* Someone screwed up their debugging */
2352 WARN_ON_ONCE(1);
2353 tr->stop_count = 0;
2354 }
2355 goto out;
2356 }
2357
2358 buffer = tr->array_buffer.buffer;
2359 if (buffer)
2360 ring_buffer_record_enable(buffer);
2361
2362 out:
2363 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2364 }
2365
2366 /**
2367 * tracing_stop - quick stop of the tracer
2368 *
2369 * Light weight way to stop tracing. Use in conjunction with
2370 * tracing_start.
2371 */
2372 void tracing_stop(void)
2373 {
2374 struct trace_buffer *buffer;
2375 unsigned long flags;
2376
2377 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2378 if (global_trace.stop_count++)
2379 goto out;
2380
2381 /* Prevent the buffers from switching */
2382 arch_spin_lock(&global_trace.max_lock);
2383
2384 buffer = global_trace.array_buffer.buffer;
2385 if (buffer)
2386 ring_buffer_record_disable(buffer);
2387
2388 #ifdef CONFIG_TRACER_MAX_TRACE
2389 buffer = global_trace.max_buffer.buffer;
2390 if (buffer)
2391 ring_buffer_record_disable(buffer);
2392 #endif
2393
2394 arch_spin_unlock(&global_trace.max_lock);
2395
2396 out:
2397 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2398 }
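/*
 * tracing_stop()/tracing_start() nest through stop_count: recording is
 * disabled on the first stop and only re-enabled by the matching final
 * start.  For example:
 */
#if 0
	tracing_stop();		/* stop_count 0 -> 1, recording disabled   */
	tracing_stop();		/* stop_count 1 -> 2                       */
	tracing_start();	/* stop_count 2 -> 1, still disabled       */
	tracing_start();	/* stop_count 1 -> 0, recording re-enabled */
#endif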
2399
2400 static void tracing_stop_tr(struct trace_array *tr)
2401 {
2402 struct trace_buffer *buffer;
2403 unsigned long flags;
2404
2405 /* If global, we need to also stop the max tracer */
2406 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2407 return tracing_stop();
2408
2409 raw_spin_lock_irqsave(&tr->start_lock, flags);
2410 if (tr->stop_count++)
2411 goto out;
2412
2413 buffer = tr->array_buffer.buffer;
2414 if (buffer)
2415 ring_buffer_record_disable(buffer);
2416
2417 out:
2418 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2419 }
2420
2421 static int trace_save_cmdline(struct task_struct *tsk)
2422 {
2423 unsigned tpid, idx;
2424
2425 /* treat recording of idle task as a success */
2426 if (!tsk->pid)
2427 return 1;
2428
2429 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2430
2431 /*
2432 * It's not the end of the world if we don't get
2433 * the lock, but we also don't want to spin
2434 * nor do we want to disable interrupts,
2435 * so if we miss here, then better luck next time.
2436 *
2437 * This is called from within the scheduler and wakeup paths, so
2438 * interrupts had better be disabled and the run queue lock held.
2439 */
2440 lockdep_assert_preemption_disabled();
2441 if (!arch_spin_trylock(&trace_cmdline_lock))
2442 return 0;
2443
2444 idx = savedcmd->map_pid_to_cmdline[tpid];
2445 if (idx == NO_CMDLINE_MAP) {
2446 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2447
2448 savedcmd->map_pid_to_cmdline[tpid] = idx;
2449 savedcmd->cmdline_idx = idx;
2450 }
2451
2452 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2453 set_cmdline(idx, tsk->comm);
2454
2455 arch_spin_unlock(&trace_cmdline_lock);
2456
2457 return 1;
2458 }
2459
2460 static void __trace_find_cmdline(int pid, char comm[])
2461 {
2462 unsigned map;
2463 int tpid;
2464
2465 if (!pid) {
2466 strcpy(comm, "<idle>");
2467 return;
2468 }
2469
2470 if (WARN_ON_ONCE(pid < 0)) {
2471 strcpy(comm, "<XXX>");
2472 return;
2473 }
2474
2475 tpid = pid & (PID_MAX_DEFAULT - 1);
2476 map = savedcmd->map_pid_to_cmdline[tpid];
2477 if (map != NO_CMDLINE_MAP) {
2478 tpid = savedcmd->map_cmdline_to_pid[map];
2479 if (tpid == pid) {
2480 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2481 return;
2482 }
2483 }
2484 strcpy(comm, "<...>");
2485 }
2486
2487 void trace_find_cmdline(int pid, char comm[])
2488 {
2489 preempt_disable();
2490 arch_spin_lock(&trace_cmdline_lock);
2491
2492 __trace_find_cmdline(pid, comm);
2493
2494 arch_spin_unlock(&trace_cmdline_lock);
2495 preempt_enable();
2496 }
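/*
 * Typical use by the output code (a sketch; "entry" and "s" stand for the
 * caller's trace entry and trace_seq):
 */
#if 0
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);
	trace_seq_printf(s, "%16s-%-7d\n", comm, entry->pid);
#endif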
2497
2498 static int *trace_find_tgid_ptr(int pid)
2499 {
2500 /*
2501 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2502 * if we observe a non-NULL tgid_map then we also observe the correct
2503 * tgid_map_max.
2504 */
2505 int *map = smp_load_acquire(&tgid_map);
2506
2507 if (unlikely(!map || pid > tgid_map_max))
2508 return NULL;
2509
2510 return &map[pid];
2511 }
2512
2513 int trace_find_tgid(int pid)
2514 {
2515 int *ptr = trace_find_tgid_ptr(pid);
2516
2517 return ptr ? *ptr : 0;
2518 }
2519
2520 static int trace_save_tgid(struct task_struct *tsk)
2521 {
2522 int *ptr;
2523
2524 /* treat recording of idle task as a success */
2525 if (!tsk->pid)
2526 return 1;
2527
2528 ptr = trace_find_tgid_ptr(tsk->pid);
2529 if (!ptr)
2530 return 0;
2531
2532 *ptr = tsk->tgid;
2533 return 1;
2534 }
2535
2536 static bool tracing_record_taskinfo_skip(int flags)
2537 {
2538 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2539 return true;
2540 if (!__this_cpu_read(trace_taskinfo_save))
2541 return true;
2542 return false;
2543 }
2544
2545 /**
2546 * tracing_record_taskinfo - record the task info of a task
2547 *
2548 * @task: task to record
2549 * @flags: TRACE_RECORD_CMDLINE for recording comm
2550 * TRACE_RECORD_TGID for recording tgid
2551 */
2552 void tracing_record_taskinfo(struct task_struct *task, int flags)
2553 {
2554 bool done;
2555
2556 if (tracing_record_taskinfo_skip(flags))
2557 return;
2558
2559 /*
2560 * Record as much task information as possible. If some fail, continue
2561 * to try to record the others.
2562 */
2563 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2564 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2565
2566 /* If recording any information failed, retry again soon. */
2567 if (!done)
2568 return;
2569
2570 __this_cpu_write(trace_taskinfo_save, false);
2571 }
2572
2573 /**
2574 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2575 *
2576 * @prev: previous task during sched_switch
2577 * @next: next task during sched_switch
2578 * @flags: TRACE_RECORD_CMDLINE for recording comm
2579 * TRACE_RECORD_TGID for recording tgid
2580 */
2581 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2582 struct task_struct *next, int flags)
2583 {
2584 bool done;
2585
2586 if (tracing_record_taskinfo_skip(flags))
2587 return;
2588
2589 /*
2590 * Record as much task information as possible. If some fail, continue
2591 * to try to record the others.
2592 */
2593 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2594 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2595 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2596 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2597
2598 /* If recording any information failed, retry again soon. */
2599 if (!done)
2600 return;
2601
2602 __this_cpu_write(trace_taskinfo_save, false);
2603 }
2604
2605 /* Helpers to record a specific task information */
2606 void tracing_record_cmdline(struct task_struct *task)
2607 {
2608 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2609 }
2610
2611 void tracing_record_tgid(struct task_struct *task)
2612 {
2613 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2614 }
2615
2616 /*
2617 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2618 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2619 * simplifies those functions and keeps them in sync.
2620 */
2621 enum print_line_t trace_handle_return(struct trace_seq *s)
2622 {
2623 return trace_seq_has_overflowed(s) ?
2624 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2625 }
2626 EXPORT_SYMBOL_GPL(trace_handle_return);
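/*
 * How an event's output callback typically ends (a sketch of a
 * struct trace_event_functions .trace handler; the event name and the
 * printed format are illustrative):
 */
#if 0
static enum print_line_t example_print(struct trace_iterator *iter,
				       int flags, struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "example: %d\n", 42);
	return trace_handle_return(&iter->seq);
}
#endif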
2627
2628 static unsigned short migration_disable_value(void)
2629 {
2630 #if defined(CONFIG_SMP)
2631 return current->migration_disabled;
2632 #else
2633 return 0;
2634 #endif
2635 }
2636
2637 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2638 {
2639 unsigned int trace_flags = irqs_status;
2640 unsigned int pc;
2641
2642 pc = preempt_count();
2643
2644 if (pc & NMI_MASK)
2645 trace_flags |= TRACE_FLAG_NMI;
2646 if (pc & HARDIRQ_MASK)
2647 trace_flags |= TRACE_FLAG_HARDIRQ;
2648 if (in_serving_softirq())
2649 trace_flags |= TRACE_FLAG_SOFTIRQ;
2650 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2651 trace_flags |= TRACE_FLAG_BH_OFF;
2652
2653 if (tif_need_resched())
2654 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2655 if (test_preempt_need_resched())
2656 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2657 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2658 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2659 }
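/*
 * Layout of the returned context word, as packed above: bits 16 and up
 * carry the TRACE_FLAG_* bits, bits 4-7 the (capped) migration-disable
 * depth, and bits 0-3 the (capped) preemption depth.  A worked decode:
 */
#if 0
	unsigned int ctx = tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_OFF);
	unsigned int preempt_depth = ctx & 0xf;
	unsigned int migrate_depth = (ctx >> 4) & 0xf;
	unsigned int flags = ctx >> 16;	/* TRACE_FLAG_IRQS_OFF is set here */
#endif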
2660
2661 struct ring_buffer_event *
2662 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2663 int type,
2664 unsigned long len,
2665 unsigned int trace_ctx)
2666 {
2667 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2668 }
2669
2670 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2671 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2672 static int trace_buffered_event_ref;
2673
2674 /**
2675 * trace_buffered_event_enable - enable buffering events
2676 *
2677 * When events are being filtered, it is quicker to use a temporary
2678 * buffer to write the event data into if there's a likely chance
2679 * that it will not be committed. The discard of the ring buffer
2680 * is not as fast as committing, and is much slower than copying
2681 * a commit.
2682 *
2683 * When an event is to be filtered, allocate per-CPU buffers to
2684 * write the event data into. If the event is then filtered and
2685 * discarded it is simply dropped; otherwise the entire data is
2686 * committed in one shot.
2687 */
2688 void trace_buffered_event_enable(void)
2689 {
2690 struct ring_buffer_event *event;
2691 struct page *page;
2692 int cpu;
2693
2694 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2695
2696 if (trace_buffered_event_ref++)
2697 return;
2698
2699 for_each_tracing_cpu(cpu) {
2700 page = alloc_pages_node(cpu_to_node(cpu),
2701 GFP_KERNEL | __GFP_NORETRY, 0);
2702 if (!page)
2703 goto failed;
2704
2705 event = page_address(page);
2706 memset(event, 0, sizeof(*event));
2707
2708 per_cpu(trace_buffered_event, cpu) = event;
2709
2710 preempt_disable();
2711 if (cpu == smp_processor_id() &&
2712 __this_cpu_read(trace_buffered_event) !=
2713 per_cpu(trace_buffered_event, cpu))
2714 WARN_ON_ONCE(1);
2715 preempt_enable();
2716 }
2717
2718 return;
2719 failed:
2720 trace_buffered_event_disable();
2721 }
2722
2723 static void enable_trace_buffered_event(void *data)
2724 {
2725 /* Probably not needed, but do it anyway */
2726 smp_rmb();
2727 this_cpu_dec(trace_buffered_event_cnt);
2728 }
2729
2730 static void disable_trace_buffered_event(void *data)
2731 {
2732 this_cpu_inc(trace_buffered_event_cnt);
2733 }
2734
2735 /**
2736 * trace_buffered_event_disable - disable buffering events
2737 *
2738 * When a filter is removed, it is faster to not use the buffered
2739 * events, and to commit directly into the ring buffer. Free up
2740 * the temp buffers when there are no more users. This requires
2741 * special synchronization with current events.
2742 */
2743 void trace_buffered_event_disable(void)
2744 {
2745 int cpu;
2746
2747 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2748
2749 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2750 return;
2751
2752 if (--trace_buffered_event_ref)
2753 return;
2754
2755 preempt_disable();
2756 /* For each CPU, set the buffer as used. */
2757 smp_call_function_many(tracing_buffer_mask,
2758 disable_trace_buffered_event, NULL, 1);
2759 preempt_enable();
2760
2761 /* Wait for all current users to finish */
2762 synchronize_rcu();
2763
2764 for_each_tracing_cpu(cpu) {
2765 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2766 per_cpu(trace_buffered_event, cpu) = NULL;
2767 }
2768 /*
2769 * Make sure trace_buffered_event is NULL before clearing
2770 * trace_buffered_event_cnt.
2771 */
2772 smp_wmb();
2773
2774 preempt_disable();
2775 /* Do the work on each cpu */
2776 smp_call_function_many(tracing_buffer_mask,
2777 enable_trace_buffered_event, NULL, 1);
2778 preempt_enable();
2779 }
2780
2781 static struct trace_buffer *temp_buffer;
2782
2783 struct ring_buffer_event *
2784 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2785 struct trace_event_file *trace_file,
2786 int type, unsigned long len,
2787 unsigned int trace_ctx)
2788 {
2789 struct ring_buffer_event *entry;
2790 struct trace_array *tr = trace_file->tr;
2791 int val;
2792
2793 *current_rb = tr->array_buffer.buffer;
2794
2795 if (!tr->no_filter_buffering_ref &&
2796 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2797 preempt_disable_notrace();
2798 /*
2799 * Filtering is on, so try to use the per cpu buffer first.
2800 * This buffer will simulate a ring_buffer_event,
2801 * where the type_len is zero and the array[0] will
2802 * hold the full length.
2803 * (see include/linux/ring_buffer.h for details on
2804 * how the ring_buffer_event is structured).
2805 *
2806 * Using a temp buffer during filtering and copying it
2807 * on a matched filter is quicker than writing directly
2808 * into the ring buffer and then discarding it when
2809 * it doesn't match. That is because the discard
2810 * requires several atomic operations to get right.
2811 * Copying on match and doing nothing on a failed match
2812 * is still quicker than no copy on match, but having
2813 * to discard out of the ring buffer on a failed match.
2814 */
2815 if ((entry = __this_cpu_read(trace_buffered_event))) {
2816 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2817
2818 val = this_cpu_inc_return(trace_buffered_event_cnt);
2819
2820 /*
2821 * Preemption is disabled, but interrupts and NMIs
2822 * can still come in now. If that happens after
2823 * the above increment, then it will have to go
2824 * back to the old method of allocating the event
2825 * on the ring buffer, and if the filter fails, it
2826 * will have to call ring_buffer_discard_commit()
2827 * to remove it.
2828 *
2829 * Need to also check the unlikely case that the
2830 * length is bigger than the temp buffer size.
2831 * If that happens, then the reserve is pretty much
2832 * guaranteed to fail, as the ring buffer currently
2833 * only allows events less than a page. But that may
2834 * change in the future, so let the ring buffer reserve
2835 * handle the failure in that case.
2836 */
2837 if (val == 1 && likely(len <= max_len)) {
2838 trace_event_setup(entry, type, trace_ctx);
2839 entry->array[0] = len;
2840 /* Return with preemption disabled */
2841 return entry;
2842 }
2843 this_cpu_dec(trace_buffered_event_cnt);
2844 }
2845 /* __trace_buffer_lock_reserve() disables preemption */
2846 preempt_enable_notrace();
2847 }
2848
2849 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2850 trace_ctx);
2851 /*
2852 * If tracing is off, but we have triggers enabled
2853 * we still need to look at the event data. Use the temp_buffer
2854 * to store the trace event for the trigger to use. It's recursive
2855 * safe and will not be recorded anywhere.
2856 */
2857 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2858 *current_rb = temp_buffer;
2859 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2860 trace_ctx);
2861 }
2862 return entry;
2863 }
2864 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
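/*
 * Rough shape of a caller (a simplified sketch of what the generated
 * trace_event_raw_event_*() code does via trace_event_buffer_reserve();
 * "fbuffer", "trace_file", "event_type", "entry" and "trace_ctx" are the
 * caller's locals): reserve, fill in the entry, then commit.
 */
#if 0
	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
					trace_file, event_type,
					sizeof(*entry), trace_ctx);
	if (fbuffer.event) {
		entry = ring_buffer_event_data(fbuffer.event);
		/* ... fill in the event fields ... */
		trace_event_buffer_commit(&fbuffer);
	}
#endif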
2865
2866 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2867 static DEFINE_MUTEX(tracepoint_printk_mutex);
2868
2869 static void output_printk(struct trace_event_buffer *fbuffer)
2870 {
2871 struct trace_event_call *event_call;
2872 struct trace_event_file *file;
2873 struct trace_event *event;
2874 unsigned long flags;
2875 struct trace_iterator *iter = tracepoint_print_iter;
2876
2877 /* We should never get here if iter is NULL */
2878 if (WARN_ON_ONCE(!iter))
2879 return;
2880
2881 event_call = fbuffer->trace_file->event_call;
2882 if (!event_call || !event_call->event.funcs ||
2883 !event_call->event.funcs->trace)
2884 return;
2885
2886 file = fbuffer->trace_file;
2887 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2888 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2889 !filter_match_preds(file->filter, fbuffer->entry)))
2890 return;
2891
2892 event = &fbuffer->trace_file->event_call->event;
2893
2894 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2895 trace_seq_init(&iter->seq);
2896 iter->ent = fbuffer->entry;
2897 event_call->event.funcs->trace(iter, 0, event);
2898 trace_seq_putc(&iter->seq, 0);
2899 printk("%s", iter->seq.buffer);
2900
2901 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2902 }
2903
2904 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2905 void *buffer, size_t *lenp,
2906 loff_t *ppos)
2907 {
2908 int save_tracepoint_printk;
2909 int ret;
2910
2911 mutex_lock(&tracepoint_printk_mutex);
2912 save_tracepoint_printk = tracepoint_printk;
2913
2914 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2915
2916 /*
2917 * This will force exiting early, as tracepoint_printk
2918 * is always zero when tracepoint_print_iter is not allocated.
2919 */
2920 if (!tracepoint_print_iter)
2921 tracepoint_printk = 0;
2922
2923 if (save_tracepoint_printk == tracepoint_printk)
2924 goto out;
2925
2926 if (tracepoint_printk)
2927 static_key_enable(&tracepoint_printk_key.key);
2928 else
2929 static_key_disable(&tracepoint_printk_key.key);
2930
2931 out:
2932 mutex_unlock(&tracepoint_printk_mutex);
2933
2934 return ret;
2935 }
2936
2937 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2938 {
2939 enum event_trigger_type tt = ETT_NONE;
2940 struct trace_event_file *file = fbuffer->trace_file;
2941
2942 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2943 fbuffer->entry, &tt))
2944 goto discard;
2945
2946 if (static_key_false(&tracepoint_printk_key.key))
2947 output_printk(fbuffer);
2948
2949 if (static_branch_unlikely(&trace_event_exports_enabled))
2950 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2951
2952 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2953 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2954
2955 discard:
2956 if (tt)
2957 event_triggers_post_call(file, tt);
2958
2959 }
2960 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2961
2962 /*
2963 * Skip 3:
2964 *
2965 * trace_buffer_unlock_commit_regs()
2966 * trace_event_buffer_commit()
2967 * trace_event_raw_event_xxx()
2968 */
2969 # define STACK_SKIP 3
2970
2971 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2972 struct trace_buffer *buffer,
2973 struct ring_buffer_event *event,
2974 unsigned int trace_ctx,
2975 struct pt_regs *regs)
2976 {
2977 __buffer_unlock_commit(buffer, event);
2978
2979 /*
2980 * If regs is not set, then skip the necessary functions.
2981 * Note, we can still get here via blktrace, wakeup tracer
2982 * and mmiotrace, but that's ok if they lose a function or
2983 * two. They are not that meaningful.
2984 */
2985 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2986 ftrace_trace_userstack(tr, buffer, trace_ctx);
2987 }
2988
2989 /*
2990 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2991 */
2992 void
2993 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2994 struct ring_buffer_event *event)
2995 {
2996 __buffer_unlock_commit(buffer, event);
2997 }
2998
2999 void
3000 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3001 parent_ip, unsigned int trace_ctx)
3002 {
3003 struct trace_event_call *call = &event_function;
3004 struct trace_buffer *buffer = tr->array_buffer.buffer;
3005 struct ring_buffer_event *event;
3006 struct ftrace_entry *entry;
3007
3008 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3009 trace_ctx);
3010 if (!event)
3011 return;
3012 entry = ring_buffer_event_data(event);
3013 entry->ip = ip;
3014 entry->parent_ip = parent_ip;
3015
3016 if (!call_filter_check_discard(call, entry, buffer, event)) {
3017 if (static_branch_unlikely(&trace_function_exports_enabled))
3018 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3019 __buffer_unlock_commit(buffer, event);
3020 }
3021 }
3022
3023 #ifdef CONFIG_STACKTRACE
3024
3025 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3026 #define FTRACE_KSTACK_NESTING 4
3027
3028 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3029
3030 struct ftrace_stack {
3031 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3032 };
3033
3034
3035 struct ftrace_stacks {
3036 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3037 };
3038
3039 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3040 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3041
3042 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3043 unsigned int trace_ctx,
3044 int skip, struct pt_regs *regs)
3045 {
3046 struct trace_event_call *call = &event_kernel_stack;
3047 struct ring_buffer_event *event;
3048 unsigned int size, nr_entries;
3049 struct ftrace_stack *fstack;
3050 struct stack_entry *entry;
3051 int stackidx;
3052
3053 /*
3054 * Add one, for this function and the call to stack_trace_save().
3055 * If regs is set, then these functions will not be in the way.
3056 */
3057 #ifndef CONFIG_UNWINDER_ORC
3058 if (!regs)
3059 skip++;
3060 #endif
3061
3062 preempt_disable_notrace();
3063
3064 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3065
3066 /* This should never happen. If it does, yell once and skip */
3067 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3068 goto out;
3069
3070 /*
3071 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3072 * interrupt will either see the value pre increment or post
3073 * increment. If the interrupt happens pre increment it will have
3074 * restored the counter when it returns. We just need a barrier to
3075 * keep gcc from moving things around.
3076 */
3077 barrier();
3078
3079 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3080 size = ARRAY_SIZE(fstack->calls);
3081
3082 if (regs) {
3083 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3084 size, skip);
3085 } else {
3086 nr_entries = stack_trace_save(fstack->calls, size, skip);
3087 }
3088
3089 size = nr_entries * sizeof(unsigned long);
3090 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3091 (sizeof(*entry) - sizeof(entry->caller)) + size,
3092 trace_ctx);
3093 if (!event)
3094 goto out;
3095 entry = ring_buffer_event_data(event);
3096
3097 memcpy(&entry->caller, fstack->calls, size);
3098 entry->size = nr_entries;
3099
3100 if (!call_filter_check_discard(call, entry, buffer, event))
3101 __buffer_unlock_commit(buffer, event);
3102
3103 out:
3104 /* Again, don't let gcc optimize things here */
3105 barrier();
3106 __this_cpu_dec(ftrace_stack_reserve);
3107 preempt_enable_notrace();
3108
3109 }
3110
3111 static inline void ftrace_trace_stack(struct trace_array *tr,
3112 struct trace_buffer *buffer,
3113 unsigned int trace_ctx,
3114 int skip, struct pt_regs *regs)
3115 {
3116 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3117 return;
3118
3119 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3120 }
3121
3122 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3123 int skip)
3124 {
3125 struct trace_buffer *buffer = tr->array_buffer.buffer;
3126
3127 if (rcu_is_watching()) {
3128 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3129 return;
3130 }
3131
3132 /*
3133 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3134 * but if the above rcu_is_watching() failed, then the NMI
3135 * triggered someplace critical, and ct_irq_enter() should
3136 * not be called from NMI.
3137 */
3138 if (unlikely(in_nmi()))
3139 return;
3140
3141 ct_irq_enter_irqson();
3142 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3143 ct_irq_exit_irqson();
3144 }
3145
3146 /**
3147 * trace_dump_stack - record a stack back trace in the trace buffer
3148 * @skip: Number of functions to skip (helper handlers)
3149 */
3150 void trace_dump_stack(int skip)
3151 {
3152 if (tracing_disabled || tracing_selftest_running)
3153 return;
3154
3155 #ifndef CONFIG_UNWINDER_ORC
3156 /* Skip 1 to skip this function. */
3157 skip++;
3158 #endif
3159 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3160 tracing_gen_ctx(), skip, NULL);
3161 }
3162 EXPORT_SYMBOL_GPL(trace_dump_stack);
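/*
 * Example debug call site (hypothetical): record the current kernel
 * stack into the trace buffer without stopping tracing.
 */
#if 0
	trace_dump_stack(0);	/* 0: don't skip any additional callers */
#endif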
3163
3164 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3165 static DEFINE_PER_CPU(int, user_stack_count);
3166
3167 static void
3168 ftrace_trace_userstack(struct trace_array *tr,
3169 struct trace_buffer *buffer, unsigned int trace_ctx)
3170 {
3171 struct trace_event_call *call = &event_user_stack;
3172 struct ring_buffer_event *event;
3173 struct userstack_entry *entry;
3174
3175 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3176 return;
3177
3178 /*
3179 * NMIs can not handle page faults, even with fix ups.
3180 * The save user stack can (and often does) fault.
3181 */
3182 if (unlikely(in_nmi()))
3183 return;
3184
3185 /*
3186 * prevent recursion, since the user stack tracing may
3187 * trigger other kernel events.
3188 */
3189 preempt_disable();
3190 if (__this_cpu_read(user_stack_count))
3191 goto out;
3192
3193 __this_cpu_inc(user_stack_count);
3194
3195 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3196 sizeof(*entry), trace_ctx);
3197 if (!event)
3198 goto out_drop_count;
3199 entry = ring_buffer_event_data(event);
3200
3201 entry->tgid = current->tgid;
3202 memset(&entry->caller, 0, sizeof(entry->caller));
3203
3204 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3205 if (!call_filter_check_discard(call, entry, buffer, event))
3206 __buffer_unlock_commit(buffer, event);
3207
3208 out_drop_count:
3209 __this_cpu_dec(user_stack_count);
3210 out:
3211 preempt_enable();
3212 }
3213 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3214 static void ftrace_trace_userstack(struct trace_array *tr,
3215 struct trace_buffer *buffer,
3216 unsigned int trace_ctx)
3217 {
3218 }
3219 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3220
3221 #endif /* CONFIG_STACKTRACE */
3222
3223 static inline void
3224 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3225 unsigned long long delta)
3226 {
3227 entry->bottom_delta_ts = delta & U32_MAX;
3228 entry->top_delta_ts = (delta >> 32);
3229 }
3230
3231 void trace_last_func_repeats(struct trace_array *tr,
3232 struct trace_func_repeats *last_info,
3233 unsigned int trace_ctx)
3234 {
3235 struct trace_buffer *buffer = tr->array_buffer.buffer;
3236 struct func_repeats_entry *entry;
3237 struct ring_buffer_event *event;
3238 u64 delta;
3239
3240 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3241 sizeof(*entry), trace_ctx);
3242 if (!event)
3243 return;
3244
3245 delta = ring_buffer_event_time_stamp(buffer, event) -
3246 last_info->ts_last_call;
3247
3248 entry = ring_buffer_event_data(event);
3249 entry->ip = last_info->ip;
3250 entry->parent_ip = last_info->parent_ip;
3251 entry->count = last_info->count;
3252 func_repeats_set_delta_ts(entry, delta);
3253
3254 __buffer_unlock_commit(buffer, event);
3255 }
3256
3257 /* created for use with alloc_percpu */
3258 struct trace_buffer_struct {
3259 int nesting;
3260 char buffer[4][TRACE_BUF_SIZE];
3261 };
3262
3263 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3264
3265 /*
3266 * This allows for lockless recording. If we're nested too deeply, then
3267 * this returns NULL.
3268 */
3269 static char *get_trace_buf(void)
3270 {
3271 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3272
3273 if (!trace_percpu_buffer || buffer->nesting >= 4)
3274 return NULL;
3275
3276 buffer->nesting++;
3277
3278 /* Interrupts must see nesting incremented before we use the buffer */
3279 barrier();
3280 return &buffer->buffer[buffer->nesting - 1][0];
3281 }
3282
3283 static void put_trace_buf(void)
3284 {
3285 /* Don't let the decrement of nesting leak before this */
3286 barrier();
3287 this_cpu_dec(trace_percpu_buffer->nesting);
3288 }
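/*
 * Users pair get_trace_buf()/put_trace_buf() with preemption disabled so
 * the per-CPU nesting counter stays consistent, exactly as
 * trace_vbprintk() below does.  Sketch:
 */
#if 0
	char *buf;

	preempt_disable_notrace();
	buf = get_trace_buf();
	if (buf) {
		/* format at most TRACE_BUF_SIZE bytes into buf ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
#endif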
3289
3290 static int alloc_percpu_trace_buffer(void)
3291 {
3292 struct trace_buffer_struct __percpu *buffers;
3293
3294 if (trace_percpu_buffer)
3295 return 0;
3296
3297 buffers = alloc_percpu(struct trace_buffer_struct);
3298 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3299 return -ENOMEM;
3300
3301 trace_percpu_buffer = buffers;
3302 return 0;
3303 }
3304
3305 static int buffers_allocated;
3306
3307 void trace_printk_init_buffers(void)
3308 {
3309 if (buffers_allocated)
3310 return;
3311
3312 if (alloc_percpu_trace_buffer())
3313 return;
3314
3315 /* trace_printk() is for debug use only. Don't use it in production. */
3316
3317 pr_warn("\n");
3318 pr_warn("**********************************************************\n");
3319 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3320 pr_warn("** **\n");
3321 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3322 pr_warn("** **\n");
3323 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3324 pr_warn("** unsafe for production use. **\n");
3325 pr_warn("** **\n");
3326 pr_warn("** If you see this message and you are not debugging **\n");
3327 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3328 pr_warn("** **\n");
3329 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3330 pr_warn("**********************************************************\n");
3331
3332 /* Expand the buffers to set size */
3333 tracing_update_buffers();
3334
3335 buffers_allocated = 1;
3336
3337 /*
3338 * trace_printk_init_buffers() can be called by modules.
3339 * If that happens, then we need to start cmdline recording
3340 * directly here. If the global_trace.buffer is already
3341 * allocated here, then this was called by module code.
3342 */
3343 if (global_trace.array_buffer.buffer)
3344 tracing_start_cmdline_record();
3345 }
3346 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3347
3348 void trace_printk_start_comm(void)
3349 {
3350 /* Start tracing comms if trace printk is set */
3351 if (!buffers_allocated)
3352 return;
3353 tracing_start_cmdline_record();
3354 }
3355
3356 static void trace_printk_start_stop_comm(int enabled)
3357 {
3358 if (!buffers_allocated)
3359 return;
3360
3361 if (enabled)
3362 tracing_start_cmdline_record();
3363 else
3364 tracing_stop_cmdline_record();
3365 }
3366
3367 /**
3368 * trace_vbprintk - write binary msg to tracing buffer
3369 * @ip: The address of the caller
3370 * @fmt: The string format to write to the buffer
3371 * @args: Arguments for @fmt
3372 */
3373 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3374 {
3375 struct trace_event_call *call = &event_bprint;
3376 struct ring_buffer_event *event;
3377 struct trace_buffer *buffer;
3378 struct trace_array *tr = &global_trace;
3379 struct bprint_entry *entry;
3380 unsigned int trace_ctx;
3381 char *tbuffer;
3382 int len = 0, size;
3383
3384 if (unlikely(tracing_selftest_running || tracing_disabled))
3385 return 0;
3386
3387 /* Don't pollute graph traces with trace_vprintk internals */
3388 pause_graph_tracing();
3389
3390 trace_ctx = tracing_gen_ctx();
3391 preempt_disable_notrace();
3392
3393 tbuffer = get_trace_buf();
3394 if (!tbuffer) {
3395 len = 0;
3396 goto out_nobuffer;
3397 }
3398
3399 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3400
3401 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3402 goto out_put;
3403
3404 size = sizeof(*entry) + sizeof(u32) * len;
3405 buffer = tr->array_buffer.buffer;
3406 ring_buffer_nest_start(buffer);
3407 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3408 trace_ctx);
3409 if (!event)
3410 goto out;
3411 entry = ring_buffer_event_data(event);
3412 entry->ip = ip;
3413 entry->fmt = fmt;
3414
3415 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3416 if (!call_filter_check_discard(call, entry, buffer, event)) {
3417 __buffer_unlock_commit(buffer, event);
3418 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3419 }
3420
3421 out:
3422 ring_buffer_nest_end(buffer);
3423 out_put:
3424 put_trace_buf();
3425
3426 out_nobuffer:
3427 preempt_enable_notrace();
3428 unpause_graph_tracing();
3429
3430 return len;
3431 }
3432 EXPORT_SYMBOL_GPL(trace_vbprintk);
3433
3434 __printf(3, 0)
3435 static int
3436 __trace_array_vprintk(struct trace_buffer *buffer,
3437 unsigned long ip, const char *fmt, va_list args)
3438 {
3439 struct trace_event_call *call = &event_print;
3440 struct ring_buffer_event *event;
3441 int len = 0, size;
3442 struct print_entry *entry;
3443 unsigned int trace_ctx;
3444 char *tbuffer;
3445
3446 if (tracing_disabled || tracing_selftest_running)
3447 return 0;
3448
3449 /* Don't pollute graph traces with trace_vprintk internals */
3450 pause_graph_tracing();
3451
3452 trace_ctx = tracing_gen_ctx();
3453 preempt_disable_notrace();
3454
3455
3456 tbuffer = get_trace_buf();
3457 if (!tbuffer) {
3458 len = 0;
3459 goto out_nobuffer;
3460 }
3461
3462 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3463
3464 size = sizeof(*entry) + len + 1;
3465 ring_buffer_nest_start(buffer);
3466 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3467 trace_ctx);
3468 if (!event)
3469 goto out;
3470 entry = ring_buffer_event_data(event);
3471 entry->ip = ip;
3472
3473 memcpy(&entry->buf, tbuffer, len + 1);
3474 if (!call_filter_check_discard(call, entry, buffer, event)) {
3475 __buffer_unlock_commit(buffer, event);
3476 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3477 }
3478
3479 out:
3480 ring_buffer_nest_end(buffer);
3481 put_trace_buf();
3482
3483 out_nobuffer:
3484 preempt_enable_notrace();
3485 unpause_graph_tracing();
3486
3487 return len;
3488 }
3489
3490 __printf(3, 0)
3491 int trace_array_vprintk(struct trace_array *tr,
3492 unsigned long ip, const char *fmt, va_list args)
3493 {
3494 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3495 }
3496
3497 /**
3498 * trace_array_printk - Print a message to a specific instance
3499 * @tr: The instance trace_array descriptor
3500 * @ip: The instruction pointer that this is called from.
3501 * @fmt: The format to print (printf format)
3502 *
3503 * If a subsystem sets up its own instance, it may printk strings
3504 * into its tracing instance buffer using this function. Note, this
3505 * function will not write into the top level buffer (use
3506 * trace_printk() for that), as the top level buffer should only
3507 * contain events that can be individually disabled.
3508 * trace_printk() is only for debugging a kernel, and should never
3509 * be incorporated into normal use.
3510 *
3511 * trace_array_printk() can be used, as it will not add noise to the
3512 * top level tracing buffer.
3513 *
3514 * Note, trace_array_init_printk() must be called on @tr before this
3515 * can be used.
3516 */
3517 __printf(3, 0)
3518 int trace_array_printk(struct trace_array *tr,
3519 unsigned long ip, const char *fmt, ...)
3520 {
3521 int ret;
3522 va_list ap;
3523
3524 if (!tr)
3525 return -ENOENT;
3526
3527 /* This is only allowed for created instances */
3528 if (tr == &global_trace)
3529 return 0;
3530
3531 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3532 return 0;
3533
3534 va_start(ap, fmt);
3535 ret = trace_array_vprintk(tr, ip, fmt, ap);
3536 va_end(ap);
3537 return ret;
3538 }
3539 EXPORT_SYMBOL_GPL(trace_array_printk);
3540
3541 /**
3542 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3543 * @tr: The trace array to initialize the buffers for
3544 *
3545 * As trace_array_printk() only writes into instances, they are OK to
3546 * have in the kernel (unlike trace_printk()). This needs to be called
3547 * before trace_array_printk() can be used on a trace_array.
3548 */
3549 int trace_array_init_printk(struct trace_array *tr)
3550 {
3551 if (!tr)
3552 return -ENOENT;
3553
3554 /* This is only allowed for created instances */
3555 if (tr == &global_trace)
3556 return -EINVAL;
3557
3558 return alloc_percpu_trace_buffer();
3559 }
3560 EXPORT_SYMBOL_GPL(trace_array_init_printk);
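/*
 * Putting the two together: a subsystem that owns a trace instance
 * (obtained elsewhere, e.g. via trace_array_get_by_name()) prints into
 * it like this.  "my_tr" is a hypothetical instance pointer.
 */
#if 0
	if (!trace_array_init_printk(my_tr))
		trace_array_printk(my_tr, _THIS_IP_, "queue depth=%d\n", 3);
#endif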
3561
3562 __printf(3, 4)
3563 int trace_array_printk_buf(struct trace_buffer *buffer,
3564 unsigned long ip, const char *fmt, ...)
3565 {
3566 int ret;
3567 va_list ap;
3568
3569 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3570 return 0;
3571
3572 va_start(ap, fmt);
3573 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3574 va_end(ap);
3575 return ret;
3576 }
3577
3578 __printf(2, 0)
3579 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3580 {
3581 return trace_array_vprintk(&global_trace, ip, fmt, args);
3582 }
3583 EXPORT_SYMBOL_GPL(trace_vprintk);
3584
3585 static void trace_iterator_increment(struct trace_iterator *iter)
3586 {
3587 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3588
3589 iter->idx++;
3590 if (buf_iter)
3591 ring_buffer_iter_advance(buf_iter);
3592 }
3593
3594 static struct trace_entry *
3595 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3596 unsigned long *lost_events)
3597 {
3598 struct ring_buffer_event *event;
3599 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3600
3601 if (buf_iter) {
3602 event = ring_buffer_iter_peek(buf_iter, ts);
3603 if (lost_events)
3604 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3605 (unsigned long)-1 : 0;
3606 } else {
3607 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3608 lost_events);
3609 }
3610
3611 if (event) {
3612 iter->ent_size = ring_buffer_event_length(event);
3613 return ring_buffer_event_data(event);
3614 }
3615 iter->ent_size = 0;
3616 return NULL;
3617 }
3618
3619 static struct trace_entry *
3620 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3621 unsigned long *missing_events, u64 *ent_ts)
3622 {
3623 struct trace_buffer *buffer = iter->array_buffer->buffer;
3624 struct trace_entry *ent, *next = NULL;
3625 unsigned long lost_events = 0, next_lost = 0;
3626 int cpu_file = iter->cpu_file;
3627 u64 next_ts = 0, ts;
3628 int next_cpu = -1;
3629 int next_size = 0;
3630 int cpu;
3631
3632 /*
3633 	 * If we are in a per_cpu trace file, don't bother iterating over
3634 	 * all CPUs; peek at that CPU directly.
3635 */
3636 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3637 if (ring_buffer_empty_cpu(buffer, cpu_file))
3638 return NULL;
3639 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3640 if (ent_cpu)
3641 *ent_cpu = cpu_file;
3642
3643 return ent;
3644 }
3645
3646 for_each_tracing_cpu(cpu) {
3647
3648 if (ring_buffer_empty_cpu(buffer, cpu))
3649 continue;
3650
3651 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3652
3653 /*
3654 * Pick the entry with the smallest timestamp:
3655 */
3656 if (ent && (!next || ts < next_ts)) {
3657 next = ent;
3658 next_cpu = cpu;
3659 next_ts = ts;
3660 next_lost = lost_events;
3661 next_size = iter->ent_size;
3662 }
3663 }
3664
3665 iter->ent_size = next_size;
3666
3667 if (ent_cpu)
3668 *ent_cpu = next_cpu;
3669
3670 if (ent_ts)
3671 *ent_ts = next_ts;
3672
3673 if (missing_events)
3674 *missing_events = next_lost;
3675
3676 return next;
3677 }
3678
3679 #define STATIC_FMT_BUF_SIZE 128
3680 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3681
3682 static char *trace_iter_expand_format(struct trace_iterator *iter)
3683 {
3684 char *tmp;
3685
3686 /*
3687 * iter->tr is NULL when used with tp_printk, which makes
3688 * this get called where it is not safe to call krealloc().
3689 */
3690 if (!iter->tr || iter->fmt == static_fmt_buf)
3691 return NULL;
3692
3693 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3694 GFP_KERNEL);
3695 if (tmp) {
3696 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3697 iter->fmt = tmp;
3698 }
3699
3700 return tmp;
3701 }
3702
3703 /* Returns true if the string is safe to dereference from an event */
3704 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3705 bool star, int len)
3706 {
3707 unsigned long addr = (unsigned long)str;
3708 struct trace_event *trace_event;
3709 struct trace_event_call *event;
3710
3711 /* Ignore strings with no length */
3712 if (star && !len)
3713 return true;
3714
3715 /* OK if part of the event data */
3716 if ((addr >= (unsigned long)iter->ent) &&
3717 (addr < (unsigned long)iter->ent + iter->ent_size))
3718 return true;
3719
3720 /* OK if part of the temp seq buffer */
3721 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3722 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3723 return true;
3724
3725 /* Core rodata can not be freed */
3726 if (is_kernel_rodata(addr))
3727 return true;
3728
3729 if (trace_is_tracepoint_string(str))
3730 return true;
3731
3732 /*
3733 * Now this could be a module event, referencing core module
3734 * data, which is OK.
3735 */
3736 if (!iter->ent)
3737 return false;
3738
3739 trace_event = ftrace_find_event(iter->ent->type);
3740 if (!trace_event)
3741 return false;
3742
3743 event = container_of(trace_event, struct trace_event_call, event);
3744 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3745 return false;
3746
3747 /* Would rather have rodata, but this will suffice */
3748 if (within_module_core(addr, event->module))
3749 return true;
3750
3751 return false;
3752 }
3753
3754 static const char *show_buffer(struct trace_seq *s)
3755 {
3756 struct seq_buf *seq = &s->seq;
3757
3758 seq_buf_terminate(seq);
3759
3760 return seq->buffer;
3761 }
3762
3763 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3764
3765 static int test_can_verify_check(const char *fmt, ...)
3766 {
3767 char buf[16];
3768 va_list ap;
3769 int ret;
3770
3771 /*
3772 	 * The verifier depends on vsnprintf() modifying the va_list that is
3773 	 * passed to it, which happens when the va_list is passed by reference.
3774 	 * Some architectures (like x86_32) pass it by value, which means that
3775 	 * vsnprintf() does not modify the caller's va_list, and the verifier
3776 	 * would then need to be able to understand all the values that
3777 	 * vsnprintf can use. If it is passed by value, then the verifier
3778 	 * is disabled.
3779 */
3780 va_start(ap, fmt);
3781 vsnprintf(buf, 16, "%d", ap);
3782 ret = va_arg(ap, int);
3783 va_end(ap);
3784
3785 return ret;
3786 }
3787
3788 static void test_can_verify(void)
3789 {
3790 if (!test_can_verify_check("%d %d", 0, 1)) {
3791 pr_info("trace event string verifier disabled\n");
3792 static_branch_inc(&trace_no_verify);
3793 }
3794 }
3795
3796 /**
3797 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3798 * @iter: The iterator that holds the seq buffer and the event being printed
3799 * @fmt: The format used to print the event
3800 * @ap: The va_list holding the data to print from @fmt.
3801 *
3802 * This writes the data into the @iter->seq buffer using the data from
3803 * @fmt and @ap. If the format has a %s, then the source of the string
3804 * is examined to make sure it is safe to print, otherwise it will
3805 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3806 * pointer.
3807 */
3808 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3809 va_list ap)
3810 {
3811 const char *p = fmt;
3812 const char *str;
3813 int i, j;
3814
3815 if (WARN_ON_ONCE(!fmt))
3816 return;
3817
3818 if (static_branch_unlikely(&trace_no_verify))
3819 goto print;
3820
3821 /* Don't bother checking when doing a ftrace_dump() */
3822 if (iter->fmt == static_fmt_buf)
3823 goto print;
3824
3825 while (*p) {
3826 bool star = false;
3827 int len = 0;
3828
3829 j = 0;
3830
3831 /* We only care about %s and variants */
3832 for (i = 0; p[i]; i++) {
3833 if (i + 1 >= iter->fmt_size) {
3834 /*
3835 * If we can't expand the copy buffer,
3836 * just print it.
3837 */
3838 if (!trace_iter_expand_format(iter))
3839 goto print;
3840 }
3841
3842 if (p[i] == '\\' && p[i+1]) {
3843 i++;
3844 continue;
3845 }
3846 if (p[i] == '%') {
3847 /* Need to test cases like %08.*s */
3848 for (j = 1; p[i+j]; j++) {
3849 if (isdigit(p[i+j]) ||
3850 p[i+j] == '.')
3851 continue;
3852 if (p[i+j] == '*') {
3853 star = true;
3854 continue;
3855 }
3856 break;
3857 }
3858 if (p[i+j] == 's')
3859 break;
3860 star = false;
3861 }
3862 j = 0;
3863 }
3864 /* If no %s found then just print normally */
3865 if (!p[i])
3866 break;
3867
3868 /* Copy up to the %s, and print that */
3869 strncpy(iter->fmt, p, i);
3870 iter->fmt[i] = '\0';
3871 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3872
3873 /*
3874 * If iter->seq is full, the above call no longer guarantees
3875 * that ap is in sync with fmt processing, and further calls
3876 * to va_arg() can return wrong positional arguments.
3877 *
3878 * Ensure that ap is no longer used in this case.
3879 */
3880 if (iter->seq.full) {
3881 p = "";
3882 break;
3883 }
3884
3885 if (star)
3886 len = va_arg(ap, int);
3887
3888 /* The ap now points to the string data of the %s */
3889 str = va_arg(ap, const char *);
3890
3891 /*
3892 * If you hit this warning, it is likely that the
3893 * trace event in question used %s on a string that
3894 * was saved at the time of the event, but may not be
3895 * around when the trace is read. Use __string(),
3896 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3897 * instead. See samples/trace_events/trace-events-sample.h
3898 * for reference.
3899 */
3900 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3901 "fmt: '%s' current_buffer: '%s'",
3902 fmt, show_buffer(&iter->seq))) {
3903 int ret;
3904
3905 /* Try to safely read the string */
3906 if (star) {
3907 if (len + 1 > iter->fmt_size)
3908 len = iter->fmt_size - 1;
3909 if (len < 0)
3910 len = 0;
3911 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3912 iter->fmt[len] = 0;
3913 star = false;
3914 } else {
3915 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3916 iter->fmt_size);
3917 }
3918 if (ret < 0)
3919 trace_seq_printf(&iter->seq, "(0x%px)", str);
3920 else
3921 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3922 str, iter->fmt);
3923 str = "[UNSAFE-MEMORY]";
3924 strcpy(iter->fmt, "%s");
3925 } else {
3926 strncpy(iter->fmt, p + i, j + 1);
3927 iter->fmt[j+1] = '\0';
3928 }
3929 if (star)
3930 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3931 else
3932 trace_seq_printf(&iter->seq, iter->fmt, str);
3933
3934 p += i + j + 1;
3935 }
3936 print:
3937 if (*p)
3938 trace_seq_vprintf(&iter->seq, p, ap);
3939 }
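/*
 * When the check above trips, the offending %s argument is rendered as the
 * raw pointer plus whatever could be read safely, followed by the
 * "[UNSAFE-MEMORY]" marker in place of the original string, e.g.
 * (address and contents illustrative):
 *
 *	(0xffff8881001a2c00:foo)[UNSAFE-MEMORY]
 */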
3940
3941 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3942 {
3943 const char *p, *new_fmt;
3944 char *q;
3945
3946 if (WARN_ON_ONCE(!fmt))
3947 return fmt;
3948
3949 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3950 return fmt;
3951
3952 p = fmt;
3953 new_fmt = q = iter->fmt;
3954 while (*p) {
3955 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3956 if (!trace_iter_expand_format(iter))
3957 return fmt;
3958
3959 q += iter->fmt - new_fmt;
3960 new_fmt = iter->fmt;
3961 }
3962
3963 *q++ = *p++;
3964
3965 /* Replace %p with %px */
3966 if (p[-1] == '%') {
3967 if (p[0] == '%') {
3968 *q++ = *p++;
3969 } else if (p[0] == 'p' && !isalnum(p[1])) {
3970 *q++ = *p++;
3971 *q++ = 'x';
3972 }
3973 }
3974 }
3975 *q = '\0';
3976
3977 return new_fmt;
3978 }
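/*
 * For example, with the hash-ptr option cleared on the instance, a format
 * string such as
 *
 *	"comm=%s rate=100%% ptr=%p\n"
 *
 * is rewritten in iter->fmt as
 *
 *	"comm=%s rate=100%% ptr=%px\n"
 *
 * while extended specifiers like "%ps" or "%pK" are left alone because the
 * character after the 'p' is alphanumeric.
 */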
3979
3980 #define STATIC_TEMP_BUF_SIZE 128
3981 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3982
3983 /* Find the next real entry, without updating the iterator itself */
3984 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3985 int *ent_cpu, u64 *ent_ts)
3986 {
3987 /* __find_next_entry will reset ent_size */
3988 int ent_size = iter->ent_size;
3989 struct trace_entry *entry;
3990
3991 /*
3992 * If called from ftrace_dump(), then the iter->temp buffer
3993 * will be the static_temp_buf and not created from kmalloc.
3994 	 * If the entry size is greater than the buffer, we cannot
3995 	 * save it. Just return NULL in that case. This is only
3996 	 * used to add markers when two consecutive events' time
3997 	 * stamps have a large delta. See trace_print_lat_context().
3998 */
3999 if (iter->temp == static_temp_buf &&
4000 STATIC_TEMP_BUF_SIZE < ent_size)
4001 return NULL;
4002
4003 /*
4004 * The __find_next_entry() may call peek_next_entry(), which may
4005 * call ring_buffer_peek() that may make the contents of iter->ent
4006 * undefined. Need to copy iter->ent now.
4007 */
4008 if (iter->ent && iter->ent != iter->temp) {
4009 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4010 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4011 void *temp;
4012 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4013 if (!temp)
4014 return NULL;
4015 kfree(iter->temp);
4016 iter->temp = temp;
4017 iter->temp_size = iter->ent_size;
4018 }
4019 memcpy(iter->temp, iter->ent, iter->ent_size);
4020 iter->ent = iter->temp;
4021 }
4022 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4023 /* Put back the original ent_size */
4024 iter->ent_size = ent_size;
4025
4026 return entry;
4027 }
4028
4029 /* Find the next real entry, and increment the iterator to the next entry */
4030 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4031 {
4032 iter->ent = __find_next_entry(iter, &iter->cpu,
4033 &iter->lost_events, &iter->ts);
4034
4035 if (iter->ent)
4036 trace_iterator_increment(iter);
4037
4038 return iter->ent ? iter : NULL;
4039 }
4040
4041 static void trace_consume(struct trace_iterator *iter)
4042 {
4043 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4044 &iter->lost_events);
4045 }
4046
4047 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4048 {
4049 struct trace_iterator *iter = m->private;
4050 int i = (int)*pos;
4051 void *ent;
4052
4053 WARN_ON_ONCE(iter->leftover);
4054
4055 (*pos)++;
4056
4057 /* can't go backwards */
4058 if (iter->idx > i)
4059 return NULL;
4060
4061 if (iter->idx < 0)
4062 ent = trace_find_next_entry_inc(iter);
4063 else
4064 ent = iter;
4065
4066 while (ent && iter->idx < i)
4067 ent = trace_find_next_entry_inc(iter);
4068
4069 iter->pos = *pos;
4070
4071 return ent;
4072 }
4073
4074 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4075 {
4076 struct ring_buffer_iter *buf_iter;
4077 unsigned long entries = 0;
4078 u64 ts;
4079
4080 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4081
4082 buf_iter = trace_buffer_iter(iter, cpu);
4083 if (!buf_iter)
4084 return;
4085
4086 ring_buffer_iter_reset(buf_iter);
4087
4088 /*
4089 	 * With the max latency tracers we can hit the case where a reset
4090 	 * never took place on a cpu. This shows up as the timestamp being
4091 	 * before the start of the buffer.
4092 */
4093 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4094 if (ts >= iter->array_buffer->time_start)
4095 break;
4096 entries++;
4097 ring_buffer_iter_advance(buf_iter);
4098 }
4099
4100 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4101 }
4102
4103 /*
4104 * The current tracer is copied to avoid taking a global lock
4105 * all around.
4106 */
4107 static void *s_start(struct seq_file *m, loff_t *pos)
4108 {
4109 struct trace_iterator *iter = m->private;
4110 struct trace_array *tr = iter->tr;
4111 int cpu_file = iter->cpu_file;
4112 void *p = NULL;
4113 loff_t l = 0;
4114 int cpu;
4115
4116 /*
4117 * copy the tracer to avoid using a global lock all around.
4118 * iter->trace is a copy of current_trace, the pointer to the
4119 * name may be used instead of a strcmp(), as iter->trace->name
4120 * will point to the same string as current_trace->name.
4121 */
4122 mutex_lock(&trace_types_lock);
4123 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4124 *iter->trace = *tr->current_trace;
4125 mutex_unlock(&trace_types_lock);
4126
4127 #ifdef CONFIG_TRACER_MAX_TRACE
4128 if (iter->snapshot && iter->trace->use_max_tr)
4129 return ERR_PTR(-EBUSY);
4130 #endif
4131
4132 if (*pos != iter->pos) {
4133 iter->ent = NULL;
4134 iter->cpu = 0;
4135 iter->idx = -1;
4136
4137 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4138 for_each_tracing_cpu(cpu)
4139 tracing_iter_reset(iter, cpu);
4140 } else
4141 tracing_iter_reset(iter, cpu_file);
4142
4143 iter->leftover = 0;
4144 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4145 ;
4146
4147 } else {
4148 /*
4149 * If we overflowed the seq_file before, then we want
4150 * to just reuse the trace_seq buffer again.
4151 */
4152 if (iter->leftover)
4153 p = iter;
4154 else {
4155 l = *pos - 1;
4156 p = s_next(m, p, &l);
4157 }
4158 }
4159
4160 trace_event_read_lock();
4161 trace_access_lock(cpu_file);
4162 return p;
4163 }
4164
4165 static void s_stop(struct seq_file *m, void *p)
4166 {
4167 struct trace_iterator *iter = m->private;
4168
4169 #ifdef CONFIG_TRACER_MAX_TRACE
4170 if (iter->snapshot && iter->trace->use_max_tr)
4171 return;
4172 #endif
4173
4174 trace_access_unlock(iter->cpu_file);
4175 trace_event_read_unlock();
4176 }
4177
4178 static void
4179 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4180 unsigned long *entries, int cpu)
4181 {
4182 unsigned long count;
4183
4184 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4185 /*
4186 * If this buffer has skipped entries, then we hold all
4187 * entries for the trace and we need to ignore the
4188 * ones before the time stamp.
4189 */
4190 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4191 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4192 /* total is the same as the entries */
4193 *total = count;
4194 } else
4195 *total = count +
4196 ring_buffer_overrun_cpu(buf->buffer, cpu);
4197 *entries = count;
4198 }
4199
4200 static void
4201 get_total_entries(struct array_buffer *buf,
4202 unsigned long *total, unsigned long *entries)
4203 {
4204 unsigned long t, e;
4205 int cpu;
4206
4207 *total = 0;
4208 *entries = 0;
4209
4210 for_each_tracing_cpu(cpu) {
4211 get_total_entries_cpu(buf, &t, &e, cpu);
4212 *total += t;
4213 *entries += e;
4214 }
4215 }
4216
4217 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4218 {
4219 unsigned long total, entries;
4220
4221 if (!tr)
4222 tr = &global_trace;
4223
4224 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4225
4226 return entries;
4227 }
4228
4229 unsigned long trace_total_entries(struct trace_array *tr)
4230 {
4231 unsigned long total, entries;
4232
4233 if (!tr)
4234 tr = &global_trace;
4235
4236 get_total_entries(&tr->array_buffer, &total, &entries);
4237
4238 return entries;
4239 }
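/*
 * Both helpers treat a NULL trace_array as the top-level buffer, so a quick
 * look at how much has been recorded can be done with (output illustrative):
 *
 *	unsigned long all = trace_total_entries(NULL);
 *	unsigned long cpu0 = trace_total_entries_cpu(NULL, 0);
 *
 *	pr_info("ftrace: %lu entries total, %lu on CPU0\n", all, cpu0);
 */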
4240
4241 static void print_lat_help_header(struct seq_file *m)
4242 {
4243 seq_puts(m, "# _------=> CPU# \n"
4244 "# / _-----=> irqs-off/BH-disabled\n"
4245 "# | / _----=> need-resched \n"
4246 "# || / _---=> hardirq/softirq \n"
4247 "# ||| / _--=> preempt-depth \n"
4248 "# |||| / _-=> migrate-disable \n"
4249 "# ||||| / delay \n"
4250 "# cmd pid |||||| time | caller \n"
4251 "# \\ / |||||| \\ | / \n");
4252 }
4253
4254 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4255 {
4256 unsigned long total;
4257 unsigned long entries;
4258
4259 get_total_entries(buf, &total, &entries);
4260 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4261 entries, total, num_online_cpus());
4262 seq_puts(m, "#\n");
4263 }
4264
4265 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4266 unsigned int flags)
4267 {
4268 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4269
4270 print_event_info(buf, m);
4271
4272 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4273 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4274 }
4275
4276 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4277 unsigned int flags)
4278 {
4279 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4280 static const char space[] = " ";
4281 int prec = tgid ? 12 : 2;
4282
4283 print_event_info(buf, m);
4284
4285 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4286 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4287 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4288 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4289 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4290 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4291 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4292 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4293 }
4294
4295 void
4296 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4297 {
4298 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4299 struct array_buffer *buf = iter->array_buffer;
4300 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4301 struct tracer *type = iter->trace;
4302 unsigned long entries;
4303 unsigned long total;
4304 const char *name = type->name;
4305
4306 get_total_entries(buf, &total, &entries);
4307
4308 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4309 name, UTS_RELEASE);
4310 seq_puts(m, "# -----------------------------------"
4311 "---------------------------------\n");
4312 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4313 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4314 nsecs_to_usecs(data->saved_latency),
4315 entries,
4316 total,
4317 buf->cpu,
4318 preempt_model_none() ? "server" :
4319 preempt_model_voluntary() ? "desktop" :
4320 preempt_model_full() ? "preempt" :
4321 preempt_model_rt() ? "preempt_rt" :
4322 "unknown",
4323 /* These are reserved for later use */
4324 0, 0, 0, 0);
4325 #ifdef CONFIG_SMP
4326 seq_printf(m, " #P:%d)\n", num_online_cpus());
4327 #else
4328 seq_puts(m, ")\n");
4329 #endif
4330 seq_puts(m, "# -----------------\n");
4331 seq_printf(m, "# | task: %.16s-%d "
4332 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4333 data->comm, data->pid,
4334 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4335 data->policy, data->rt_priority);
4336 seq_puts(m, "# -----------------\n");
4337
4338 if (data->critical_start) {
4339 seq_puts(m, "# => started at: ");
4340 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4341 trace_print_seq(m, &iter->seq);
4342 seq_puts(m, "\n# => ended at: ");
4343 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4344 trace_print_seq(m, &iter->seq);
4345 seq_puts(m, "\n#\n");
4346 }
4347
4348 seq_puts(m, "#\n");
4349 }
4350
4351 static void test_cpu_buff_start(struct trace_iterator *iter)
4352 {
4353 struct trace_seq *s = &iter->seq;
4354 struct trace_array *tr = iter->tr;
4355
4356 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4357 return;
4358
4359 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4360 return;
4361
4362 if (cpumask_available(iter->started) &&
4363 cpumask_test_cpu(iter->cpu, iter->started))
4364 return;
4365
4366 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4367 return;
4368
4369 if (cpumask_available(iter->started))
4370 cpumask_set_cpu(iter->cpu, iter->started);
4371
4372 /* Don't print started cpu buffer for the first entry of the trace */
4373 if (iter->idx > 1)
4374 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4375 iter->cpu);
4376 }
4377
4378 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4379 {
4380 struct trace_array *tr = iter->tr;
4381 struct trace_seq *s = &iter->seq;
4382 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4383 struct trace_entry *entry;
4384 struct trace_event *event;
4385
4386 entry = iter->ent;
4387
4388 test_cpu_buff_start(iter);
4389
4390 event = ftrace_find_event(entry->type);
4391
4392 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4393 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4394 trace_print_lat_context(iter);
4395 else
4396 trace_print_context(iter);
4397 }
4398
4399 if (trace_seq_has_overflowed(s))
4400 return TRACE_TYPE_PARTIAL_LINE;
4401
4402 if (event)
4403 return event->funcs->trace(iter, sym_flags, event);
4404
4405 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4406
4407 return trace_handle_return(s);
4408 }
4409
4410 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4411 {
4412 struct trace_array *tr = iter->tr;
4413 struct trace_seq *s = &iter->seq;
4414 struct trace_entry *entry;
4415 struct trace_event *event;
4416
4417 entry = iter->ent;
4418
4419 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4420 trace_seq_printf(s, "%d %d %llu ",
4421 entry->pid, iter->cpu, iter->ts);
4422
4423 if (trace_seq_has_overflowed(s))
4424 return TRACE_TYPE_PARTIAL_LINE;
4425
4426 event = ftrace_find_event(entry->type);
4427 if (event)
4428 return event->funcs->raw(iter, 0, event);
4429
4430 trace_seq_printf(s, "%d ?\n", entry->type);
4431
4432 return trace_handle_return(s);
4433 }
4434
4435 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4436 {
4437 struct trace_array *tr = iter->tr;
4438 struct trace_seq *s = &iter->seq;
4439 unsigned char newline = '\n';
4440 struct trace_entry *entry;
4441 struct trace_event *event;
4442
4443 entry = iter->ent;
4444
4445 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4446 SEQ_PUT_HEX_FIELD(s, entry->pid);
4447 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4448 SEQ_PUT_HEX_FIELD(s, iter->ts);
4449 if (trace_seq_has_overflowed(s))
4450 return TRACE_TYPE_PARTIAL_LINE;
4451 }
4452
4453 event = ftrace_find_event(entry->type);
4454 if (event) {
4455 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4456 if (ret != TRACE_TYPE_HANDLED)
4457 return ret;
4458 }
4459
4460 SEQ_PUT_FIELD(s, newline);
4461
4462 return trace_handle_return(s);
4463 }
4464
4465 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4466 {
4467 struct trace_array *tr = iter->tr;
4468 struct trace_seq *s = &iter->seq;
4469 struct trace_entry *entry;
4470 struct trace_event *event;
4471
4472 entry = iter->ent;
4473
4474 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4475 SEQ_PUT_FIELD(s, entry->pid);
4476 SEQ_PUT_FIELD(s, iter->cpu);
4477 SEQ_PUT_FIELD(s, iter->ts);
4478 if (trace_seq_has_overflowed(s))
4479 return TRACE_TYPE_PARTIAL_LINE;
4480 }
4481
4482 event = ftrace_find_event(entry->type);
4483 return event ? event->funcs->binary(iter, 0, event) :
4484 TRACE_TYPE_HANDLED;
4485 }
4486
4487 int trace_empty(struct trace_iterator *iter)
4488 {
4489 struct ring_buffer_iter *buf_iter;
4490 int cpu;
4491
4492 /* If we are looking at one CPU buffer, only check that one */
4493 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4494 cpu = iter->cpu_file;
4495 buf_iter = trace_buffer_iter(iter, cpu);
4496 if (buf_iter) {
4497 if (!ring_buffer_iter_empty(buf_iter))
4498 return 0;
4499 } else {
4500 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4501 return 0;
4502 }
4503 return 1;
4504 }
4505
4506 for_each_tracing_cpu(cpu) {
4507 buf_iter = trace_buffer_iter(iter, cpu);
4508 if (buf_iter) {
4509 if (!ring_buffer_iter_empty(buf_iter))
4510 return 0;
4511 } else {
4512 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4513 return 0;
4514 }
4515 }
4516
4517 return 1;
4518 }
4519
4520 /* Called with trace_event_read_lock() held. */
4521 enum print_line_t print_trace_line(struct trace_iterator *iter)
4522 {
4523 struct trace_array *tr = iter->tr;
4524 unsigned long trace_flags = tr->trace_flags;
4525 enum print_line_t ret;
4526
4527 if (iter->lost_events) {
4528 if (iter->lost_events == (unsigned long)-1)
4529 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4530 iter->cpu);
4531 else
4532 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4533 iter->cpu, iter->lost_events);
4534 if (trace_seq_has_overflowed(&iter->seq))
4535 return TRACE_TYPE_PARTIAL_LINE;
4536 }
4537
4538 if (iter->trace && iter->trace->print_line) {
4539 ret = iter->trace->print_line(iter);
4540 if (ret != TRACE_TYPE_UNHANDLED)
4541 return ret;
4542 }
4543
4544 if (iter->ent->type == TRACE_BPUTS &&
4545 trace_flags & TRACE_ITER_PRINTK &&
4546 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4547 return trace_print_bputs_msg_only(iter);
4548
4549 if (iter->ent->type == TRACE_BPRINT &&
4550 trace_flags & TRACE_ITER_PRINTK &&
4551 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4552 return trace_print_bprintk_msg_only(iter);
4553
4554 if (iter->ent->type == TRACE_PRINT &&
4555 trace_flags & TRACE_ITER_PRINTK &&
4556 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4557 return trace_print_printk_msg_only(iter);
4558
4559 if (trace_flags & TRACE_ITER_BIN)
4560 return print_bin_fmt(iter);
4561
4562 if (trace_flags & TRACE_ITER_HEX)
4563 return print_hex_fmt(iter);
4564
4565 if (trace_flags & TRACE_ITER_RAW)
4566 return print_raw_fmt(iter);
4567
4568 return print_trace_fmt(iter);
4569 }
4570
4571 void trace_latency_header(struct seq_file *m)
4572 {
4573 struct trace_iterator *iter = m->private;
4574 struct trace_array *tr = iter->tr;
4575
4576 /* print nothing if the buffers are empty */
4577 if (trace_empty(iter))
4578 return;
4579
4580 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4581 print_trace_header(m, iter);
4582
4583 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4584 print_lat_help_header(m);
4585 }
4586
4587 void trace_default_header(struct seq_file *m)
4588 {
4589 struct trace_iterator *iter = m->private;
4590 struct trace_array *tr = iter->tr;
4591 unsigned long trace_flags = tr->trace_flags;
4592
4593 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4594 return;
4595
4596 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4597 /* print nothing if the buffers are empty */
4598 if (trace_empty(iter))
4599 return;
4600 print_trace_header(m, iter);
4601 if (!(trace_flags & TRACE_ITER_VERBOSE))
4602 print_lat_help_header(m);
4603 } else {
4604 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4605 if (trace_flags & TRACE_ITER_IRQ_INFO)
4606 print_func_help_header_irq(iter->array_buffer,
4607 m, trace_flags);
4608 else
4609 print_func_help_header(iter->array_buffer, m,
4610 trace_flags);
4611 }
4612 }
4613 }
4614
4615 static void test_ftrace_alive(struct seq_file *m)
4616 {
4617 if (!ftrace_is_dead())
4618 return;
4619 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4620 "# MAY BE MISSING FUNCTION EVENTS\n");
4621 }
4622
4623 #ifdef CONFIG_TRACER_MAX_TRACE
4624 static void show_snapshot_main_help(struct seq_file *m)
4625 {
4626 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4627 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4628 "# Takes a snapshot of the main buffer.\n"
4629 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4630 "# (Doesn't have to be '2' works with any number that\n"
4631 "# is not a '0' or '1')\n");
4632 }
4633
4634 static void show_snapshot_percpu_help(struct seq_file *m)
4635 {
4636 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4637 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4638 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4639 "# Takes a snapshot of the main buffer for this cpu.\n");
4640 #else
4641 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4642 "# Must use main snapshot file to allocate.\n");
4643 #endif
4644 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4645 "# (Doesn't have to be '2' works with any number that\n"
4646 "# is not a '0' or '1')\n");
4647 }
4648
4649 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4650 {
4651 if (iter->tr->allocated_snapshot)
4652 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4653 else
4654 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4655
4656 seq_puts(m, "# Snapshot commands:\n");
4657 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4658 show_snapshot_main_help(m);
4659 else
4660 show_snapshot_percpu_help(m);
4661 }
4662 #else
4663 /* Should never be called */
4664 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4665 #endif
4666
4667 static int s_show(struct seq_file *m, void *v)
4668 {
4669 struct trace_iterator *iter = v;
4670 int ret;
4671
4672 if (iter->ent == NULL) {
4673 if (iter->tr) {
4674 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4675 seq_puts(m, "#\n");
4676 test_ftrace_alive(m);
4677 }
4678 if (iter->snapshot && trace_empty(iter))
4679 print_snapshot_help(m, iter);
4680 else if (iter->trace && iter->trace->print_header)
4681 iter->trace->print_header(m);
4682 else
4683 trace_default_header(m);
4684
4685 } else if (iter->leftover) {
4686 /*
4687 * If we filled the seq_file buffer earlier, we
4688 * want to just show it now.
4689 */
4690 ret = trace_print_seq(m, &iter->seq);
4691
4692 /* ret should this time be zero, but you never know */
4693 iter->leftover = ret;
4694
4695 } else {
4696 print_trace_line(iter);
4697 ret = trace_print_seq(m, &iter->seq);
4698 /*
4699 * If we overflow the seq_file buffer, then it will
4700 * ask us for this data again at start up.
4701 * Use that instead.
4702 * ret is 0 if seq_file write succeeded.
4703 * -1 otherwise.
4704 */
4705 iter->leftover = ret;
4706 }
4707
4708 return 0;
4709 }
4710
4711 /*
4712 * Should be used after trace_array_get(), trace_types_lock
4713 * ensures that i_cdev was already initialized.
4714 */
4715 static inline int tracing_get_cpu(struct inode *inode)
4716 {
4717 if (inode->i_cdev) /* See trace_create_cpu_file() */
4718 return (long)inode->i_cdev - 1;
4719 return RING_BUFFER_ALL_CPUS;
4720 }
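/*
 * The encoding assumed here is that the per_cpu/cpu<N> files are created
 * with i_cdev set to N + 1, so a file for CPU 2 yields 2, while files such
 * as the top level "trace" leave i_cdev NULL and get RING_BUFFER_ALL_CPUS.
 */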
4721
4722 static const struct seq_operations tracer_seq_ops = {
4723 .start = s_start,
4724 .next = s_next,
4725 .stop = s_stop,
4726 .show = s_show,
4727 };
4728
4729 static struct trace_iterator *
4730 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4731 {
4732 struct trace_array *tr = inode->i_private;
4733 struct trace_iterator *iter;
4734 int cpu;
4735
4736 if (tracing_disabled)
4737 return ERR_PTR(-ENODEV);
4738
4739 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4740 if (!iter)
4741 return ERR_PTR(-ENOMEM);
4742
4743 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4744 GFP_KERNEL);
4745 if (!iter->buffer_iter)
4746 goto release;
4747
4748 /*
4749 * trace_find_next_entry() may need to save off iter->ent.
4750 * It will place it into the iter->temp buffer. As most
4751 	 * events are less than 128 bytes, allocate a buffer of that size.
4752 * If one is greater, then trace_find_next_entry() will
4753 * allocate a new buffer to adjust for the bigger iter->ent.
4754 * It's not critical if it fails to get allocated here.
4755 */
4756 iter->temp = kmalloc(128, GFP_KERNEL);
4757 if (iter->temp)
4758 iter->temp_size = 128;
4759
4760 /*
4761 * trace_event_printf() may need to modify given format
4762 * string to replace %p with %px so that it shows real address
4763 	 * instead of a hash value. However, that is only needed for event
4764 	 * tracing; other tracers may not need it. Defer the allocation
4765 * until it is needed.
4766 */
4767 iter->fmt = NULL;
4768 iter->fmt_size = 0;
4769
4770 /*
4771 * We make a copy of the current tracer to avoid concurrent
4772 * changes on it while we are reading.
4773 */
4774 mutex_lock(&trace_types_lock);
4775 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4776 if (!iter->trace)
4777 goto fail;
4778
4779 *iter->trace = *tr->current_trace;
4780
4781 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4782 goto fail;
4783
4784 iter->tr = tr;
4785
4786 #ifdef CONFIG_TRACER_MAX_TRACE
4787 /* Currently only the top directory has a snapshot */
4788 if (tr->current_trace->print_max || snapshot)
4789 iter->array_buffer = &tr->max_buffer;
4790 else
4791 #endif
4792 iter->array_buffer = &tr->array_buffer;
4793 iter->snapshot = snapshot;
4794 iter->pos = -1;
4795 iter->cpu_file = tracing_get_cpu(inode);
4796 mutex_init(&iter->mutex);
4797
4798 /* Notify the tracer early; before we stop tracing. */
4799 if (iter->trace->open)
4800 iter->trace->open(iter);
4801
4802 /* Annotate start of buffers if we had overruns */
4803 if (ring_buffer_overruns(iter->array_buffer->buffer))
4804 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4805
4806 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4807 if (trace_clocks[tr->clock_id].in_ns)
4808 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4809
4810 /*
4811 * If pause-on-trace is enabled, then stop the trace while
4812 * dumping, unless this is the "snapshot" file
4813 */
4814 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4815 tracing_stop_tr(tr);
4816
4817 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4818 for_each_tracing_cpu(cpu) {
4819 iter->buffer_iter[cpu] =
4820 ring_buffer_read_prepare(iter->array_buffer->buffer,
4821 cpu, GFP_KERNEL);
4822 }
4823 ring_buffer_read_prepare_sync();
4824 for_each_tracing_cpu(cpu) {
4825 ring_buffer_read_start(iter->buffer_iter[cpu]);
4826 tracing_iter_reset(iter, cpu);
4827 }
4828 } else {
4829 cpu = iter->cpu_file;
4830 iter->buffer_iter[cpu] =
4831 ring_buffer_read_prepare(iter->array_buffer->buffer,
4832 cpu, GFP_KERNEL);
4833 ring_buffer_read_prepare_sync();
4834 ring_buffer_read_start(iter->buffer_iter[cpu]);
4835 tracing_iter_reset(iter, cpu);
4836 }
4837
4838 mutex_unlock(&trace_types_lock);
4839
4840 return iter;
4841
4842 fail:
4843 mutex_unlock(&trace_types_lock);
4844 kfree(iter->trace);
4845 kfree(iter->temp);
4846 kfree(iter->buffer_iter);
4847 release:
4848 seq_release_private(inode, file);
4849 return ERR_PTR(-ENOMEM);
4850 }
4851
4852 int tracing_open_generic(struct inode *inode, struct file *filp)
4853 {
4854 int ret;
4855
4856 ret = tracing_check_open_get_tr(NULL);
4857 if (ret)
4858 return ret;
4859
4860 filp->private_data = inode->i_private;
4861 return 0;
4862 }
4863
4864 bool tracing_is_disabled(void)
4865 {
4866 	return (tracing_disabled) ? true : false;
4867 }
4868
4869 /*
4870 * Open and update trace_array ref count.
4871 * Must have the current trace_array passed to it.
4872 */
4873 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4874 {
4875 struct trace_array *tr = inode->i_private;
4876 int ret;
4877
4878 ret = tracing_check_open_get_tr(tr);
4879 if (ret)
4880 return ret;
4881
4882 filp->private_data = inode->i_private;
4883
4884 return 0;
4885 }
4886
4887 static int tracing_mark_open(struct inode *inode, struct file *filp)
4888 {
4889 stream_open(inode, filp);
4890 return tracing_open_generic_tr(inode, filp);
4891 }
4892
4893 static int tracing_release(struct inode *inode, struct file *file)
4894 {
4895 struct trace_array *tr = inode->i_private;
4896 struct seq_file *m = file->private_data;
4897 struct trace_iterator *iter;
4898 int cpu;
4899
4900 if (!(file->f_mode & FMODE_READ)) {
4901 trace_array_put(tr);
4902 return 0;
4903 }
4904
4905 /* Writes do not use seq_file */
4906 iter = m->private;
4907 mutex_lock(&trace_types_lock);
4908
4909 for_each_tracing_cpu(cpu) {
4910 if (iter->buffer_iter[cpu])
4911 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4912 }
4913
4914 if (iter->trace && iter->trace->close)
4915 iter->trace->close(iter);
4916
4917 if (!iter->snapshot && tr->stop_count)
4918 /* reenable tracing if it was previously enabled */
4919 tracing_start_tr(tr);
4920
4921 __trace_array_put(tr);
4922
4923 mutex_unlock(&trace_types_lock);
4924
4925 mutex_destroy(&iter->mutex);
4926 free_cpumask_var(iter->started);
4927 kfree(iter->fmt);
4928 kfree(iter->temp);
4929 kfree(iter->trace);
4930 kfree(iter->buffer_iter);
4931 seq_release_private(inode, file);
4932
4933 return 0;
4934 }
4935
4936 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4937 {
4938 struct trace_array *tr = inode->i_private;
4939
4940 trace_array_put(tr);
4941 return 0;
4942 }
4943
4944 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4945 {
4946 struct trace_array *tr = inode->i_private;
4947
4948 trace_array_put(tr);
4949
4950 return single_release(inode, file);
4951 }
4952
4953 static int tracing_open(struct inode *inode, struct file *file)
4954 {
4955 struct trace_array *tr = inode->i_private;
4956 struct trace_iterator *iter;
4957 int ret;
4958
4959 ret = tracing_check_open_get_tr(tr);
4960 if (ret)
4961 return ret;
4962
4963 /* If this file was open for write, then erase contents */
4964 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4965 int cpu = tracing_get_cpu(inode);
4966 struct array_buffer *trace_buf = &tr->array_buffer;
4967
4968 #ifdef CONFIG_TRACER_MAX_TRACE
4969 if (tr->current_trace->print_max)
4970 trace_buf = &tr->max_buffer;
4971 #endif
4972
4973 if (cpu == RING_BUFFER_ALL_CPUS)
4974 tracing_reset_online_cpus(trace_buf);
4975 else
4976 tracing_reset_cpu(trace_buf, cpu);
4977 }
4978
4979 if (file->f_mode & FMODE_READ) {
4980 iter = __tracing_open(inode, file, false);
4981 if (IS_ERR(iter))
4982 ret = PTR_ERR(iter);
4983 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4984 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4985 }
4986
4987 if (ret < 0)
4988 trace_array_put(tr);
4989
4990 return ret;
4991 }
4992
4993 /*
4994 * Some tracers are not suitable for instance buffers.
4995 * A tracer is always available for the global array (toplevel)
4996 * or if it explicitly states that it is.
4997 */
4998 static bool
4999 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5000 {
5001 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5002 }
5003
5004 /* Find the next tracer that this trace array may use */
5005 static struct tracer *
5006 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5007 {
5008 while (t && !trace_ok_for_array(t, tr))
5009 t = t->next;
5010
5011 return t;
5012 }
5013
5014 static void *
5015 t_next(struct seq_file *m, void *v, loff_t *pos)
5016 {
5017 struct trace_array *tr = m->private;
5018 struct tracer *t = v;
5019
5020 (*pos)++;
5021
5022 if (t)
5023 t = get_tracer_for_array(tr, t->next);
5024
5025 return t;
5026 }
5027
5028 static void *t_start(struct seq_file *m, loff_t *pos)
5029 {
5030 struct trace_array *tr = m->private;
5031 struct tracer *t;
5032 loff_t l = 0;
5033
5034 mutex_lock(&trace_types_lock);
5035
5036 t = get_tracer_for_array(tr, trace_types);
5037 for (; t && l < *pos; t = t_next(m, t, &l))
5038 ;
5039
5040 return t;
5041 }
5042
5043 static void t_stop(struct seq_file *m, void *p)
5044 {
5045 mutex_unlock(&trace_types_lock);
5046 }
5047
5048 static int t_show(struct seq_file *m, void *v)
5049 {
5050 struct tracer *t = v;
5051
5052 if (!t)
5053 return 0;
5054
5055 seq_puts(m, t->name);
5056 if (t->next)
5057 seq_putc(m, ' ');
5058 else
5059 seq_putc(m, '\n');
5060
5061 return 0;
5062 }
5063
5064 static const struct seq_operations show_traces_seq_ops = {
5065 .start = t_start,
5066 .next = t_next,
5067 .stop = t_stop,
5068 .show = t_show,
5069 };
5070
5071 static int show_traces_open(struct inode *inode, struct file *file)
5072 {
5073 struct trace_array *tr = inode->i_private;
5074 struct seq_file *m;
5075 int ret;
5076
5077 ret = tracing_check_open_get_tr(tr);
5078 if (ret)
5079 return ret;
5080
5081 ret = seq_open(file, &show_traces_seq_ops);
5082 if (ret) {
5083 trace_array_put(tr);
5084 return ret;
5085 }
5086
5087 m = file->private_data;
5088 m->private = tr;
5089
5090 return 0;
5091 }
5092
5093 static int show_traces_release(struct inode *inode, struct file *file)
5094 {
5095 struct trace_array *tr = inode->i_private;
5096
5097 trace_array_put(tr);
5098 return seq_release(inode, file);
5099 }
5100
5101 static ssize_t
5102 tracing_write_stub(struct file *filp, const char __user *ubuf,
5103 size_t count, loff_t *ppos)
5104 {
5105 return count;
5106 }
5107
5108 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5109 {
5110 int ret;
5111
5112 if (file->f_mode & FMODE_READ)
5113 ret = seq_lseek(file, offset, whence);
5114 else
5115 file->f_pos = ret = 0;
5116
5117 return ret;
5118 }
5119
5120 static const struct file_operations tracing_fops = {
5121 .open = tracing_open,
5122 .read = seq_read,
5123 .write = tracing_write_stub,
5124 .llseek = tracing_lseek,
5125 .release = tracing_release,
5126 };
5127
5128 static const struct file_operations show_traces_fops = {
5129 .open = show_traces_open,
5130 .read = seq_read,
5131 .llseek = seq_lseek,
5132 .release = show_traces_release,
5133 };
5134
5135 static ssize_t
5136 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5137 size_t count, loff_t *ppos)
5138 {
5139 struct trace_array *tr = file_inode(filp)->i_private;
5140 char *mask_str;
5141 int len;
5142
5143 len = snprintf(NULL, 0, "%*pb\n",
5144 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5145 mask_str = kmalloc(len, GFP_KERNEL);
5146 if (!mask_str)
5147 return -ENOMEM;
5148
5149 len = snprintf(mask_str, len, "%*pb\n",
5150 cpumask_pr_args(tr->tracing_cpumask));
5151 if (len >= count) {
5152 count = -EINVAL;
5153 goto out_err;
5154 }
5155 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5156
5157 out_err:
5158 kfree(mask_str);
5159
5160 return count;
5161 }
5162
5163 int tracing_set_cpumask(struct trace_array *tr,
5164 cpumask_var_t tracing_cpumask_new)
5165 {
5166 int cpu;
5167
5168 if (!tr)
5169 return -EINVAL;
5170
5171 local_irq_disable();
5172 arch_spin_lock(&tr->max_lock);
5173 for_each_tracing_cpu(cpu) {
5174 /*
5175 * Increase/decrease the disabled counter if we are
5176 * about to flip a bit in the cpumask:
5177 */
5178 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5179 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5180 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5181 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5182 }
5183 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5184 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5185 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5186 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5187 }
5188 }
5189 arch_spin_unlock(&tr->max_lock);
5190 local_irq_enable();
5191
5192 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5193
5194 return 0;
5195 }
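/*
 * A minimal sketch of calling this directly from kernel code (the instance
 * pointer tr and the CPU choices are illustrative), mirroring what the
 * cpumask file write below does:
 *
 *	cpumask_var_t new_mask;
 *
 *	if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_set_cpu(0, new_mask);
 *	cpumask_set_cpu(1, new_mask);
 *	tracing_set_cpumask(tr, new_mask);
 *
 *	free_cpumask_var(new_mask);
 */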
5196
5197 static ssize_t
5198 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5199 size_t count, loff_t *ppos)
5200 {
5201 struct trace_array *tr = file_inode(filp)->i_private;
5202 cpumask_var_t tracing_cpumask_new;
5203 int err;
5204
5205 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5206 return -ENOMEM;
5207
5208 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5209 if (err)
5210 goto err_free;
5211
5212 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5213 if (err)
5214 goto err_free;
5215
5216 free_cpumask_var(tracing_cpumask_new);
5217
5218 return count;
5219
5220 err_free:
5221 free_cpumask_var(tracing_cpumask_new);
5222
5223 return err;
5224 }
5225
5226 static const struct file_operations tracing_cpumask_fops = {
5227 .open = tracing_open_generic_tr,
5228 .read = tracing_cpumask_read,
5229 .write = tracing_cpumask_write,
5230 .release = tracing_release_generic_tr,
5231 .llseek = generic_file_llseek,
5232 };
5233
5234 static int tracing_trace_options_show(struct seq_file *m, void *v)
5235 {
5236 struct tracer_opt *trace_opts;
5237 struct trace_array *tr = m->private;
5238 u32 tracer_flags;
5239 int i;
5240
5241 mutex_lock(&trace_types_lock);
5242 tracer_flags = tr->current_trace->flags->val;
5243 trace_opts = tr->current_trace->flags->opts;
5244
5245 for (i = 0; trace_options[i]; i++) {
5246 if (tr->trace_flags & (1 << i))
5247 seq_printf(m, "%s\n", trace_options[i]);
5248 else
5249 seq_printf(m, "no%s\n", trace_options[i]);
5250 }
5251
5252 for (i = 0; trace_opts[i].name; i++) {
5253 if (tracer_flags & trace_opts[i].bit)
5254 seq_printf(m, "%s\n", trace_opts[i].name);
5255 else
5256 seq_printf(m, "no%s\n", trace_opts[i].name);
5257 }
5258 mutex_unlock(&trace_types_lock);
5259
5260 return 0;
5261 }
5262
5263 static int __set_tracer_option(struct trace_array *tr,
5264 struct tracer_flags *tracer_flags,
5265 struct tracer_opt *opts, int neg)
5266 {
5267 struct tracer *trace = tracer_flags->trace;
5268 int ret;
5269
5270 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5271 if (ret)
5272 return ret;
5273
5274 if (neg)
5275 tracer_flags->val &= ~opts->bit;
5276 else
5277 tracer_flags->val |= opts->bit;
5278 return 0;
5279 }
5280
5281 /* Try to assign a tracer specific option */
5282 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5283 {
5284 struct tracer *trace = tr->current_trace;
5285 struct tracer_flags *tracer_flags = trace->flags;
5286 struct tracer_opt *opts = NULL;
5287 int i;
5288
5289 for (i = 0; tracer_flags->opts[i].name; i++) {
5290 opts = &tracer_flags->opts[i];
5291
5292 if (strcmp(cmp, opts->name) == 0)
5293 return __set_tracer_option(tr, trace->flags, opts, neg);
5294 }
5295
5296 return -EINVAL;
5297 }
5298
5299 /* Some tracers require overwrite to stay enabled */
5300 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5301 {
5302 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5303 return -1;
5304
5305 return 0;
5306 }
5307
5308 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5309 {
5310 int *map;
5311
5312 if ((mask == TRACE_ITER_RECORD_TGID) ||
5313 (mask == TRACE_ITER_RECORD_CMD))
5314 lockdep_assert_held(&event_mutex);
5315
5316 /* do nothing if flag is already set */
5317 if (!!(tr->trace_flags & mask) == !!enabled)
5318 return 0;
5319
5320 /* Give the tracer a chance to approve the change */
5321 if (tr->current_trace->flag_changed)
5322 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5323 return -EINVAL;
5324
5325 if (enabled)
5326 tr->trace_flags |= mask;
5327 else
5328 tr->trace_flags &= ~mask;
5329
5330 if (mask == TRACE_ITER_RECORD_CMD)
5331 trace_event_enable_cmd_record(enabled);
5332
5333 if (mask == TRACE_ITER_RECORD_TGID) {
5334 if (!tgid_map) {
5335 tgid_map_max = pid_max;
5336 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5337 GFP_KERNEL);
5338
5339 /*
5340 * Pairs with smp_load_acquire() in
5341 * trace_find_tgid_ptr() to ensure that if it observes
5342 * the tgid_map we just allocated then it also observes
5343 * the corresponding tgid_map_max value.
5344 */
5345 smp_store_release(&tgid_map, map);
5346 }
5347 if (!tgid_map) {
5348 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5349 return -ENOMEM;
5350 }
5351
5352 trace_event_enable_tgid_record(enabled);
5353 }
5354
5355 if (mask == TRACE_ITER_EVENT_FORK)
5356 trace_event_follow_fork(tr, enabled);
5357
5358 if (mask == TRACE_ITER_FUNC_FORK)
5359 ftrace_pid_follow_fork(tr, enabled);
5360
5361 if (mask == TRACE_ITER_OVERWRITE) {
5362 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5363 #ifdef CONFIG_TRACER_MAX_TRACE
5364 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5365 #endif
5366 }
5367
5368 if (mask == TRACE_ITER_PRINTK) {
5369 trace_printk_start_stop_comm(enabled);
5370 trace_printk_control(enabled);
5371 }
5372
5373 return 0;
5374 }
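/*
 * Callers are expected to hold trace_types_lock, and event_mutex as well for
 * the cmd/tgid recording flags (as the lockdep assertion above checks);
 * trace_set_options() below does exactly that:
 *
 *	mutex_lock(&event_mutex);
 *	mutex_lock(&trace_types_lock);
 *	ret = set_tracer_flag(tr, 1 << ret, !neg);
 *	mutex_unlock(&trace_types_lock);
 *	mutex_unlock(&event_mutex);
 */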
5375
5376 int trace_set_options(struct trace_array *tr, char *option)
5377 {
5378 char *cmp;
5379 int neg = 0;
5380 int ret;
5381 size_t orig_len = strlen(option);
5382 int len;
5383
5384 cmp = strstrip(option);
5385
5386 len = str_has_prefix(cmp, "no");
5387 if (len)
5388 neg = 1;
5389
5390 cmp += len;
5391
5392 mutex_lock(&event_mutex);
5393 mutex_lock(&trace_types_lock);
5394
5395 ret = match_string(trace_options, -1, cmp);
5396 /* If no option could be set, test the specific tracer options */
5397 if (ret < 0)
5398 ret = set_tracer_option(tr, cmp, neg);
5399 else
5400 ret = set_tracer_flag(tr, 1 << ret, !neg);
5401
5402 mutex_unlock(&trace_types_lock);
5403 mutex_unlock(&event_mutex);
5404
5405 /*
5406 * If the first trailing whitespace is replaced with '\0' by strstrip,
5407 * turn it back into a space.
5408 */
5409 if (orig_len > strlen(option))
5410 option[strlen(option)] = ' ';
5411
5412 return ret;
5413 }
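/*
 * The option string uses the same names as the trace_options file; a small
 * sketch (the instance pointer tr is illustrative):
 *
 *	char opt[] = "noirq-info";
 *
 *	trace_set_options(tr, opt);
 *
 * A name that is not a core option falls through to the current tracer's
 * private options via set_tracer_option() above.
 */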
5414
5415 static void __init apply_trace_boot_options(void)
5416 {
5417 char *buf = trace_boot_options_buf;
5418 char *option;
5419
5420 while (true) {
5421 option = strsep(&buf, ",");
5422
5423 if (!option)
5424 break;
5425
5426 if (*option)
5427 trace_set_options(&global_trace, option);
5428
5429 /* Put back the comma to allow this to be called again */
5430 if (buf)
5431 *(buf - 1) = ',';
5432 }
5433 }
5434
5435 static ssize_t
5436 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5437 size_t cnt, loff_t *ppos)
5438 {
5439 struct seq_file *m = filp->private_data;
5440 struct trace_array *tr = m->private;
5441 char buf[64];
5442 int ret;
5443
5444 if (cnt >= sizeof(buf))
5445 return -EINVAL;
5446
5447 if (copy_from_user(buf, ubuf, cnt))
5448 return -EFAULT;
5449
5450 buf[cnt] = 0;
5451
5452 ret = trace_set_options(tr, buf);
5453 if (ret < 0)
5454 return ret;
5455
5456 *ppos += cnt;
5457
5458 return cnt;
5459 }
5460
5461 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5462 {
5463 struct trace_array *tr = inode->i_private;
5464 int ret;
5465
5466 ret = tracing_check_open_get_tr(tr);
5467 if (ret)
5468 return ret;
5469
5470 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5471 if (ret < 0)
5472 trace_array_put(tr);
5473
5474 return ret;
5475 }
5476
5477 static const struct file_operations tracing_iter_fops = {
5478 .open = tracing_trace_options_open,
5479 .read = seq_read,
5480 .llseek = seq_lseek,
5481 .release = tracing_single_release_tr,
5482 .write = tracing_trace_options_write,
5483 };
5484
5485 static const char readme_msg[] =
5486 "tracing mini-HOWTO:\n\n"
5487 "# echo 0 > tracing_on : quick way to disable tracing\n"
5488 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5489 " Important files:\n"
5490 " trace\t\t\t- The static contents of the buffer\n"
5491 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5492 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5493 " current_tracer\t- function and latency tracers\n"
5494 " available_tracers\t- list of configured tracers for current_tracer\n"
5495 " error_log\t- error log for failed commands (that support it)\n"
5496 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5497 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5498 " trace_clock\t\t- change the clock used to order events\n"
5499 " local: Per cpu clock but may not be synced across CPUs\n"
5500 " global: Synced across CPUs but slows tracing down.\n"
5501 " counter: Not a clock, but just an increment\n"
5502 " uptime: Jiffy counter from time of boot\n"
5503 " perf: Same clock that perf events use\n"
5504 #ifdef CONFIG_X86_64
5505 " x86-tsc: TSC cycle counter\n"
5506 #endif
5507 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5508 " delta: Delta difference against a buffer-wide timestamp\n"
5509 " absolute: Absolute (standalone) timestamp\n"
5510 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5511 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5512 " tracing_cpumask\t- Limit which CPUs to trace\n"
5513 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5514 "\t\t\t Remove sub-buffer with rmdir\n"
5515 " trace_options\t\t- Set format or modify how tracing happens\n"
5516 "\t\t\t Disable an option by prefixing 'no' to the\n"
5517 "\t\t\t option name\n"
5518 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5519 #ifdef CONFIG_DYNAMIC_FTRACE
5520 "\n available_filter_functions - list of functions that can be filtered on\n"
5521 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5522 "\t\t\t functions\n"
5523 "\t accepts: func_full_name or glob-matching-pattern\n"
5524 "\t modules: Can select a group via module\n"
5525 "\t Format: :mod:<module-name>\n"
5526 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5527 "\t triggers: a command to perform when function is hit\n"
5528 "\t Format: <function>:<trigger>[:count]\n"
5529 "\t trigger: traceon, traceoff\n"
5530 "\t\t enable_event:<system>:<event>\n"
5531 "\t\t disable_event:<system>:<event>\n"
5532 #ifdef CONFIG_STACKTRACE
5533 "\t\t stacktrace\n"
5534 #endif
5535 #ifdef CONFIG_TRACER_SNAPSHOT
5536 "\t\t snapshot\n"
5537 #endif
5538 "\t\t dump\n"
5539 "\t\t cpudump\n"
5540 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5541 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5542 "\t The first one will disable tracing every time do_fault is hit\n"
5543 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5544 "\t The first time do_trap is hit and it disables tracing, the\n"
5545 "\t counter will decrement to 2. If tracing is already disabled,\n"
5546 "\t the counter will not decrement. It only decrements when the\n"
5547 "\t trigger did work\n"
5548 "\t To remove trigger without count:\n"
5549 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
5550 "\t To remove trigger with a count:\n"
5551 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5552 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5553 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5554 "\t modules: Can select a group via module command :mod:\n"
5555 "\t Does not accept triggers\n"
5556 #endif /* CONFIG_DYNAMIC_FTRACE */
5557 #ifdef CONFIG_FUNCTION_TRACER
5558 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5559 "\t\t (function)\n"
5560 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5561 "\t\t (function)\n"
5562 #endif
5563 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5564 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5565 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5566 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5567 #endif
5568 #ifdef CONFIG_TRACER_SNAPSHOT
5569 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5570 "\t\t\t snapshot buffer. Read the contents for more\n"
5571 "\t\t\t information\n"
5572 #endif
5573 #ifdef CONFIG_STACK_TRACER
5574 " stack_trace\t\t- Shows the max stack trace when active\n"
5575 " stack_max_size\t- Shows current max stack size that was traced\n"
5576 "\t\t\t Write into this file to reset the max size (trigger a\n"
5577 "\t\t\t new trace)\n"
5578 #ifdef CONFIG_DYNAMIC_FTRACE
5579 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5580 "\t\t\t traces\n"
5581 #endif
5582 #endif /* CONFIG_STACK_TRACER */
5583 #ifdef CONFIG_DYNAMIC_EVENTS
5584 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5585 "\t\t\t Write into this file to define/undefine new trace events.\n"
5586 #endif
5587 #ifdef CONFIG_KPROBE_EVENTS
5588 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5589 "\t\t\t Write into this file to define/undefine new trace events.\n"
5590 #endif
5591 #ifdef CONFIG_UPROBE_EVENTS
5592 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5593 "\t\t\t Write into this file to define/undefine new trace events.\n"
5594 #endif
5595 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5596 "\t accepts: event-definitions (one definition per line)\n"
5597 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5598 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5599 #ifdef CONFIG_HIST_TRIGGERS
5600 "\t s:[synthetic/]<event> <field> [<field>]\n"
5601 #endif
5602 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
5603 "\t -:[<group>/][<event>]\n"
5604 #ifdef CONFIG_KPROBE_EVENTS
5605 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5606 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5607 #endif
5608 #ifdef CONFIG_UPROBE_EVENTS
5609 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5610 #endif
5611 "\t args: <name>=fetcharg[:type]\n"
5612 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5613 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5614 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5615 #else
5616 "\t $stack<index>, $stack, $retval, $comm,\n"
5617 #endif
5618 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5619 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5620 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5621 "\t <type>\\[<array-size>\\]\n"
5622 #ifdef CONFIG_HIST_TRIGGERS
5623 "\t field: <stype> <name>;\n"
5624 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5625 "\t [unsigned] char/int/long\n"
5626 #endif
5627 "\t efield: For event probes ('e' types), the field is one of the fields\n"
5628 "\t of the <attached-group>/<attached-event>.\n"
5629 #endif
5630 " events/\t\t- Directory containing all trace event subsystems:\n"
5631 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5632 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5633 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5634 "\t\t\t events\n"
5635 " filter\t\t- If set, only events passing filter are traced\n"
5636 " events/<system>/<event>/\t- Directory containing control files for\n"
5637 "\t\t\t <event>:\n"
5638 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5639 " filter\t\t- If set, only events passing filter are traced\n"
5640 " trigger\t\t- If set, a command to perform when event is hit\n"
5641 "\t Format: <trigger>[:count][if <filter>]\n"
5642 "\t trigger: traceon, traceoff\n"
5643 "\t enable_event:<system>:<event>\n"
5644 "\t disable_event:<system>:<event>\n"
5645 #ifdef CONFIG_HIST_TRIGGERS
5646 "\t enable_hist:<system>:<event>\n"
5647 "\t disable_hist:<system>:<event>\n"
5648 #endif
5649 #ifdef CONFIG_STACKTRACE
5650 "\t\t stacktrace\n"
5651 #endif
5652 #ifdef CONFIG_TRACER_SNAPSHOT
5653 "\t\t snapshot\n"
5654 #endif
5655 #ifdef CONFIG_HIST_TRIGGERS
5656 "\t\t hist (see below)\n"
5657 #endif
5658 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5659 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5660 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5661 "\t events/block/block_unplug/trigger\n"
5662 "\t The first disables tracing every time block_unplug is hit.\n"
5663 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5664 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5665 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5666 "\t Like function triggers, the counter is only decremented if it\n"
5667 "\t enabled or disabled tracing.\n"
5668 "\t To remove a trigger without a count:\n"
5669 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
5670 "\t To remove a trigger with a count:\n"
5671 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
5672 "\t Filters can be ignored when removing a trigger.\n"
5673 #ifdef CONFIG_HIST_TRIGGERS
5674 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5675 "\t Format: hist:keys=<field1[,field2,...]>\n"
5676 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5677 "\t [:values=<field1[,field2,...]>]\n"
5678 "\t [:sort=<field1[,field2,...]>]\n"
5679 "\t [:size=#entries]\n"
5680 "\t [:pause][:continue][:clear]\n"
5681 "\t [:name=histname1]\n"
5682 "\t [:<handler>.<action>]\n"
5683 "\t [if <filter>]\n\n"
5684 "\t Note, special fields can be used as well:\n"
5685 "\t common_timestamp - to record current timestamp\n"
5686 "\t common_cpu - to record the CPU the event happened on\n"
5687 "\n"
5688 "\t A hist trigger variable can be:\n"
5689 "\t - a reference to a field e.g. x=current_timestamp,\n"
5690 "\t - a reference to another variable e.g. y=$x,\n"
5691 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5692 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5693 "\n"
5694 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5695 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5696 "\t variable reference, field or numeric literal.\n"
5697 "\n"
5698 "\t When a matching event is hit, an entry is added to a hash\n"
5699 "\t table using the key(s) and value(s) named, and the value of a\n"
5700 "\t sum called 'hitcount' is incremented. Keys and values\n"
5701 "\t correspond to fields in the event's format description. Keys\n"
5702 "\t can be any field, or the special string 'stacktrace'.\n"
5703 "\t Compound keys consisting of up to two fields can be specified\n"
5704 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5705 "\t fields. Sort keys consisting of up to two fields can be\n"
5706 "\t specified using the 'sort' keyword. The sort direction can\n"
5707 "\t be modified by appending '.descending' or '.ascending' to a\n"
5708 "\t sort field. The 'size' parameter can be used to specify more\n"
5709 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5710 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5711 "\t its histogram data will be shared with other triggers of the\n"
5712 "\t same name, and trigger hits will update this common data.\n\n"
5713 "\t Reading the 'hist' file for the event will dump the hash\n"
5714 "\t table in its entirety to stdout. If there are multiple hist\n"
5715 "\t triggers attached to an event, there will be a table for each\n"
5716 "\t trigger in the output. The table displayed for a named\n"
5717 "\t trigger will be the same as any other instance having the\n"
5718 "\t same name. The default format used to display a given field\n"
5719 "\t can be modified by appending any of the following modifiers\n"
5720 "\t to the field name, as applicable:\n\n"
5721 "\t .hex display a number as a hex value\n"
5722 "\t .sym display an address as a symbol\n"
5723 "\t .sym-offset display an address as a symbol and offset\n"
5724 "\t .execname display a common_pid as a program name\n"
5725 "\t .syscall display a syscall id as a syscall name\n"
5726 "\t .log2 display log2 value rather than raw number\n"
5727 "\t .buckets=size display values in groups of size rather than raw number\n"
5728 "\t .usecs display a common_timestamp in microseconds\n\n"
5729 "\t The 'pause' parameter can be used to pause an existing hist\n"
5730 "\t trigger or to start a hist trigger but not log any events\n"
5731 "\t until told to do so. 'continue' can be used to start or\n"
5732 "\t restart a paused hist trigger.\n\n"
5733 "\t The 'clear' parameter will clear the contents of a running\n"
5734 "\t hist trigger and leave its current paused/active state\n"
5735 "\t unchanged.\n\n"
5736 "\t The enable_hist and disable_hist triggers can be used to\n"
5737 "\t have one event conditionally start and stop another event's\n"
5738 "\t already-attached hist trigger. The syntax is analogous to\n"
5739 "\t the enable_event and disable_event triggers.\n\n"
5740 "\t Hist trigger handlers and actions are executed whenever a\n"
5741 "\t histogram entry is added or updated. They take the form:\n\n"
5742 "\t <handler>.<action>\n\n"
5743 "\t The available handlers are:\n\n"
5744 "\t onmatch(matching.event) - invoke on addition or update\n"
5745 "\t onmax(var) - invoke if var exceeds current max\n"
5746 "\t onchange(var) - invoke action if var changes\n\n"
5747 "\t The available actions are:\n\n"
5748 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5749 "\t save(field,...) - save current event fields\n"
5750 #ifdef CONFIG_TRACER_SNAPSHOT
5751 "\t snapshot() - snapshot the trace buffer\n\n"
5752 #endif
5753 #ifdef CONFIG_SYNTH_EVENTS
5754 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5755 "\t Write into this file to define/undefine new synthetic events.\n"
5756 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5757 #endif
5758 #endif
5759 ;
5760
5761 static ssize_t
5762 tracing_readme_read(struct file *filp, char __user *ubuf,
5763 size_t cnt, loff_t *ppos)
5764 {
5765 return simple_read_from_buffer(ubuf, cnt, ppos,
5766 readme_msg, strlen(readme_msg));
5767 }
5768
5769 static const struct file_operations tracing_readme_fops = {
5770 .open = tracing_open_generic,
5771 .read = tracing_readme_read,
5772 .llseek = generic_file_llseek,
5773 };
5774
5775 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5776 {
5777 int pid = ++(*pos);
5778
5779 return trace_find_tgid_ptr(pid);
5780 }
5781
5782 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5783 {
5784 int pid = *pos;
5785
5786 return trace_find_tgid_ptr(pid);
5787 }
5788
5789 static void saved_tgids_stop(struct seq_file *m, void *v)
5790 {
5791 }
5792
5793 static int saved_tgids_show(struct seq_file *m, void *v)
5794 {
5795 int *entry = (int *)v;
5796 int pid = entry - tgid_map;
5797 int tgid = *entry;
5798
5799 if (tgid == 0)
5800 return SEQ_SKIP;
5801
5802 seq_printf(m, "%d %d\n", pid, tgid);
5803 return 0;
5804 }
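/*
 * Example of the resulting saved_tgids output, one "<pid> <tgid>" pair
 * per line (values are illustrative):
 *
 *   1023 1023
 *   1045 1023
 *
 * Entries whose recorded tgid is 0 are skipped via SEQ_SKIP above.
 */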
5805
5806 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5807 .start = saved_tgids_start,
5808 .stop = saved_tgids_stop,
5809 .next = saved_tgids_next,
5810 .show = saved_tgids_show,
5811 };
5812
5813 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5814 {
5815 int ret;
5816
5817 ret = tracing_check_open_get_tr(NULL);
5818 if (ret)
5819 return ret;
5820
5821 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5822 }
5823
5824
5825 static const struct file_operations tracing_saved_tgids_fops = {
5826 .open = tracing_saved_tgids_open,
5827 .read = seq_read,
5828 .llseek = seq_lseek,
5829 .release = seq_release,
5830 };
5831
5832 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5833 {
5834 unsigned int *ptr = v;
5835
5836 if (*pos || m->count)
5837 ptr++;
5838
5839 (*pos)++;
5840
5841 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5842 ptr++) {
5843 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5844 continue;
5845
5846 return ptr;
5847 }
5848
5849 return NULL;
5850 }
5851
5852 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5853 {
5854 void *v;
5855 loff_t l = 0;
5856
5857 preempt_disable();
5858 arch_spin_lock(&trace_cmdline_lock);
5859
5860 v = &savedcmd->map_cmdline_to_pid[0];
5861 while (l <= *pos) {
5862 v = saved_cmdlines_next(m, v, &l);
5863 if (!v)
5864 return NULL;
5865 }
5866
5867 return v;
5868 }
5869
5870 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5871 {
5872 arch_spin_unlock(&trace_cmdline_lock);
5873 preempt_enable();
5874 }
5875
5876 static int saved_cmdlines_show(struct seq_file *m, void *v)
5877 {
5878 char buf[TASK_COMM_LEN];
5879 unsigned int *pid = v;
5880
5881 __trace_find_cmdline(*pid, buf);
5882 seq_printf(m, "%d %s\n", *pid, buf);
5883 return 0;
5884 }
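/*
 * Example of the resulting saved_cmdlines output, one "<pid> <comm>"
 * pair per line (values are illustrative):
 *
 *   1203 bash
 *   1371 kworker/u8:2
 */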
5885
5886 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5887 .start = saved_cmdlines_start,
5888 .next = saved_cmdlines_next,
5889 .stop = saved_cmdlines_stop,
5890 .show = saved_cmdlines_show,
5891 };
5892
5893 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5894 {
5895 int ret;
5896
5897 ret = tracing_check_open_get_tr(NULL);
5898 if (ret)
5899 return ret;
5900
5901 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5902 }
5903
5904 static const struct file_operations tracing_saved_cmdlines_fops = {
5905 .open = tracing_saved_cmdlines_open,
5906 .read = seq_read,
5907 .llseek = seq_lseek,
5908 .release = seq_release,
5909 };
5910
5911 static ssize_t
5912 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5913 size_t cnt, loff_t *ppos)
5914 {
5915 char buf[64];
5916 int r;
5917
5918 preempt_disable();
5919 arch_spin_lock(&trace_cmdline_lock);
5920 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5921 arch_spin_unlock(&trace_cmdline_lock);
5922 preempt_enable();
5923
5924 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5925 }
5926
5927 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5928 {
5929 kfree(s->saved_cmdlines);
5930 kfree(s->map_cmdline_to_pid);
5931 kfree(s);
5932 }
5933
5934 static int tracing_resize_saved_cmdlines(unsigned int val)
5935 {
5936 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5937
5938 s = kmalloc(sizeof(*s), GFP_KERNEL);
5939 if (!s)
5940 return -ENOMEM;
5941
5942 if (allocate_cmdlines_buffer(val, s) < 0) {
5943 kfree(s);
5944 return -ENOMEM;
5945 }
5946
5947 preempt_disable();
5948 arch_spin_lock(&trace_cmdline_lock);
5949 savedcmd_temp = savedcmd;
5950 savedcmd = s;
5951 arch_spin_unlock(&trace_cmdline_lock);
5952 preempt_enable();
5953 free_saved_cmdlines_buffer(savedcmd_temp);
5954
5955 return 0;
5956 }
5957
5958 static ssize_t
5959 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5960 size_t cnt, loff_t *ppos)
5961 {
5962 unsigned long val;
5963 int ret;
5964
5965 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5966 if (ret)
5967 return ret;
5968
5969 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5970 if (!val || val > PID_MAX_DEFAULT)
5971 return -EINVAL;
5972
5973 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5974 if (ret < 0)
5975 return ret;
5976
5977 *ppos += cnt;
5978
5979 return cnt;
5980 }
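/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * This swaps in a new cmdline map with room for 4096 entries; writes of
 * 0 or of values above PID_MAX_DEFAULT are rejected with -EINVAL.
 */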
5981
5982 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5983 .open = tracing_open_generic,
5984 .read = tracing_saved_cmdlines_size_read,
5985 .write = tracing_saved_cmdlines_size_write,
5986 };
5987
5988 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5989 static union trace_eval_map_item *
5990 update_eval_map(union trace_eval_map_item *ptr)
5991 {
5992 if (!ptr->map.eval_string) {
5993 if (ptr->tail.next) {
5994 ptr = ptr->tail.next;
5995 /* Set ptr to the next real item (skip head) */
5996 ptr++;
5997 } else
5998 return NULL;
5999 }
6000 return ptr;
6001 }
6002
6003 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6004 {
6005 union trace_eval_map_item *ptr = v;
6006
6007 /*
6008 * Paranoid! If ptr points to end, we don't want to increment past it.
6009 * This really should never happen.
6010 */
6011 (*pos)++;
6012 ptr = update_eval_map(ptr);
6013 if (WARN_ON_ONCE(!ptr))
6014 return NULL;
6015
6016 ptr++;
6017 ptr = update_eval_map(ptr);
6018
6019 return ptr;
6020 }
6021
6022 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6023 {
6024 union trace_eval_map_item *v;
6025 loff_t l = 0;
6026
6027 mutex_lock(&trace_eval_mutex);
6028
6029 v = trace_eval_maps;
6030 if (v)
6031 v++;
6032
6033 while (v && l < *pos) {
6034 v = eval_map_next(m, v, &l);
6035 }
6036
6037 return v;
6038 }
6039
6040 static void eval_map_stop(struct seq_file *m, void *v)
6041 {
6042 mutex_unlock(&trace_eval_mutex);
6043 }
6044
6045 static int eval_map_show(struct seq_file *m, void *v)
6046 {
6047 union trace_eval_map_item *ptr = v;
6048
6049 seq_printf(m, "%s %ld (%s)\n",
6050 ptr->map.eval_string, ptr->map.eval_value,
6051 ptr->map.system);
6052
6053 return 0;
6054 }
6055
6056 static const struct seq_operations tracing_eval_map_seq_ops = {
6057 .start = eval_map_start,
6058 .next = eval_map_next,
6059 .stop = eval_map_stop,
6060 .show = eval_map_show,
6061 };
6062
6063 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6064 {
6065 int ret;
6066
6067 ret = tracing_check_open_get_tr(NULL);
6068 if (ret)
6069 return ret;
6070
6071 return seq_open(filp, &tracing_eval_map_seq_ops);
6072 }
6073
6074 static const struct file_operations tracing_eval_map_fops = {
6075 .open = tracing_eval_map_open,
6076 .read = seq_read,
6077 .llseek = seq_lseek,
6078 .release = seq_release,
6079 };
6080
6081 static inline union trace_eval_map_item *
6082 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6083 {
6084 /* Return tail of array given the head */
6085 return ptr + ptr->head.length + 1;
6086 }
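/*
 * Layout of one block in the trace_eval_maps list (built by
 * trace_insert_eval_map_file() below):
 *
 *   [ head | map 0 | map 1 | ... | map len-1 | tail ]
 *
 * head.length is 'len', so head + len + 1 lands on the tail item, whose
 * tail.next points at the head of the next block (or is NULL).
 */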
6087
6088 static void
6089 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6090 int len)
6091 {
6092 struct trace_eval_map **stop;
6093 struct trace_eval_map **map;
6094 union trace_eval_map_item *map_array;
6095 union trace_eval_map_item *ptr;
6096
6097 stop = start + len;
6098
6099 /*
6100 * The trace_eval_maps contains the map plus a head and tail item,
6101 * where the head holds the module and length of array, and the
6102 * tail holds a pointer to the next list.
6103 */
6104 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6105 if (!map_array) {
6106 pr_warn("Unable to allocate trace eval mapping\n");
6107 return;
6108 }
6109
6110 mutex_lock(&trace_eval_mutex);
6111
6112 if (!trace_eval_maps)
6113 trace_eval_maps = map_array;
6114 else {
6115 ptr = trace_eval_maps;
6116 for (;;) {
6117 ptr = trace_eval_jmp_to_tail(ptr);
6118 if (!ptr->tail.next)
6119 break;
6120 ptr = ptr->tail.next;
6121
6122 }
6123 ptr->tail.next = map_array;
6124 }
6125 map_array->head.mod = mod;
6126 map_array->head.length = len;
6127 map_array++;
6128
6129 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6130 map_array->map = **map;
6131 map_array++;
6132 }
6133 memset(map_array, 0, sizeof(*map_array));
6134
6135 mutex_unlock(&trace_eval_mutex);
6136 }
6137
6138 static void trace_create_eval_file(struct dentry *d_tracer)
6139 {
6140 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6141 NULL, &tracing_eval_map_fops);
6142 }
6143
6144 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6145 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6146 static inline void trace_insert_eval_map_file(struct module *mod,
6147 struct trace_eval_map **start, int len) { }
6148 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6149
6150 static void trace_insert_eval_map(struct module *mod,
6151 struct trace_eval_map **start, int len)
6152 {
6153 struct trace_eval_map **map;
6154
6155 if (len <= 0)
6156 return;
6157
6158 map = start;
6159
6160 trace_event_eval_update(map, len);
6161
6162 trace_insert_eval_map_file(mod, start, len);
6163 }
6164
6165 static ssize_t
6166 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6167 size_t cnt, loff_t *ppos)
6168 {
6169 struct trace_array *tr = filp->private_data;
6170 char buf[MAX_TRACER_SIZE+2];
6171 int r;
6172
6173 mutex_lock(&trace_types_lock);
6174 r = sprintf(buf, "%s\n", tr->current_trace->name);
6175 mutex_unlock(&trace_types_lock);
6176
6177 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6178 }
6179
6180 int tracer_init(struct tracer *t, struct trace_array *tr)
6181 {
6182 tracing_reset_online_cpus(&tr->array_buffer);
6183 return t->init(tr);
6184 }
6185
6186 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6187 {
6188 int cpu;
6189
6190 for_each_tracing_cpu(cpu)
6191 per_cpu_ptr(buf->data, cpu)->entries = val;
6192 }
6193
6194 #ifdef CONFIG_TRACER_MAX_TRACE
6195 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6196 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6197 struct array_buffer *size_buf, int cpu_id)
6198 {
6199 int cpu, ret = 0;
6200
6201 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6202 for_each_tracing_cpu(cpu) {
6203 ret = ring_buffer_resize(trace_buf->buffer,
6204 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6205 if (ret < 0)
6206 break;
6207 per_cpu_ptr(trace_buf->data, cpu)->entries =
6208 per_cpu_ptr(size_buf->data, cpu)->entries;
6209 }
6210 } else {
6211 ret = ring_buffer_resize(trace_buf->buffer,
6212 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6213 if (ret == 0)
6214 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6215 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6216 }
6217
6218 return ret;
6219 }
6220 #endif /* CONFIG_TRACER_MAX_TRACE */
6221
6222 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6223 unsigned long size, int cpu)
6224 {
6225 int ret;
6226
6227 /*
6228 * If kernel or user changes the size of the ring buffer
6229 * we use the size that was given, and we can forget about
6230 * expanding it later.
6231 */
6232 ring_buffer_expanded = true;
6233
6234 /* May be called before buffers are initialized */
6235 if (!tr->array_buffer.buffer)
6236 return 0;
6237
6238 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6239 if (ret < 0)
6240 return ret;
6241
6242 #ifdef CONFIG_TRACER_MAX_TRACE
6243 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6244 !tr->current_trace->use_max_tr)
6245 goto out;
6246
6247 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6248 if (ret < 0) {
6249 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6250 &tr->array_buffer, cpu);
6251 if (r < 0) {
6252 /*
6253 * AARGH! We are left with different
6254 * size max buffer!!!!
6255 * The max buffer is our "snapshot" buffer.
6256 * When a tracer needs a snapshot (one of the
6257 * latency tracers), it swaps the max buffer
6258 * with the saved snap shot. We succeeded to
6259 * update the size of the main buffer, but failed to
6260 * update the size of the max buffer. But when we tried
6261 * to reset the main buffer to the original size, we
6262 * failed there too. This is very unlikely to
6263 * happen, but if it does, warn and kill all
6264 * tracing.
6265 */
6266 WARN_ON(1);
6267 tracing_disabled = 1;
6268 }
6269 return ret;
6270 }
6271
6272 if (cpu == RING_BUFFER_ALL_CPUS)
6273 set_buffer_entries(&tr->max_buffer, size);
6274 else
6275 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6276
6277 out:
6278 #endif /* CONFIG_TRACER_MAX_TRACE */
6279
6280 if (cpu == RING_BUFFER_ALL_CPUS)
6281 set_buffer_entries(&tr->array_buffer, size);
6282 else
6283 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6284
6285 return ret;
6286 }
6287
6288 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6289 unsigned long size, int cpu_id)
6290 {
6291 int ret;
6292
6293 mutex_lock(&trace_types_lock);
6294
6295 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6296 /* make sure this cpu is enabled in the mask */
6297 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6298 ret = -EINVAL;
6299 goto out;
6300 }
6301 }
6302
6303 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6304 if (ret < 0)
6305 ret = -ENOMEM;
6306
6307 out:
6308 mutex_unlock(&trace_types_lock);
6309
6310 return ret;
6311 }
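/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   # resize every per-CPU buffer to 4096 KB
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 *   # resize only CPU 1's buffer
 *   echo 8192 > /sys/kernel/tracing/per_cpu/cpu1/buffer_size_kb
 *
 * Both paths end up here via tracing_entries_write() further below.
 */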
6312
6313
6314 /**
6315 * tracing_update_buffers - used by tracing facility to expand ring buffers
6316 *
6317  * To save memory when tracing is never used on a system that has it
6318  * configured in, the ring buffers are set to a minimum size. Once a
6319  * user starts to use the tracing facility, they need to grow to their
6320  * default size.
6321 *
6322 * This function is to be called when a tracer is about to be used.
6323 */
6324 int tracing_update_buffers(void)
6325 {
6326 int ret = 0;
6327
6328 mutex_lock(&trace_types_lock);
6329 if (!ring_buffer_expanded)
6330 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6331 RING_BUFFER_ALL_CPUS);
6332 mutex_unlock(&trace_types_lock);
6333
6334 return ret;
6335 }
6336
6337 struct trace_option_dentry;
6338
6339 static void
6340 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6341
6342 /*
6343 * Used to clear out the tracer before deletion of an instance.
6344 * Must have trace_types_lock held.
6345 */
6346 static void tracing_set_nop(struct trace_array *tr)
6347 {
6348 if (tr->current_trace == &nop_trace)
6349 return;
6350
6351 tr->current_trace->enabled--;
6352
6353 if (tr->current_trace->reset)
6354 tr->current_trace->reset(tr);
6355
6356 tr->current_trace = &nop_trace;
6357 }
6358
6359 static bool tracer_options_updated;
6360
6361 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6362 {
6363 /* Only enable if the directory has been created already. */
6364 if (!tr->dir)
6365 return;
6366
6367 /* Only create trace option files after update_tracer_options has finished */
6368 if (!tracer_options_updated)
6369 return;
6370
6371 create_trace_option_files(tr, t);
6372 }
6373
6374 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6375 {
6376 struct tracer *t;
6377 #ifdef CONFIG_TRACER_MAX_TRACE
6378 bool had_max_tr;
6379 #endif
6380 int ret = 0;
6381
6382 mutex_lock(&trace_types_lock);
6383
6384 if (!ring_buffer_expanded) {
6385 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6386 RING_BUFFER_ALL_CPUS);
6387 if (ret < 0)
6388 goto out;
6389 ret = 0;
6390 }
6391
6392 for (t = trace_types; t; t = t->next) {
6393 if (strcmp(t->name, buf) == 0)
6394 break;
6395 }
6396 if (!t) {
6397 ret = -EINVAL;
6398 goto out;
6399 }
6400 if (t == tr->current_trace)
6401 goto out;
6402
6403 #ifdef CONFIG_TRACER_SNAPSHOT
6404 if (t->use_max_tr) {
6405 local_irq_disable();
6406 arch_spin_lock(&tr->max_lock);
6407 if (tr->cond_snapshot)
6408 ret = -EBUSY;
6409 arch_spin_unlock(&tr->max_lock);
6410 local_irq_enable();
6411 if (ret)
6412 goto out;
6413 }
6414 #endif
6415 /* Some tracers won't work on kernel command line */
6416 if (system_state < SYSTEM_RUNNING && t->noboot) {
6417 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6418 t->name);
6419 goto out;
6420 }
6421
6422 /* Some tracers are only allowed for the top level buffer */
6423 if (!trace_ok_for_array(t, tr)) {
6424 ret = -EINVAL;
6425 goto out;
6426 }
6427
6428 /* If trace pipe files are being read, we can't change the tracer */
6429 if (tr->trace_ref) {
6430 ret = -EBUSY;
6431 goto out;
6432 }
6433
6434 trace_branch_disable();
6435
6436 tr->current_trace->enabled--;
6437
6438 if (tr->current_trace->reset)
6439 tr->current_trace->reset(tr);
6440
6441 #ifdef CONFIG_TRACER_MAX_TRACE
6442 had_max_tr = tr->current_trace->use_max_tr;
6443
6444 /* Current trace needs to be nop_trace before synchronize_rcu */
6445 tr->current_trace = &nop_trace;
6446
6447 if (had_max_tr && !t->use_max_tr) {
6448 /*
6449 * We need to make sure that the update_max_tr sees that
6450 * current_trace changed to nop_trace to keep it from
6451 * swapping the buffers after we resize it.
6452  * update_max_tr() is called with interrupts disabled,
6453  * so a synchronize_rcu() is sufficient.
6454 */
6455 synchronize_rcu();
6456 free_snapshot(tr);
6457 }
6458
6459 if (t->use_max_tr && !tr->allocated_snapshot) {
6460 ret = tracing_alloc_snapshot_instance(tr);
6461 if (ret < 0)
6462 goto out;
6463 }
6464 #else
6465 tr->current_trace = &nop_trace;
6466 #endif
6467
6468 if (t->init) {
6469 ret = tracer_init(t, tr);
6470 if (ret)
6471 goto out;
6472 }
6473
6474 tr->current_trace = t;
6475 tr->current_trace->enabled++;
6476 trace_branch_enable(tr);
6477 out:
6478 mutex_unlock(&trace_types_lock);
6479
6480 return ret;
6481 }
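/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   cat /sys/kernel/tracing/available_tracers
 *   echo function_graph > /sys/kernel/tracing/current_tracer   # if configured in
 *   echo nop > /sys/kernel/tracing/current_tracer
 *
 * Writes to current_tracer land in tracing_set_trace_write() below, which
 * strips trailing whitespace and calls tracing_set_tracer().
 */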
6482
6483 static ssize_t
6484 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6485 size_t cnt, loff_t *ppos)
6486 {
6487 struct trace_array *tr = filp->private_data;
6488 char buf[MAX_TRACER_SIZE+1];
6489 char *name;
6490 size_t ret;
6491 int err;
6492
6493 ret = cnt;
6494
6495 if (cnt > MAX_TRACER_SIZE)
6496 cnt = MAX_TRACER_SIZE;
6497
6498 if (copy_from_user(buf, ubuf, cnt))
6499 return -EFAULT;
6500
6501 buf[cnt] = 0;
6502
6503 name = strim(buf);
6504
6505 err = tracing_set_tracer(tr, name);
6506 if (err)
6507 return err;
6508
6509 *ppos += ret;
6510
6511 return ret;
6512 }
6513
6514 static ssize_t
6515 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6516 size_t cnt, loff_t *ppos)
6517 {
6518 char buf[64];
6519 int r;
6520
6521 r = snprintf(buf, sizeof(buf), "%ld\n",
6522 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6523 if (r > sizeof(buf))
6524 r = sizeof(buf);
6525 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6526 }
6527
6528 static ssize_t
6529 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6530 size_t cnt, loff_t *ppos)
6531 {
6532 unsigned long val;
6533 int ret;
6534
6535 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6536 if (ret)
6537 return ret;
6538
6539 *ptr = val * 1000;
6540
6541 return cnt;
6542 }
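/*
 * The value written is taken in microseconds and stored in nanoseconds,
 * e.g. "echo 50 > tracing_thresh" stores 50 * 1000 = 50000 ns via
 * tracing_thresh_write() below.
 */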
6543
6544 static ssize_t
6545 tracing_thresh_read(struct file *filp, char __user *ubuf,
6546 size_t cnt, loff_t *ppos)
6547 {
6548 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6549 }
6550
6551 static ssize_t
6552 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6553 size_t cnt, loff_t *ppos)
6554 {
6555 struct trace_array *tr = filp->private_data;
6556 int ret;
6557
6558 mutex_lock(&trace_types_lock);
6559 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6560 if (ret < 0)
6561 goto out;
6562
6563 if (tr->current_trace->update_thresh) {
6564 ret = tr->current_trace->update_thresh(tr);
6565 if (ret < 0)
6566 goto out;
6567 }
6568
6569 ret = cnt;
6570 out:
6571 mutex_unlock(&trace_types_lock);
6572
6573 return ret;
6574 }
6575
6576 #ifdef CONFIG_TRACER_MAX_TRACE
6577
6578 static ssize_t
6579 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6580 size_t cnt, loff_t *ppos)
6581 {
6582 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6583 }
6584
6585 static ssize_t
6586 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6587 size_t cnt, loff_t *ppos)
6588 {
6589 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6590 }
6591
6592 #endif
6593
6594 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6595 {
6596 struct trace_array *tr = inode->i_private;
6597 struct trace_iterator *iter;
6598 int ret;
6599
6600 ret = tracing_check_open_get_tr(tr);
6601 if (ret)
6602 return ret;
6603
6604 mutex_lock(&trace_types_lock);
6605
6606 /* create a buffer to store the information to pass to userspace */
6607 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6608 if (!iter) {
6609 ret = -ENOMEM;
6610 __trace_array_put(tr);
6611 goto out;
6612 }
6613
6614 trace_seq_init(&iter->seq);
6615 iter->trace = tr->current_trace;
6616
6617 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6618 ret = -ENOMEM;
6619 goto fail;
6620 }
6621
6622 /* trace pipe does not show start of buffer */
6623 cpumask_setall(iter->started);
6624
6625 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6626 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6627
6628 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6629 if (trace_clocks[tr->clock_id].in_ns)
6630 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6631
6632 iter->tr = tr;
6633 iter->array_buffer = &tr->array_buffer;
6634 iter->cpu_file = tracing_get_cpu(inode);
6635 mutex_init(&iter->mutex);
6636 filp->private_data = iter;
6637
6638 if (iter->trace->pipe_open)
6639 iter->trace->pipe_open(iter);
6640
6641 nonseekable_open(inode, filp);
6642
6643 tr->trace_ref++;
6644 out:
6645 mutex_unlock(&trace_types_lock);
6646 return ret;
6647
6648 fail:
6649 kfree(iter);
6650 __trace_array_put(tr);
6651 mutex_unlock(&trace_types_lock);
6652 return ret;
6653 }
6654
6655 static int tracing_release_pipe(struct inode *inode, struct file *file)
6656 {
6657 struct trace_iterator *iter = file->private_data;
6658 struct trace_array *tr = inode->i_private;
6659
6660 mutex_lock(&trace_types_lock);
6661
6662 tr->trace_ref--;
6663
6664 if (iter->trace->pipe_close)
6665 iter->trace->pipe_close(iter);
6666
6667 mutex_unlock(&trace_types_lock);
6668
6669 free_cpumask_var(iter->started);
6670 kfree(iter->fmt);
6671 mutex_destroy(&iter->mutex);
6672 kfree(iter);
6673
6674 trace_array_put(tr);
6675
6676 return 0;
6677 }
6678
6679 static __poll_t
6680 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6681 {
6682 struct trace_array *tr = iter->tr;
6683
6684 /* Iterators are static, they should be filled or empty */
6685 if (trace_buffer_iter(iter, iter->cpu_file))
6686 return EPOLLIN | EPOLLRDNORM;
6687
6688 if (tr->trace_flags & TRACE_ITER_BLOCK)
6689 /*
6690 * Always select as readable when in blocking mode
6691 */
6692 return EPOLLIN | EPOLLRDNORM;
6693 else
6694 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6695 filp, poll_table, iter->tr->buffer_percent);
6696 }
6697
6698 static __poll_t
6699 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6700 {
6701 struct trace_iterator *iter = filp->private_data;
6702
6703 return trace_poll(iter, filp, poll_table);
6704 }
6705
6706 /* Must be called with iter->mutex held. */
6707 static int tracing_wait_pipe(struct file *filp)
6708 {
6709 struct trace_iterator *iter = filp->private_data;
6710 int ret;
6711
6712 while (trace_empty(iter)) {
6713
6714 if ((filp->f_flags & O_NONBLOCK)) {
6715 return -EAGAIN;
6716 }
6717
6718 /*
6719 * We block until we read something and tracing is disabled.
6720 * We still block if tracing is disabled, but we have never
6721 * read anything. This allows a user to cat this file, and
6722 * then enable tracing. But after we have read something,
6723 * we give an EOF when tracing is again disabled.
6724 *
6725 * iter->pos will be 0 if we haven't read anything.
6726 */
6727 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6728 break;
6729
6730 mutex_unlock(&iter->mutex);
6731
6732 ret = wait_on_pipe(iter, 0);
6733
6734 mutex_lock(&iter->mutex);
6735
6736 if (ret)
6737 return ret;
6738 }
6739
6740 return 1;
6741 }
6742
6743 /*
6744 * Consumer reader.
6745 */
6746 static ssize_t
6747 tracing_read_pipe(struct file *filp, char __user *ubuf,
6748 size_t cnt, loff_t *ppos)
6749 {
6750 struct trace_iterator *iter = filp->private_data;
6751 ssize_t sret;
6752
6753 /*
6754  * Avoid more than one consumer on a single file descriptor.
6755  * This is just a matter of trace coherency; the ring buffer itself
6756  * is protected.
6757 */
6758 mutex_lock(&iter->mutex);
6759
6760 /* return any leftover data */
6761 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6762 if (sret != -EBUSY)
6763 goto out;
6764
6765 trace_seq_init(&iter->seq);
6766
6767 if (iter->trace->read) {
6768 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6769 if (sret)
6770 goto out;
6771 }
6772
6773 waitagain:
6774 sret = tracing_wait_pipe(filp);
6775 if (sret <= 0)
6776 goto out;
6777
6778 /* stop when tracing is finished */
6779 if (trace_empty(iter)) {
6780 sret = 0;
6781 goto out;
6782 }
6783
6784 if (cnt >= PAGE_SIZE)
6785 cnt = PAGE_SIZE - 1;
6786
6787 /* reset all but tr, trace, and overruns */
6788 trace_iterator_reset(iter);
6789 cpumask_clear(iter->started);
6790 trace_seq_init(&iter->seq);
6791
6792 trace_event_read_lock();
6793 trace_access_lock(iter->cpu_file);
6794 while (trace_find_next_entry_inc(iter) != NULL) {
6795 enum print_line_t ret;
6796 int save_len = iter->seq.seq.len;
6797
6798 ret = print_trace_line(iter);
6799 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6800 /*
6801  * If one print_trace_line() fills the entire trace_seq in one shot,
6802  * trace_seq_to_user() will return -EBUSY because save_len == 0.
6803  * In this case, we need to consume it; otherwise, the loop will peek
6804  * this event again next time, resulting in an infinite loop.
6805 */
6806 if (save_len == 0) {
6807 iter->seq.full = 0;
6808 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6809 trace_consume(iter);
6810 break;
6811 }
6812
6813 /* In other cases, don't print partial lines */
6814 iter->seq.seq.len = save_len;
6815 break;
6816 }
6817 if (ret != TRACE_TYPE_NO_CONSUME)
6818 trace_consume(iter);
6819
6820 if (trace_seq_used(&iter->seq) >= cnt)
6821 break;
6822
6823 /*
6824  * Setting the full flag means we reached the trace_seq buffer
6825  * size and we should have left via the partial output condition above.
6826  * One of the trace_seq_* functions is not being used properly.
6827 */
6828 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6829 iter->ent->type);
6830 }
6831 trace_access_unlock(iter->cpu_file);
6832 trace_event_read_unlock();
6833
6834 /* Now copy what we have to the user */
6835 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6836 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6837 trace_seq_init(&iter->seq);
6838
6839 /*
6840 * If there was nothing to send to user, in spite of consuming trace
6841 * entries, go back to wait for more entries.
6842 */
6843 if (sret == -EBUSY)
6844 goto waitagain;
6845
6846 out:
6847 mutex_unlock(&iter->mutex);
6848
6849 return sret;
6850 }
6851
6852 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6853 unsigned int idx)
6854 {
6855 __free_page(spd->pages[idx]);
6856 }
6857
6858 static size_t
6859 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6860 {
6861 size_t count;
6862 int save_len;
6863 int ret;
6864
6865 /* Seq buffer is page-sized, exactly what we need. */
6866 for (;;) {
6867 save_len = iter->seq.seq.len;
6868 ret = print_trace_line(iter);
6869
6870 if (trace_seq_has_overflowed(&iter->seq)) {
6871 iter->seq.seq.len = save_len;
6872 break;
6873 }
6874
6875 /*
6876 * This should not be hit, because it should only
6877 * be set if the iter->seq overflowed. But check it
6878 * anyway to be safe.
6879 */
6880 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6881 iter->seq.seq.len = save_len;
6882 break;
6883 }
6884
6885 count = trace_seq_used(&iter->seq) - save_len;
6886 if (rem < count) {
6887 rem = 0;
6888 iter->seq.seq.len = save_len;
6889 break;
6890 }
6891
6892 if (ret != TRACE_TYPE_NO_CONSUME)
6893 trace_consume(iter);
6894 rem -= count;
6895 if (!trace_find_next_entry_inc(iter)) {
6896 rem = 0;
6897 iter->ent = NULL;
6898 break;
6899 }
6900 }
6901
6902 return rem;
6903 }
6904
6905 static ssize_t tracing_splice_read_pipe(struct file *filp,
6906 loff_t *ppos,
6907 struct pipe_inode_info *pipe,
6908 size_t len,
6909 unsigned int flags)
6910 {
6911 struct page *pages_def[PIPE_DEF_BUFFERS];
6912 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6913 struct trace_iterator *iter = filp->private_data;
6914 struct splice_pipe_desc spd = {
6915 .pages = pages_def,
6916 .partial = partial_def,
6917 .nr_pages = 0, /* This gets updated below. */
6918 .nr_pages_max = PIPE_DEF_BUFFERS,
6919 .ops = &default_pipe_buf_ops,
6920 .spd_release = tracing_spd_release_pipe,
6921 };
6922 ssize_t ret;
6923 size_t rem;
6924 unsigned int i;
6925
6926 if (splice_grow_spd(pipe, &spd))
6927 return -ENOMEM;
6928
6929 mutex_lock(&iter->mutex);
6930
6931 if (iter->trace->splice_read) {
6932 ret = iter->trace->splice_read(iter, filp,
6933 ppos, pipe, len, flags);
6934 if (ret)
6935 goto out_err;
6936 }
6937
6938 ret = tracing_wait_pipe(filp);
6939 if (ret <= 0)
6940 goto out_err;
6941
6942 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6943 ret = -EFAULT;
6944 goto out_err;
6945 }
6946
6947 trace_event_read_lock();
6948 trace_access_lock(iter->cpu_file);
6949
6950 /* Fill as many pages as possible. */
6951 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6952 spd.pages[i] = alloc_page(GFP_KERNEL);
6953 if (!spd.pages[i])
6954 break;
6955
6956 rem = tracing_fill_pipe_page(rem, iter);
6957
6958 /* Copy the data into the page, so we can start over. */
6959 ret = trace_seq_to_buffer(&iter->seq,
6960 page_address(spd.pages[i]),
6961 trace_seq_used(&iter->seq));
6962 if (ret < 0) {
6963 __free_page(spd.pages[i]);
6964 break;
6965 }
6966 spd.partial[i].offset = 0;
6967 spd.partial[i].len = trace_seq_used(&iter->seq);
6968
6969 trace_seq_init(&iter->seq);
6970 }
6971
6972 trace_access_unlock(iter->cpu_file);
6973 trace_event_read_unlock();
6974 mutex_unlock(&iter->mutex);
6975
6976 spd.nr_pages = i;
6977
6978 if (i)
6979 ret = splice_to_pipe(pipe, &spd);
6980 else
6981 ret = 0;
6982 out:
6983 splice_shrink_spd(&spd);
6984 return ret;
6985
6986 out_err:
6987 mutex_unlock(&iter->mutex);
6988 goto out;
6989 }
6990
6991 static ssize_t
6992 tracing_entries_read(struct file *filp, char __user *ubuf,
6993 size_t cnt, loff_t *ppos)
6994 {
6995 struct inode *inode = file_inode(filp);
6996 struct trace_array *tr = inode->i_private;
6997 int cpu = tracing_get_cpu(inode);
6998 char buf[64];
6999 int r = 0;
7000 ssize_t ret;
7001
7002 mutex_lock(&trace_types_lock);
7003
7004 if (cpu == RING_BUFFER_ALL_CPUS) {
7005 int cpu, buf_size_same;
7006 unsigned long size;
7007
7008 size = 0;
7009 buf_size_same = 1;
7010 /* check if all cpu sizes are same */
7011 for_each_tracing_cpu(cpu) {
7012 /* fill in the size from first enabled cpu */
7013 if (size == 0)
7014 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7015 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7016 buf_size_same = 0;
7017 break;
7018 }
7019 }
7020
7021 if (buf_size_same) {
7022 if (!ring_buffer_expanded)
7023 r = sprintf(buf, "%lu (expanded: %lu)\n",
7024 size >> 10,
7025 trace_buf_size >> 10);
7026 else
7027 r = sprintf(buf, "%lu\n", size >> 10);
7028 } else
7029 r = sprintf(buf, "X\n");
7030 } else
7031 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7032
7033 mutex_unlock(&trace_types_lock);
7034
7035 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7036 return ret;
7037 }
7038
7039 static ssize_t
7040 tracing_entries_write(struct file *filp, const char __user *ubuf,
7041 size_t cnt, loff_t *ppos)
7042 {
7043 struct inode *inode = file_inode(filp);
7044 struct trace_array *tr = inode->i_private;
7045 unsigned long val;
7046 int ret;
7047
7048 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7049 if (ret)
7050 return ret;
7051
7052 /* must have at least 1 entry */
7053 if (!val)
7054 return -EINVAL;
7055
7056 /* value is in KB */
7057 val <<= 10;
7058 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7059 if (ret < 0)
7060 return ret;
7061
7062 *ppos += cnt;
7063
7064 return cnt;
7065 }
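/*
 * The value written to buffer_size_kb is in kilobytes; the shift in
 * tracing_entries_write() above converts it to bytes before resizing,
 * e.g. "echo 1408 > buffer_size_kb" requests 1408 << 10 = 1441792 bytes
 * per CPU buffer.
 */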
7066
7067 static ssize_t
7068 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7069 size_t cnt, loff_t *ppos)
7070 {
7071 struct trace_array *tr = filp->private_data;
7072 char buf[64];
7073 int r, cpu;
7074 unsigned long size = 0, expanded_size = 0;
7075
7076 mutex_lock(&trace_types_lock);
7077 for_each_tracing_cpu(cpu) {
7078 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7079 if (!ring_buffer_expanded)
7080 expanded_size += trace_buf_size >> 10;
7081 }
7082 if (ring_buffer_expanded)
7083 r = sprintf(buf, "%lu\n", size);
7084 else
7085 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7086 mutex_unlock(&trace_types_lock);
7087
7088 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7089 }
7090
7091 static ssize_t
7092 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7093 size_t cnt, loff_t *ppos)
7094 {
7095 /*
7096 * There is no need to read what the user has written, this function
7097 * is just to make sure that there is no error when "echo" is used
7098 */
7099
7100 *ppos += cnt;
7101
7102 return cnt;
7103 }
7104
7105 static int
7106 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7107 {
7108 struct trace_array *tr = inode->i_private;
7109
7110 /* disable tracing ? */
7111 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7112 tracer_tracing_off(tr);
7113 /* resize the ring buffer to 0 */
7114 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7115
7116 trace_array_put(tr);
7117
7118 return 0;
7119 }
7120
7121 static ssize_t
7122 tracing_mark_write(struct file *filp, const char __user *ubuf,
7123 size_t cnt, loff_t *fpos)
7124 {
7125 struct trace_array *tr = filp->private_data;
7126 struct ring_buffer_event *event;
7127 enum event_trigger_type tt = ETT_NONE;
7128 struct trace_buffer *buffer;
7129 struct print_entry *entry;
7130 ssize_t written;
7131 int size;
7132 int len;
7133
7134 /* Used in tracing_mark_raw_write() as well */
7135 #define FAULTED_STR "<faulted>"
7136 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7137
7138 if (tracing_disabled)
7139 return -EINVAL;
7140
7141 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7142 return -EINVAL;
7143
7144 if (cnt > TRACE_BUF_SIZE)
7145 cnt = TRACE_BUF_SIZE;
7146
7147 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7148
7149 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7150
7151 /* If less than "<faulted>", then make sure we can still add that */
7152 if (cnt < FAULTED_SIZE)
7153 size += FAULTED_SIZE - cnt;
7154
7155 buffer = tr->array_buffer.buffer;
7156 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7157 tracing_gen_ctx());
7158 if (unlikely(!event))
7159 /* Ring buffer disabled, return as if not open for write */
7160 return -EBADF;
7161
7162 entry = ring_buffer_event_data(event);
7163 entry->ip = _THIS_IP_;
7164
7165 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7166 if (len) {
7167 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7168 cnt = FAULTED_SIZE;
7169 written = -EFAULT;
7170 } else
7171 written = cnt;
7172
7173 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7174 /* do not add \n before testing triggers, but add \0 */
7175 entry->buf[cnt] = '\0';
7176 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7177 }
7178
7179 if (entry->buf[cnt - 1] != '\n') {
7180 entry->buf[cnt] = '\n';
7181 entry->buf[cnt + 1] = '\0';
7182 } else
7183 entry->buf[cnt] = '\0';
7184
7185 if (static_branch_unlikely(&trace_marker_exports_enabled))
7186 ftrace_exports(event, TRACE_EXPORT_MARKER);
7187 __buffer_unlock_commit(buffer, event);
7188
7189 if (tt)
7190 event_triggers_post_call(tr->trace_marker_file, tt);
7191
7192 return written;
7193 }
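/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo "hello from user space" > /sys/kernel/tracing/trace_marker
 *
 * The string is recorded as a print event and shows up in the trace
 * output interleaved with the other events in the ring buffer.
 */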
7194
7195 /* Limit it for now to 3K (including tag) */
7196 #define RAW_DATA_MAX_SIZE (1024*3)
7197
7198 static ssize_t
7199 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7200 size_t cnt, loff_t *fpos)
7201 {
7202 struct trace_array *tr = filp->private_data;
7203 struct ring_buffer_event *event;
7204 struct trace_buffer *buffer;
7205 struct raw_data_entry *entry;
7206 ssize_t written;
7207 int size;
7208 int len;
7209
7210 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7211
7212 if (tracing_disabled)
7213 return -EINVAL;
7214
7215 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7216 return -EINVAL;
7217
7218 /* The marker must at least have a tag id */
7219 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7220 return -EINVAL;
7221
7222 if (cnt > TRACE_BUF_SIZE)
7223 cnt = TRACE_BUF_SIZE;
7224
7225 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7226
7227 size = sizeof(*entry) + cnt;
7228 if (cnt < FAULT_SIZE_ID)
7229 size += FAULT_SIZE_ID - cnt;
7230
7231 buffer = tr->array_buffer.buffer;
7232 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7233 tracing_gen_ctx());
7234 if (!event)
7235 /* Ring buffer disabled, return as if not open for write */
7236 return -EBADF;
7237
7238 entry = ring_buffer_event_data(event);
7239
7240 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7241 if (len) {
7242 entry->id = -1;
7243 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7244 written = -EFAULT;
7245 } else
7246 written = cnt;
7247
7248 __buffer_unlock_commit(buffer, event);
7249
7250 return written;
7251 }
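/*
 * Sketch of a user-space writer for trace_marker_raw (illustrative only;
 * the tag value 0x1234 and the payload are made up). The first
 * sizeof(unsigned int) bytes of the write are taken as the tag id, the
 * rest is raw payload:
 *
 *   unsigned char buf[sizeof(unsigned int) + 8];
 *   unsigned int id = 0x1234;
 *
 *   memcpy(buf, &id, sizeof(id));
 *   memcpy(buf + sizeof(id), "payload!", 8);
 *   write(fd, buf, sizeof(buf));   // fd opened on trace_marker_raw
 */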
7252
7253 static int tracing_clock_show(struct seq_file *m, void *v)
7254 {
7255 struct trace_array *tr = m->private;
7256 int i;
7257
7258 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7259 seq_printf(m,
7260 "%s%s%s%s", i ? " " : "",
7261 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7262 i == tr->clock_id ? "]" : "");
7263 seq_putc(m, '\n');
7264
7265 return 0;
7266 }
7267
7268 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7269 {
7270 int i;
7271
7272 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7273 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7274 break;
7275 }
7276 if (i == ARRAY_SIZE(trace_clocks))
7277 return -EINVAL;
7278
7279 mutex_lock(&trace_types_lock);
7280
7281 tr->clock_id = i;
7282
7283 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7284
7285 /*
7286 * New clock may not be consistent with the previous clock.
7287 * Reset the buffer so that it doesn't have incomparable timestamps.
7288 */
7289 tracing_reset_online_cpus(&tr->array_buffer);
7290
7291 #ifdef CONFIG_TRACER_MAX_TRACE
7292 if (tr->max_buffer.buffer)
7293 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7294 tracing_reset_online_cpus(&tr->max_buffer);
7295 #endif
7296
7297 mutex_unlock(&trace_types_lock);
7298
7299 return 0;
7300 }
7301
7302 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7303 size_t cnt, loff_t *fpos)
7304 {
7305 struct seq_file *m = filp->private_data;
7306 struct trace_array *tr = m->private;
7307 char buf[64];
7308 const char *clockstr;
7309 int ret;
7310
7311 if (cnt >= sizeof(buf))
7312 return -EINVAL;
7313
7314 if (copy_from_user(buf, ubuf, cnt))
7315 return -EFAULT;
7316
7317 buf[cnt] = 0;
7318
7319 clockstr = strstrip(buf);
7320
7321 ret = tracing_set_clock(tr, clockstr);
7322 if (ret)
7323 return ret;
7324
7325 *fpos += cnt;
7326
7327 return cnt;
7328 }
7329
7330 static int tracing_clock_open(struct inode *inode, struct file *file)
7331 {
7332 struct trace_array *tr = inode->i_private;
7333 int ret;
7334
7335 ret = tracing_check_open_get_tr(tr);
7336 if (ret)
7337 return ret;
7338
7339 ret = single_open(file, tracing_clock_show, inode->i_private);
7340 if (ret < 0)
7341 trace_array_put(tr);
7342
7343 return ret;
7344 }
7345
7346 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7347 {
7348 struct trace_array *tr = m->private;
7349
7350 mutex_lock(&trace_types_lock);
7351
7352 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7353 seq_puts(m, "delta [absolute]\n");
7354 else
7355 seq_puts(m, "[delta] absolute\n");
7356
7357 mutex_unlock(&trace_types_lock);
7358
7359 return 0;
7360 }
7361
7362 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7363 {
7364 struct trace_array *tr = inode->i_private;
7365 int ret;
7366
7367 ret = tracing_check_open_get_tr(tr);
7368 if (ret)
7369 return ret;
7370
7371 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7372 if (ret < 0)
7373 trace_array_put(tr);
7374
7375 return ret;
7376 }
7377
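/*
 * Return the time stamp for an event. If the event is the per-CPU
 * trace_buffered_event (not yet committed), fall back to the current
 * ring buffer time stamp instead of the event's own.
 */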
7378 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7379 {
7380 if (rbe == this_cpu_read(trace_buffered_event))
7381 return ring_buffer_time_stamp(buffer);
7382
7383 return ring_buffer_event_time_stamp(buffer, rbe);
7384 }
7385
7386 /*
7387 * Set or disable using the per CPU trace_buffered_event when possible.
7388 */
7389 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7390 {
7391 int ret = 0;
7392
7393 mutex_lock(&trace_types_lock);
7394
7395 if (set && tr->no_filter_buffering_ref++)
7396 goto out;
7397
7398 if (!set) {
7399 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7400 ret = -EINVAL;
7401 goto out;
7402 }
7403
7404 --tr->no_filter_buffering_ref;
7405 }
7406 out:
7407 mutex_unlock(&trace_types_lock);
7408
7409 return ret;
7410 }
7411
7412 struct ftrace_buffer_info {
7413 struct trace_iterator iter;
7414 void *spare;
7415 unsigned int spare_cpu;
7416 unsigned int read;
7417 };
7418
7419 #ifdef CONFIG_TRACER_SNAPSHOT
7420 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7421 {
7422 struct trace_array *tr = inode->i_private;
7423 struct trace_iterator *iter;
7424 struct seq_file *m;
7425 int ret;
7426
7427 ret = tracing_check_open_get_tr(tr);
7428 if (ret)
7429 return ret;
7430
7431 if (file->f_mode & FMODE_READ) {
7432 iter = __tracing_open(inode, file, true);
7433 if (IS_ERR(iter))
7434 ret = PTR_ERR(iter);
7435 } else {
7436 /* Writes still need the seq_file to hold the private data */
7437 ret = -ENOMEM;
7438 m = kzalloc(sizeof(*m), GFP_KERNEL);
7439 if (!m)
7440 goto out;
7441 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7442 if (!iter) {
7443 kfree(m);
7444 goto out;
7445 }
7446 ret = 0;
7447
7448 iter->tr = tr;
7449 iter->array_buffer = &tr->max_buffer;
7450 iter->cpu_file = tracing_get_cpu(inode);
7451 m->private = iter;
7452 file->private_data = m;
7453 }
7454 out:
7455 if (ret < 0)
7456 trace_array_put(tr);
7457
7458 return ret;
7459 }
7460
7461 static ssize_t
7462 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7463 loff_t *ppos)
7464 {
7465 struct seq_file *m = filp->private_data;
7466 struct trace_iterator *iter = m->private;
7467 struct trace_array *tr = iter->tr;
7468 unsigned long val;
7469 int ret;
7470
7471 ret = tracing_update_buffers();
7472 if (ret < 0)
7473 return ret;
7474
7475 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7476 if (ret)
7477 return ret;
7478
7479 mutex_lock(&trace_types_lock);
7480
7481 if (tr->current_trace->use_max_tr) {
7482 ret = -EBUSY;
7483 goto out;
7484 }
7485
7486 local_irq_disable();
7487 arch_spin_lock(&tr->max_lock);
7488 if (tr->cond_snapshot)
7489 ret = -EBUSY;
7490 arch_spin_unlock(&tr->max_lock);
7491 local_irq_enable();
7492 if (ret)
7493 goto out;
7494
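	/*
	 * Value written to the snapshot file:
	 *   0 - free the snapshot buffer (all CPUs only)
	 *   1 - allocate the snapshot buffer if needed and take a snapshot
	 *  >1 - clear the snapshot buffer without freeing it
	 */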
7495 switch (val) {
7496 case 0:
7497 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7498 ret = -EINVAL;
7499 break;
7500 }
7501 if (tr->allocated_snapshot)
7502 free_snapshot(tr);
7503 break;
7504 case 1:
7505 /* Only allow per-cpu swap if the ring buffer supports it */
7506 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7507 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7508 ret = -EINVAL;
7509 break;
7510 }
7511 #endif
7512 if (tr->allocated_snapshot)
7513 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7514 &tr->array_buffer, iter->cpu_file);
7515 else
7516 ret = tracing_alloc_snapshot_instance(tr);
7517 if (ret < 0)
7518 break;
7519 local_irq_disable();
7520 /* Now, we're going to swap */
7521 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7522 update_max_tr(tr, current, smp_processor_id(), NULL);
7523 else
7524 update_max_tr_single(tr, current, iter->cpu_file);
7525 local_irq_enable();
7526 break;
7527 default:
7528 if (tr->allocated_snapshot) {
7529 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7530 tracing_reset_online_cpus(&tr->max_buffer);
7531 else
7532 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7533 }
7534 break;
7535 }
7536
7537 if (ret >= 0) {
7538 *ppos += cnt;
7539 ret = cnt;
7540 }
7541 out:
7542 mutex_unlock(&trace_types_lock);
7543 return ret;
7544 }
7545
7546 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7547 {
7548 struct seq_file *m = file->private_data;
7549 int ret;
7550
7551 ret = tracing_release(inode, file);
7552
7553 if (file->f_mode & FMODE_READ)
7554 return ret;
7555
7556 /* If write only, the seq_file is just a stub */
7557 if (m)
7558 kfree(m->private);
7559 kfree(m);
7560
7561 return 0;
7562 }
7563
7564 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7565 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7566 size_t count, loff_t *ppos);
7567 static int tracing_buffers_release(struct inode *inode, struct file *file);
7568 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7569 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7570
7571 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7572 {
7573 struct ftrace_buffer_info *info;
7574 int ret;
7575
7576 /* The following checks for tracefs lockdown */
7577 ret = tracing_buffers_open(inode, filp);
7578 if (ret < 0)
7579 return ret;
7580
7581 info = filp->private_data;
7582
7583 if (info->iter.trace->use_max_tr) {
7584 tracing_buffers_release(inode, filp);
7585 return -EBUSY;
7586 }
7587
7588 info->iter.snapshot = true;
7589 info->iter.array_buffer = &info->iter.tr->max_buffer;
7590
7591 return ret;
7592 }
7593
7594 #endif /* CONFIG_TRACER_SNAPSHOT */
7595
7596
7597 static const struct file_operations tracing_thresh_fops = {
7598 .open = tracing_open_generic,
7599 .read = tracing_thresh_read,
7600 .write = tracing_thresh_write,
7601 .llseek = generic_file_llseek,
7602 };
7603
7604 #ifdef CONFIG_TRACER_MAX_TRACE
7605 static const struct file_operations tracing_max_lat_fops = {
7606 .open = tracing_open_generic,
7607 .read = tracing_max_lat_read,
7608 .write = tracing_max_lat_write,
7609 .llseek = generic_file_llseek,
7610 };
7611 #endif
7612
7613 static const struct file_operations set_tracer_fops = {
7614 .open = tracing_open_generic,
7615 .read = tracing_set_trace_read,
7616 .write = tracing_set_trace_write,
7617 .llseek = generic_file_llseek,
7618 };
7619
7620 static const struct file_operations tracing_pipe_fops = {
7621 .open = tracing_open_pipe,
7622 .poll = tracing_poll_pipe,
7623 .read = tracing_read_pipe,
7624 .splice_read = tracing_splice_read_pipe,
7625 .release = tracing_release_pipe,
7626 .llseek = no_llseek,
7627 };
7628
7629 static const struct file_operations tracing_entries_fops = {
7630 .open = tracing_open_generic_tr,
7631 .read = tracing_entries_read,
7632 .write = tracing_entries_write,
7633 .llseek = generic_file_llseek,
7634 .release = tracing_release_generic_tr,
7635 };
7636
7637 static const struct file_operations tracing_total_entries_fops = {
7638 .open = tracing_open_generic_tr,
7639 .read = tracing_total_entries_read,
7640 .llseek = generic_file_llseek,
7641 .release = tracing_release_generic_tr,
7642 };
7643
7644 static const struct file_operations tracing_free_buffer_fops = {
7645 .open = tracing_open_generic_tr,
7646 .write = tracing_free_buffer_write,
7647 .release = tracing_free_buffer_release,
7648 };
7649
7650 static const struct file_operations tracing_mark_fops = {
7651 .open = tracing_mark_open,
7652 .write = tracing_mark_write,
7653 .release = tracing_release_generic_tr,
7654 };
7655
7656 static const struct file_operations tracing_mark_raw_fops = {
7657 .open = tracing_mark_open,
7658 .write = tracing_mark_raw_write,
7659 .release = tracing_release_generic_tr,
7660 };
7661
7662 static const struct file_operations trace_clock_fops = {
7663 .open = tracing_clock_open,
7664 .read = seq_read,
7665 .llseek = seq_lseek,
7666 .release = tracing_single_release_tr,
7667 .write = tracing_clock_write,
7668 };
7669
7670 static const struct file_operations trace_time_stamp_mode_fops = {
7671 .open = tracing_time_stamp_mode_open,
7672 .read = seq_read,
7673 .llseek = seq_lseek,
7674 .release = tracing_single_release_tr,
7675 };
7676
7677 #ifdef CONFIG_TRACER_SNAPSHOT
7678 static const struct file_operations snapshot_fops = {
7679 .open = tracing_snapshot_open,
7680 .read = seq_read,
7681 .write = tracing_snapshot_write,
7682 .llseek = tracing_lseek,
7683 .release = tracing_snapshot_release,
7684 };
7685
7686 static const struct file_operations snapshot_raw_fops = {
7687 .open = snapshot_raw_open,
7688 .read = tracing_buffers_read,
7689 .release = tracing_buffers_release,
7690 .splice_read = tracing_buffers_splice_read,
7691 .llseek = no_llseek,
7692 };
7693
7694 #endif /* CONFIG_TRACER_SNAPSHOT */
7695
7696 /*
7697 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7698 * @filp: The active open file structure
7699 * @ubuf: The userspace provided buffer to read the value from
7700 * @cnt: The maximum number of bytes to read
7701 * @ppos: The current "file" position
7702 *
7703 * This function implements the write interface for a struct trace_min_max_param.
7704 * The filp->private_data must point to a trace_min_max_param structure that
7705 * defines where to write the value, the min and the max acceptable values,
7706 * and a lock to protect the write.
7707 */
7708 static ssize_t
7709 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7710 {
7711 struct trace_min_max_param *param = filp->private_data;
7712 u64 val;
7713 int err;
7714
7715 if (!param)
7716 return -EFAULT;
7717
7718 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7719 if (err)
7720 return err;
7721
7722 if (param->lock)
7723 mutex_lock(param->lock);
7724
7725 if (param->min && val < *param->min)
7726 err = -EINVAL;
7727
7728 if (param->max && val > *param->max)
7729 err = -EINVAL;
7730
7731 if (!err)
7732 *param->val = val;
7733
7734 if (param->lock)
7735 mutex_unlock(param->lock);
7736
7737 if (err)
7738 return err;
7739
7740 return cnt;
7741 }
7742
7743 /*
7744 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7745 * @filp: The active open file structure
7746 * @ubuf: The userspace provided buffer to read value into
7747 * @cnt: The maximum number of bytes to read
7748 * @ppos: The current "file" position
7749 *
7750 * This function implements the read interface for a struct trace_min_max_param.
7751 * The filp->private_data must point to a trace_min_max_param struct with valid
7752 * data.
7753 */
7754 static ssize_t
7755 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7756 {
7757 struct trace_min_max_param *param = filp->private_data;
7758 char buf[U64_STR_SIZE];
7759 int len;
7760 u64 val;
7761
7762 if (!param)
7763 return -EFAULT;
7764
7765 val = *param->val;
7766
7767 if (cnt > sizeof(buf))
7768 cnt = sizeof(buf);
7769
7770 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7771
7772 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7773 }
7774
7775 const struct file_operations trace_min_max_fops = {
7776 .open = tracing_open_generic,
7777 .read = trace_min_max_read,
7778 .write = trace_min_max_write,
7779 };
7780
7781 #define TRACING_LOG_ERRS_MAX 8
7782 #define TRACING_LOG_LOC_MAX 128
7783
7784 #define CMD_PREFIX " Command: "
7785
7786 struct err_info {
7787 const char **errs; /* ptr to loc-specific array of err strings */
7788 u8 type; /* index into errs -> specific err string */
7789 u16 pos; /* caret position */
7790 u64 ts;
7791 };
7792
7793 struct tracing_log_err {
7794 struct list_head list;
7795 struct err_info info;
7796 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7797 char *cmd; /* what caused err */
7798 };
7799
7800 static DEFINE_MUTEX(tracing_err_log_lock);
7801
7802 static struct tracing_log_err *alloc_tracing_log_err(int len)
7803 {
7804 struct tracing_log_err *err;
7805
7806 err = kzalloc(sizeof(*err), GFP_KERNEL);
7807 if (!err)
7808 return ERR_PTR(-ENOMEM);
7809
7810 err->cmd = kzalloc(len, GFP_KERNEL);
7811 if (!err->cmd) {
7812 kfree(err);
7813 return ERR_PTR(-ENOMEM);
7814 }
7815
7816 return err;
7817 }
7818
7819 static void free_tracing_log_err(struct tracing_log_err *err)
7820 {
7821 kfree(err->cmd);
7822 kfree(err);
7823 }
7824
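/*
 * Get a tracing_log_err to fill in. New entries are allocated until
 * TRACING_LOG_ERRS_MAX is reached; after that the oldest entry in
 * tr->err_log is recycled, with its cmd buffer reallocated to @len.
 */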
7825 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7826 int len)
7827 {
7828 struct tracing_log_err *err;
7829 char *cmd;
7830
7831 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7832 err = alloc_tracing_log_err(len);
7833 if (PTR_ERR(err) != -ENOMEM)
7834 tr->n_err_log_entries++;
7835
7836 return err;
7837 }
7838 cmd = kzalloc(len, GFP_KERNEL);
7839 if (!cmd)
7840 return ERR_PTR(-ENOMEM);
7841 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7842 kfree(err->cmd);
7843 err->cmd = cmd;
7844 list_del(&err->list);
7845
7846 return err;
7847 }
7848
7849 /**
7850 * err_pos - find the position of a string within a command for error careting
7851 * @cmd: The tracing command that caused the error
7852 * @str: The string to position the caret at within @cmd
7853 *
7854 * Finds the position of the first occurrence of @str within @cmd. The
7855 * return value can be passed to tracing_log_err() for caret placement
7856 * within @cmd.
7857 *
7858 * Returns the index within @cmd of the first occurrence of @str or 0
7859 * if @str was not found.
7860 */
7861 unsigned int err_pos(char *cmd, const char *str)
7862 {
7863 char *found;
7864
7865 if (WARN_ON(!strlen(cmd)))
7866 return 0;
7867
7868 found = strstr(cmd, str);
7869 if (found)
7870 return found - cmd;
7871
7872 return 0;
7873 }
7874
7875 /**
7876 * tracing_log_err - write an error to the tracing error log
7877 * @tr: The associated trace array for the error (NULL for top level array)
7878 * @loc: A string describing where the error occurred
7879 * @cmd: The tracing command that caused the error
7880 * @errs: The array of loc-specific static error strings
7881 * @type: The index into errs[], which produces the specific static err string
7882 * @pos: The position the caret should be placed in the cmd
7883 *
7884 * Writes an error into tracing/error_log of the form:
7885 *
7886 * <loc>: error: <text>
7887 * Command: <cmd>
7888 * ^
7889 *
7890 * tracing/error_log is a small log file containing the last
7891 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7892 * unless there has been a tracing error, and the error log can be
7893 * cleared and have its memory freed by writing the empty string in
7894 * truncation mode to it i.e. echo > tracing/error_log.
7895 *
7896 * NOTE: the @errs array along with the @type param are used to
7897 * produce a static error string - this string is not copied and saved
7898 * when the error is logged - only a pointer to it is saved. See
7899 * existing callers for examples of how static strings are typically
7900 * defined for use with tracing_log_err().
7901 */
7902 void tracing_log_err(struct trace_array *tr,
7903 const char *loc, const char *cmd,
7904 const char **errs, u8 type, u16 pos)
7905 {
7906 struct tracing_log_err *err;
7907 int len = 0;
7908
7909 if (!tr)
7910 tr = &global_trace;
7911
7912 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7913
7914 mutex_lock(&tracing_err_log_lock);
7915 err = get_tracing_log_err(tr, len);
7916 if (PTR_ERR(err) == -ENOMEM) {
7917 mutex_unlock(&tracing_err_log_lock);
7918 return;
7919 }
7920
7921 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7922 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7923
7924 err->info.errs = errs;
7925 err->info.type = type;
7926 err->info.pos = pos;
7927 err->info.ts = local_clock();
7928
7929 list_add_tail(&err->list, &tr->err_log);
7930 mutex_unlock(&tracing_err_log_lock);
7931 }
7932
7933 static void clear_tracing_err_log(struct trace_array *tr)
7934 {
7935 struct tracing_log_err *err, *next;
7936
7937 mutex_lock(&tracing_err_log_lock);
7938 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7939 list_del(&err->list);
7940 free_tracing_log_err(err);
7941 }
7942
7943 tr->n_err_log_entries = 0;
7944 mutex_unlock(&tracing_err_log_lock);
7945 }
7946
7947 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7948 {
7949 struct trace_array *tr = m->private;
7950
7951 mutex_lock(&tracing_err_log_lock);
7952
7953 return seq_list_start(&tr->err_log, *pos);
7954 }
7955
7956 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7957 {
7958 struct trace_array *tr = m->private;
7959
7960 return seq_list_next(v, &tr->err_log, pos);
7961 }
7962
7963 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7964 {
7965 mutex_unlock(&tracing_err_log_lock);
7966 }
7967
7968 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7969 {
7970 u16 i;
7971
7972 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7973 seq_putc(m, ' ');
7974 for (i = 0; i < pos; i++)
7975 seq_putc(m, ' ');
7976 seq_puts(m, "^\n");
7977 }
7978
7979 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7980 {
7981 struct tracing_log_err *err = v;
7982
7983 if (err) {
7984 const char *err_text = err->info.errs[err->info.type];
7985 u64 sec = err->info.ts;
7986 u32 nsec;
7987
7988 nsec = do_div(sec, NSEC_PER_SEC);
7989 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7990 err->loc, err_text);
7991 seq_printf(m, "%s", err->cmd);
7992 tracing_err_log_show_pos(m, err->info.pos);
7993 }
7994
7995 return 0;
7996 }
7997
7998 static const struct seq_operations tracing_err_log_seq_ops = {
7999 .start = tracing_err_log_seq_start,
8000 .next = tracing_err_log_seq_next,
8001 .stop = tracing_err_log_seq_stop,
8002 .show = tracing_err_log_seq_show
8003 };
8004
8005 static int tracing_err_log_open(struct inode *inode, struct file *file)
8006 {
8007 struct trace_array *tr = inode->i_private;
8008 int ret = 0;
8009
8010 ret = tracing_check_open_get_tr(tr);
8011 if (ret)
8012 return ret;
8013
8014 /* If this file was opened for write, then erase contents */
8015 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8016 clear_tracing_err_log(tr);
8017
8018 if (file->f_mode & FMODE_READ) {
8019 ret = seq_open(file, &tracing_err_log_seq_ops);
8020 if (!ret) {
8021 struct seq_file *m = file->private_data;
8022 m->private = tr;
8023 } else {
8024 trace_array_put(tr);
8025 }
8026 }
8027 return ret;
8028 }
8029
8030 static ssize_t tracing_err_log_write(struct file *file,
8031 const char __user *buffer,
8032 size_t count, loff_t *ppos)
8033 {
8034 return count;
8035 }
8036
8037 static int tracing_err_log_release(struct inode *inode, struct file *file)
8038 {
8039 struct trace_array *tr = inode->i_private;
8040
8041 trace_array_put(tr);
8042
8043 if (file->f_mode & FMODE_READ)
8044 seq_release(inode, file);
8045
8046 return 0;
8047 }
8048
8049 static const struct file_operations tracing_err_log_fops = {
8050 .open = tracing_err_log_open,
8051 .write = tracing_err_log_write,
8052 .read = seq_read,
8053 .llseek = seq_lseek,
8054 .release = tracing_err_log_release,
8055 };
8056
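/*
 * Open a per-CPU ring buffer reader (backs trace_pipe_raw and is reused
 * by snapshot_raw_open()): allocate a ftrace_buffer_info iterator bound
 * to the chosen CPU and take a reference on the trace array.
 */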
8057 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8058 {
8059 struct trace_array *tr = inode->i_private;
8060 struct ftrace_buffer_info *info;
8061 int ret;
8062
8063 ret = tracing_check_open_get_tr(tr);
8064 if (ret)
8065 return ret;
8066
8067 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8068 if (!info) {
8069 trace_array_put(tr);
8070 return -ENOMEM;
8071 }
8072
8073 mutex_lock(&trace_types_lock);
8074
8075 info->iter.tr = tr;
8076 info->iter.cpu_file = tracing_get_cpu(inode);
8077 info->iter.trace = tr->current_trace;
8078 info->iter.array_buffer = &tr->array_buffer;
8079 info->spare = NULL;
8080 /* Force reading ring buffer for first read */
8081 info->read = (unsigned int)-1;
8082
8083 filp->private_data = info;
8084
8085 tr->trace_ref++;
8086
8087 mutex_unlock(&trace_types_lock);
8088
8089 ret = nonseekable_open(inode, filp);
8090 if (ret < 0)
8091 trace_array_put(tr);
8092
8093 return ret;
8094 }
8095
8096 static __poll_t
8097 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8098 {
8099 struct ftrace_buffer_info *info = filp->private_data;
8100 struct trace_iterator *iter = &info->iter;
8101
8102 return trace_poll(iter, filp, poll_table);
8103 }
8104
8105 static ssize_t
8106 tracing_buffers_read(struct file *filp, char __user *ubuf,
8107 size_t count, loff_t *ppos)
8108 {
8109 struct ftrace_buffer_info *info = filp->private_data;
8110 struct trace_iterator *iter = &info->iter;
8111 ssize_t ret = 0;
8112 ssize_t size;
8113
8114 if (!count)
8115 return 0;
8116
8117 #ifdef CONFIG_TRACER_MAX_TRACE
8118 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8119 return -EBUSY;
8120 #endif
8121
8122 if (!info->spare) {
8123 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8124 iter->cpu_file);
8125 if (IS_ERR(info->spare)) {
8126 ret = PTR_ERR(info->spare);
8127 info->spare = NULL;
8128 } else {
8129 info->spare_cpu = iter->cpu_file;
8130 }
8131 }
8132 if (!info->spare)
8133 return ret;
8134
8135 /* Do we have previous read data to read? */
8136 if (info->read < PAGE_SIZE)
8137 goto read;
8138
8139 again:
8140 trace_access_lock(iter->cpu_file);
8141 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8142 &info->spare,
8143 count,
8144 iter->cpu_file, 0);
8145 trace_access_unlock(iter->cpu_file);
8146
8147 if (ret < 0) {
8148 if (trace_empty(iter)) {
8149 if ((filp->f_flags & O_NONBLOCK))
8150 return -EAGAIN;
8151
8152 ret = wait_on_pipe(iter, 0);
8153 if (ret)
8154 return ret;
8155
8156 goto again;
8157 }
8158 return 0;
8159 }
8160
8161 info->read = 0;
8162 read:
8163 size = PAGE_SIZE - info->read;
8164 if (size > count)
8165 size = count;
8166
8167 ret = copy_to_user(ubuf, info->spare + info->read, size);
8168 if (ret == size)
8169 return -EFAULT;
8170
8171 size -= ret;
8172
8173 *ppos += size;
8174 info->read += size;
8175
8176 return size;
8177 }
8178
8179 static int tracing_buffers_release(struct inode *inode, struct file *file)
8180 {
8181 struct ftrace_buffer_info *info = file->private_data;
8182 struct trace_iterator *iter = &info->iter;
8183
8184 mutex_lock(&trace_types_lock);
8185
8186 iter->tr->trace_ref--;
8187
8188 __trace_array_put(iter->tr);
8189
8190 iter->wait_index++;
8191 /* Make sure the waiters see the new wait_index */
8192 smp_wmb();
8193
8194 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8195
8196 if (info->spare)
8197 ring_buffer_free_read_page(iter->array_buffer->buffer,
8198 info->spare_cpu, info->spare);
8199 kvfree(info);
8200
8201 mutex_unlock(&trace_types_lock);
8202
8203 return 0;
8204 }
8205
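/*
 * A reference to a ring buffer page handed out via splice. The page is
 * only returned to the ring buffer once the last reference is dropped
 * (see buffer_ref_release() below).
 */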
8206 struct buffer_ref {
8207 struct trace_buffer *buffer;
8208 void *page;
8209 int cpu;
8210 refcount_t refcount;
8211 };
8212
8213 static void buffer_ref_release(struct buffer_ref *ref)
8214 {
8215 if (!refcount_dec_and_test(&ref->refcount))
8216 return;
8217 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8218 kfree(ref);
8219 }
8220
8221 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8222 struct pipe_buffer *buf)
8223 {
8224 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8225
8226 buffer_ref_release(ref);
8227 buf->private = 0;
8228 }
8229
8230 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8231 struct pipe_buffer *buf)
8232 {
8233 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8234
8235 if (refcount_read(&ref->refcount) > INT_MAX/2)
8236 return false;
8237
8238 refcount_inc(&ref->refcount);
8239 return true;
8240 }
8241
8242 /* Pipe buffer operations for a buffer. */
8243 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8244 .release = buffer_pipe_buf_release,
8245 .get = buffer_pipe_buf_get,
8246 };
8247
8248 /*
8249 * Callback from splice_to_pipe(), if we need to release some pages
8250 * at the end of the spd in case we errored out while filling the pipe.
8251 */
8252 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8253 {
8254 struct buffer_ref *ref =
8255 (struct buffer_ref *)spd->partial[i].private;
8256
8257 buffer_ref_release(ref);
8258 spd->partial[i].private = 0;
8259 }
8260
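/*
 * Splice ring buffer pages directly into a pipe (zero copy). Each page
 * read from the ring buffer is wrapped in a buffer_ref and handed to
 * the pipe via buffer_pipe_buf_ops.
 */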
8261 static ssize_t
8262 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8263 struct pipe_inode_info *pipe, size_t len,
8264 unsigned int flags)
8265 {
8266 struct ftrace_buffer_info *info = file->private_data;
8267 struct trace_iterator *iter = &info->iter;
8268 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8269 struct page *pages_def[PIPE_DEF_BUFFERS];
8270 struct splice_pipe_desc spd = {
8271 .pages = pages_def,
8272 .partial = partial_def,
8273 .nr_pages_max = PIPE_DEF_BUFFERS,
8274 .ops = &buffer_pipe_buf_ops,
8275 .spd_release = buffer_spd_release,
8276 };
8277 struct buffer_ref *ref;
8278 int entries, i;
8279 ssize_t ret = 0;
8280
8281 #ifdef CONFIG_TRACER_MAX_TRACE
8282 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8283 return -EBUSY;
8284 #endif
8285
8286 if (*ppos & (PAGE_SIZE - 1))
8287 return -EINVAL;
8288
8289 if (len & (PAGE_SIZE - 1)) {
8290 if (len < PAGE_SIZE)
8291 return -EINVAL;
8292 len &= PAGE_MASK;
8293 }
8294
8295 if (splice_grow_spd(pipe, &spd))
8296 return -ENOMEM;
8297
8298 again:
8299 trace_access_lock(iter->cpu_file);
8300 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8301
8302 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8303 struct page *page;
8304 int r;
8305
8306 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8307 if (!ref) {
8308 ret = -ENOMEM;
8309 break;
8310 }
8311
8312 refcount_set(&ref->refcount, 1);
8313 ref->buffer = iter->array_buffer->buffer;
8314 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8315 if (IS_ERR(ref->page)) {
8316 ret = PTR_ERR(ref->page);
8317 ref->page = NULL;
8318 kfree(ref);
8319 break;
8320 }
8321 ref->cpu = iter->cpu_file;
8322
8323 r = ring_buffer_read_page(ref->buffer, &ref->page,
8324 len, iter->cpu_file, 1);
8325 if (r < 0) {
8326 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8327 ref->page);
8328 kfree(ref);
8329 break;
8330 }
8331
8332 page = virt_to_page(ref->page);
8333
8334 spd.pages[i] = page;
8335 spd.partial[i].len = PAGE_SIZE;
8336 spd.partial[i].offset = 0;
8337 spd.partial[i].private = (unsigned long)ref;
8338 spd.nr_pages++;
8339 *ppos += PAGE_SIZE;
8340
8341 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8342 }
8343
8344 trace_access_unlock(iter->cpu_file);
8345 spd.nr_pages = i;
8346
8347 /* did we read anything? */
8348 if (!spd.nr_pages) {
8349 long wait_index;
8350
8351 if (ret)
8352 goto out;
8353
8354 ret = -EAGAIN;
8355 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8356 goto out;
8357
8358 wait_index = READ_ONCE(iter->wait_index);
8359
8360 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8361 if (ret)
8362 goto out;
8363
8364 /* No need to wait after waking up when tracing is off */
8365 if (!tracer_tracing_is_on(iter->tr))
8366 goto out;
8367
8368 /* Make sure we see the new wait_index */
8369 smp_rmb();
8370 if (wait_index != iter->wait_index)
8371 goto out;
8372
8373 goto again;
8374 }
8375
8376 ret = splice_to_pipe(pipe, &spd);
8377 out:
8378 splice_shrink_spd(&spd);
8379
8380 return ret;
8381 }
8382
8383 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8384 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8385 {
8386 struct ftrace_buffer_info *info = file->private_data;
8387 struct trace_iterator *iter = &info->iter;
8388
8389 if (cmd)
8390 return -ENOIOCTLCMD;
8391
8392 mutex_lock(&trace_types_lock);
8393
8394 iter->wait_index++;
8395 /* Make sure the waiters see the new wait_index */
8396 smp_wmb();
8397
8398 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8399
8400 mutex_unlock(&trace_types_lock);
8401 return 0;
8402 }
8403
8404 static const struct file_operations tracing_buffers_fops = {
8405 .open = tracing_buffers_open,
8406 .read = tracing_buffers_read,
8407 .poll = tracing_buffers_poll,
8408 .release = tracing_buffers_release,
8409 .splice_read = tracing_buffers_splice_read,
8410 .unlocked_ioctl = tracing_buffers_ioctl,
8411 .llseek = no_llseek,
8412 };
8413
8414 static ssize_t
8415 tracing_stats_read(struct file *filp, char __user *ubuf,
8416 size_t count, loff_t *ppos)
8417 {
8418 struct inode *inode = file_inode(filp);
8419 struct trace_array *tr = inode->i_private;
8420 struct array_buffer *trace_buf = &tr->array_buffer;
8421 int cpu = tracing_get_cpu(inode);
8422 struct trace_seq *s;
8423 unsigned long cnt;
8424 unsigned long long t;
8425 unsigned long usec_rem;
8426
8427 s = kmalloc(sizeof(*s), GFP_KERNEL);
8428 if (!s)
8429 return -ENOMEM;
8430
8431 trace_seq_init(s);
8432
8433 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8434 trace_seq_printf(s, "entries: %ld\n", cnt);
8435
8436 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8437 trace_seq_printf(s, "overrun: %ld\n", cnt);
8438
8439 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8440 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8441
8442 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8443 trace_seq_printf(s, "bytes: %ld\n", cnt);
8444
8445 if (trace_clocks[tr->clock_id].in_ns) {
8446 /* local or global for trace_clock */
8447 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8448 usec_rem = do_div(t, USEC_PER_SEC);
8449 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8450 t, usec_rem);
8451
8452 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8453 usec_rem = do_div(t, USEC_PER_SEC);
8454 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8455 } else {
8456 /* counter or tsc mode for trace_clock */
8457 trace_seq_printf(s, "oldest event ts: %llu\n",
8458 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8459
8460 trace_seq_printf(s, "now ts: %llu\n",
8461 ring_buffer_time_stamp(trace_buf->buffer));
8462 }
8463
8464 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8465 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8466
8467 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8468 trace_seq_printf(s, "read events: %ld\n", cnt);
8469
8470 count = simple_read_from_buffer(ubuf, count, ppos,
8471 s->buffer, trace_seq_used(s));
8472
8473 kfree(s);
8474
8475 return count;
8476 }
8477
8478 static const struct file_operations tracing_stats_fops = {
8479 .open = tracing_open_generic_tr,
8480 .read = tracing_stats_read,
8481 .llseek = generic_file_llseek,
8482 .release = tracing_release_generic_tr,
8483 };
8484
8485 #ifdef CONFIG_DYNAMIC_FTRACE
8486
8487 static ssize_t
8488 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8489 size_t cnt, loff_t *ppos)
8490 {
8491 ssize_t ret;
8492 char *buf;
8493 int r;
8494
8495 /* 256 should be plenty to hold the amount needed */
8496 buf = kmalloc(256, GFP_KERNEL);
8497 if (!buf)
8498 return -ENOMEM;
8499
8500 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8501 ftrace_update_tot_cnt,
8502 ftrace_number_of_pages,
8503 ftrace_number_of_groups);
8504
8505 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8506 kfree(buf);
8507 return ret;
8508 }
8509
8510 static const struct file_operations tracing_dyn_info_fops = {
8511 .open = tracing_open_generic,
8512 .read = tracing_read_dyn_info,
8513 .llseek = generic_file_llseek,
8514 };
8515 #endif /* CONFIG_DYNAMIC_FTRACE */
8516
8517 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8518 static void
8519 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8520 struct trace_array *tr, struct ftrace_probe_ops *ops,
8521 void *data)
8522 {
8523 tracing_snapshot_instance(tr);
8524 }
8525
8526 static void
8527 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8528 struct trace_array *tr, struct ftrace_probe_ops *ops,
8529 void *data)
8530 {
8531 struct ftrace_func_mapper *mapper = data;
8532 long *count = NULL;
8533
8534 if (mapper)
8535 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8536
8537 if (count) {
8538
8539 if (*count <= 0)
8540 return;
8541
8542 (*count)--;
8543 }
8544
8545 tracing_snapshot_instance(tr);
8546 }
8547
8548 static int
8549 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8550 struct ftrace_probe_ops *ops, void *data)
8551 {
8552 struct ftrace_func_mapper *mapper = data;
8553 long *count = NULL;
8554
8555 seq_printf(m, "%ps:", (void *)ip);
8556
8557 seq_puts(m, "snapshot");
8558
8559 if (mapper)
8560 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8561
8562 if (count)
8563 seq_printf(m, ":count=%ld\n", *count);
8564 else
8565 seq_puts(m, ":unlimited\n");
8566
8567 return 0;
8568 }
8569
8570 static int
8571 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8572 unsigned long ip, void *init_data, void **data)
8573 {
8574 struct ftrace_func_mapper *mapper = *data;
8575
8576 if (!mapper) {
8577 mapper = allocate_ftrace_func_mapper();
8578 if (!mapper)
8579 return -ENOMEM;
8580 *data = mapper;
8581 }
8582
8583 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8584 }
8585
8586 static void
8587 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8588 unsigned long ip, void *data)
8589 {
8590 struct ftrace_func_mapper *mapper = data;
8591
8592 if (!ip) {
8593 if (!mapper)
8594 return;
8595 free_ftrace_func_mapper(mapper, NULL);
8596 return;
8597 }
8598
8599 ftrace_func_mapper_remove_ip(mapper, ip);
8600 }
8601
8602 static struct ftrace_probe_ops snapshot_probe_ops = {
8603 .func = ftrace_snapshot,
8604 .print = ftrace_snapshot_print,
8605 };
8606
8607 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8608 .func = ftrace_count_snapshot,
8609 .print = ftrace_snapshot_print,
8610 .init = ftrace_snapshot_init,
8611 .free = ftrace_snapshot_free,
8612 };
8613
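/*
 * Handler for the "snapshot" function command, of the form
 * <glob>:snapshot[:count] written to set_ftrace_filter (a leading '!'
 * unregisters the probe). With a count, only that many snapshots are
 * taken.
 */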
8614 static int
8615 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8616 char *glob, char *cmd, char *param, int enable)
8617 {
8618 struct ftrace_probe_ops *ops;
8619 void *count = (void *)-1;
8620 char *number;
8621 int ret;
8622
8623 if (!tr)
8624 return -ENODEV;
8625
8626 /* hash funcs only work with set_ftrace_filter */
8627 if (!enable)
8628 return -EINVAL;
8629
8630 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8631
8632 if (glob[0] == '!')
8633 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8634
8635 if (!param)
8636 goto out_reg;
8637
8638 number = strsep(&param, ":");
8639
8640 if (!strlen(number))
8641 goto out_reg;
8642
8643 /*
8644 * We use the callback data field (which is a pointer)
8645 * as our counter.
8646 */
8647 ret = kstrtoul(number, 0, (unsigned long *)&count);
8648 if (ret)
8649 return ret;
8650
8651 out_reg:
8652 ret = tracing_alloc_snapshot_instance(tr);
8653 if (ret < 0)
8654 goto out;
8655
8656 ret = register_ftrace_function_probe(glob, tr, ops, count);
8657
8658 out:
8659 return ret < 0 ? ret : 0;
8660 }
8661
8662 static struct ftrace_func_command ftrace_snapshot_cmd = {
8663 .name = "snapshot",
8664 .func = ftrace_trace_snapshot_callback,
8665 };
8666
8667 static __init int register_snapshot_cmd(void)
8668 {
8669 return register_ftrace_command(&ftrace_snapshot_cmd);
8670 }
8671 #else
8672 static inline __init int register_snapshot_cmd(void) { return 0; }
8673 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8674
8675 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8676 {
8677 if (WARN_ON(!tr->dir))
8678 return ERR_PTR(-ENODEV);
8679
8680 /* Top directory uses NULL as the parent */
8681 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8682 return NULL;
8683
8684 /* All sub buffers have a descriptor */
8685 return tr->dir;
8686 }
8687
8688 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8689 {
8690 struct dentry *d_tracer;
8691
8692 if (tr->percpu_dir)
8693 return tr->percpu_dir;
8694
8695 d_tracer = tracing_get_dentry(tr);
8696 if (IS_ERR(d_tracer))
8697 return NULL;
8698
8699 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8700
8701 MEM_FAIL(!tr->percpu_dir,
8702 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8703
8704 return tr->percpu_dir;
8705 }
8706
8707 static struct dentry *
8708 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8709 void *data, long cpu, const struct file_operations *fops)
8710 {
8711 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8712
8713 if (ret) /* See tracing_get_cpu() */
8714 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8715 return ret;
8716 }
8717
8718 static void
8719 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8720 {
8721 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8722 struct dentry *d_cpu;
8723 char cpu_dir[30]; /* 30 characters should be more than enough */
8724
8725 if (!d_percpu)
8726 return;
8727
8728 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8729 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8730 if (!d_cpu) {
8731 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8732 return;
8733 }
8734
8735 /* per cpu trace_pipe */
8736 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8737 tr, cpu, &tracing_pipe_fops);
8738
8739 /* per cpu trace */
8740 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8741 tr, cpu, &tracing_fops);
8742
8743 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8744 tr, cpu, &tracing_buffers_fops);
8745
8746 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8747 tr, cpu, &tracing_stats_fops);
8748
8749 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8750 tr, cpu, &tracing_entries_fops);
8751
8752 #ifdef CONFIG_TRACER_SNAPSHOT
8753 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8754 tr, cpu, &snapshot_fops);
8755
8756 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8757 tr, cpu, &snapshot_raw_fops);
8758 #endif
8759 }
8760
8761 #ifdef CONFIG_FTRACE_SELFTEST
8762 /* Let selftest have access to static functions in this file */
8763 #include "trace_selftest.c"
8764 #endif
8765
8766 static ssize_t
8767 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8768 loff_t *ppos)
8769 {
8770 struct trace_option_dentry *topt = filp->private_data;
8771 char *buf;
8772
8773 if (topt->flags->val & topt->opt->bit)
8774 buf = "1\n";
8775 else
8776 buf = "0\n";
8777
8778 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8779 }
8780
8781 static ssize_t
8782 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8783 loff_t *ppos)
8784 {
8785 struct trace_option_dentry *topt = filp->private_data;
8786 unsigned long val;
8787 int ret;
8788
8789 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8790 if (ret)
8791 return ret;
8792
8793 if (val != 0 && val != 1)
8794 return -EINVAL;
8795
8796 if (!!(topt->flags->val & topt->opt->bit) != val) {
8797 mutex_lock(&trace_types_lock);
8798 ret = __set_tracer_option(topt->tr, topt->flags,
8799 topt->opt, !val);
8800 mutex_unlock(&trace_types_lock);
8801 if (ret)
8802 return ret;
8803 }
8804
8805 *ppos += cnt;
8806
8807 return cnt;
8808 }
8809
8810
8811 static const struct file_operations trace_options_fops = {
8812 .open = tracing_open_generic,
8813 .read = trace_options_read,
8814 .write = trace_options_write,
8815 .llseek = generic_file_llseek,
8816 };
8817
8818 /*
8819 * In order to pass in both the trace_array descriptor as well as the index
8820 * to the flag that the trace option file represents, the trace_array
8821 * has a character array of trace_flags_index[], which holds the index
8822 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8823 * The address of this character array is passed to the flag option file
8824 * read/write callbacks.
8825 *
8826 * In order to extract both the index and the trace_array descriptor,
8827 * get_tr_index() uses the following algorithm.
8828 *
8829 * idx = *ptr;
8830 *
8831 * The pointer itself is the address of one entry in that index array,
8832 * and the entry's value is its own index (remember index[1] == 1).
8833 *
8834 * Then, to get the trace_array descriptor, subtract that index from
8835 * the pointer to get back to the start of the index array itself:
8836 *
8837 * ptr - idx == &index[0]
8838 *
8839 * Then a simple container_of() from that pointer gets us to the
8840 * trace_array descriptor.
8841 */
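/*
 * For example, if @data points at &tr->trace_flags_index[3], then
 * idx = 3 and (data - idx) == &tr->trace_flags_index[0], from which
 * container_of() recovers @tr.
 */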
8842 static void get_tr_index(void *data, struct trace_array **ptr,
8843 unsigned int *pindex)
8844 {
8845 *pindex = *(unsigned char *)data;
8846
8847 *ptr = container_of(data - *pindex, struct trace_array,
8848 trace_flags_index);
8849 }
8850
8851 static ssize_t
8852 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8853 loff_t *ppos)
8854 {
8855 void *tr_index = filp->private_data;
8856 struct trace_array *tr;
8857 unsigned int index;
8858 char *buf;
8859
8860 get_tr_index(tr_index, &tr, &index);
8861
8862 if (tr->trace_flags & (1 << index))
8863 buf = "1\n";
8864 else
8865 buf = "0\n";
8866
8867 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8868 }
8869
8870 static ssize_t
8871 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8872 loff_t *ppos)
8873 {
8874 void *tr_index = filp->private_data;
8875 struct trace_array *tr;
8876 unsigned int index;
8877 unsigned long val;
8878 int ret;
8879
8880 get_tr_index(tr_index, &tr, &index);
8881
8882 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8883 if (ret)
8884 return ret;
8885
8886 if (val != 0 && val != 1)
8887 return -EINVAL;
8888
8889 mutex_lock(&event_mutex);
8890 mutex_lock(&trace_types_lock);
8891 ret = set_tracer_flag(tr, 1 << index, val);
8892 mutex_unlock(&trace_types_lock);
8893 mutex_unlock(&event_mutex);
8894
8895 if (ret < 0)
8896 return ret;
8897
8898 *ppos += cnt;
8899
8900 return cnt;
8901 }
8902
8903 static const struct file_operations trace_options_core_fops = {
8904 .open = tracing_open_generic,
8905 .read = trace_options_core_read,
8906 .write = trace_options_core_write,
8907 .llseek = generic_file_llseek,
8908 };
8909
8910 struct dentry *trace_create_file(const char *name,
8911 umode_t mode,
8912 struct dentry *parent,
8913 void *data,
8914 const struct file_operations *fops)
8915 {
8916 struct dentry *ret;
8917
8918 ret = tracefs_create_file(name, mode, parent, data, fops);
8919 if (!ret)
8920 pr_warn("Could not create tracefs '%s' entry\n", name);
8921
8922 return ret;
8923 }
8924
8925
8926 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8927 {
8928 struct dentry *d_tracer;
8929
8930 if (tr->options)
8931 return tr->options;
8932
8933 d_tracer = tracing_get_dentry(tr);
8934 if (IS_ERR(d_tracer))
8935 return NULL;
8936
8937 tr->options = tracefs_create_dir("options", d_tracer);
8938 if (!tr->options) {
8939 pr_warn("Could not create tracefs directory 'options'\n");
8940 return NULL;
8941 }
8942
8943 return tr->options;
8944 }
8945
8946 static void
8947 create_trace_option_file(struct trace_array *tr,
8948 struct trace_option_dentry *topt,
8949 struct tracer_flags *flags,
8950 struct tracer_opt *opt)
8951 {
8952 struct dentry *t_options;
8953
8954 t_options = trace_options_init_dentry(tr);
8955 if (!t_options)
8956 return;
8957
8958 topt->flags = flags;
8959 topt->opt = opt;
8960 topt->tr = tr;
8961
8962 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8963 t_options, topt, &trace_options_fops);
8964
8965 }
8966
8967 static void
8968 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8969 {
8970 struct trace_option_dentry *topts;
8971 struct trace_options *tr_topts;
8972 struct tracer_flags *flags;
8973 struct tracer_opt *opts;
8974 int cnt;
8975 int i;
8976
8977 if (!tracer)
8978 return;
8979
8980 flags = tracer->flags;
8981
8982 if (!flags || !flags->opts)
8983 return;
8984
8985 /*
8986 * If this is an instance, only create flags for tracers
8987 * the instance may have.
8988 */
8989 if (!trace_ok_for_array(tracer, tr))
8990 return;
8991
8992 for (i = 0; i < tr->nr_topts; i++) {
8993 /* Make sure there's no duplicate flags. */
8994 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8995 return;
8996 }
8997
8998 opts = flags->opts;
8999
9000 for (cnt = 0; opts[cnt].name; cnt++)
9001 ;
9002
9003 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9004 if (!topts)
9005 return;
9006
9007 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9008 GFP_KERNEL);
9009 if (!tr_topts) {
9010 kfree(topts);
9011 return;
9012 }
9013
9014 tr->topts = tr_topts;
9015 tr->topts[tr->nr_topts].tracer = tracer;
9016 tr->topts[tr->nr_topts].topts = topts;
9017 tr->nr_topts++;
9018
9019 for (cnt = 0; opts[cnt].name; cnt++) {
9020 create_trace_option_file(tr, &topts[cnt], flags,
9021 &opts[cnt]);
9022 MEM_FAIL(topts[cnt].entry == NULL,
9023 "Failed to create trace option: %s",
9024 opts[cnt].name);
9025 }
9026 }
9027
9028 static struct dentry *
9029 create_trace_option_core_file(struct trace_array *tr,
9030 const char *option, long index)
9031 {
9032 struct dentry *t_options;
9033
9034 t_options = trace_options_init_dentry(tr);
9035 if (!t_options)
9036 return NULL;
9037
9038 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9039 (void *)&tr->trace_flags_index[index],
9040 &trace_options_core_fops);
9041 }
9042
9043 static void create_trace_options_dir(struct trace_array *tr)
9044 {
9045 struct dentry *t_options;
9046 bool top_level = tr == &global_trace;
9047 int i;
9048
9049 t_options = trace_options_init_dentry(tr);
9050 if (!t_options)
9051 return;
9052
9053 for (i = 0; trace_options[i]; i++) {
9054 if (top_level ||
9055 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9056 create_trace_option_core_file(tr, trace_options[i], i);
9057 }
9058 }
9059
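/*
 * Read/write handlers for the ring buffer on/off switch (used for the
 * "tracing_on" control): writing 1 or 0 turns recording on or off and
 * calls the current tracer's start/stop hooks; reading reports the
 * current state.
 */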
9060 static ssize_t
9061 rb_simple_read(struct file *filp, char __user *ubuf,
9062 size_t cnt, loff_t *ppos)
9063 {
9064 struct trace_array *tr = filp->private_data;
9065 char buf[64];
9066 int r;
9067
9068 r = tracer_tracing_is_on(tr);
9069 r = sprintf(buf, "%d\n", r);
9070
9071 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9072 }
9073
9074 static ssize_t
9075 rb_simple_write(struct file *filp, const char __user *ubuf,
9076 size_t cnt, loff_t *ppos)
9077 {
9078 struct trace_array *tr = filp->private_data;
9079 struct trace_buffer *buffer = tr->array_buffer.buffer;
9080 unsigned long val;
9081 int ret;
9082
9083 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9084 if (ret)
9085 return ret;
9086
9087 if (buffer) {
9088 mutex_lock(&trace_types_lock);
9089 if (!!val == tracer_tracing_is_on(tr)) {
9090 val = 0; /* do nothing */
9091 } else if (val) {
9092 tracer_tracing_on(tr);
9093 if (tr->current_trace->start)
9094 tr->current_trace->start(tr);
9095 } else {
9096 tracer_tracing_off(tr);
9097 if (tr->current_trace->stop)
9098 tr->current_trace->stop(tr);
9099 /* Wake up any waiters */
9100 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9101 }
9102 mutex_unlock(&trace_types_lock);
9103 }
9104
9105 (*ppos)++;
9106
9107 return cnt;
9108 }
9109
9110 static const struct file_operations rb_simple_fops = {
9111 .open = tracing_open_generic_tr,
9112 .read = rb_simple_read,
9113 .write = rb_simple_write,
9114 .release = tracing_release_generic_tr,
9115 .llseek = default_llseek,
9116 };
9117
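/*
 * "buffer_percent" controls how full the ring buffer must be before a
 * blocked reader is woken up (see wait_on_pipe() callers above). Writes
 * of 0-100 are accepted, with 0 treated as 1.
 */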
9118 static ssize_t
9119 buffer_percent_read(struct file *filp, char __user *ubuf,
9120 size_t cnt, loff_t *ppos)
9121 {
9122 struct trace_array *tr = filp->private_data;
9123 char buf[64];
9124 int r;
9125
9126 r = tr->buffer_percent;
9127 r = sprintf(buf, "%d\n", r);
9128
9129 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9130 }
9131
9132 static ssize_t
9133 buffer_percent_write(struct file *filp, const char __user *ubuf,
9134 size_t cnt, loff_t *ppos)
9135 {
9136 struct trace_array *tr = filp->private_data;
9137 unsigned long val;
9138 int ret;
9139
9140 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9141 if (ret)
9142 return ret;
9143
9144 if (val > 100)
9145 return -EINVAL;
9146
9147 if (!val)
9148 val = 1;
9149
9150 tr->buffer_percent = val;
9151
9152 (*ppos)++;
9153
9154 return cnt;
9155 }
9156
9157 static const struct file_operations buffer_percent_fops = {
9158 .open = tracing_open_generic_tr,
9159 .read = buffer_percent_read,
9160 .write = buffer_percent_write,
9161 .release = tracing_release_generic_tr,
9162 .llseek = default_llseek,
9163 };
9164
9165 static struct dentry *trace_instance_dir;
9166
9167 static void
9168 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9169
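/*
 * Allocate the ring buffer and the per-CPU data for one array_buffer.
 * Whether the buffer overwrites old events or stops when full follows
 * the trace array's TRACE_ITER_OVERWRITE flag.
 */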
9170 static int
9171 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9172 {
9173 enum ring_buffer_flags rb_flags;
9174
9175 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9176
9177 buf->tr = tr;
9178
9179 buf->buffer = ring_buffer_alloc(size, rb_flags);
9180 if (!buf->buffer)
9181 return -ENOMEM;
9182
9183 buf->data = alloc_percpu(struct trace_array_cpu);
9184 if (!buf->data) {
9185 ring_buffer_free(buf->buffer);
9186 buf->buffer = NULL;
9187 return -ENOMEM;
9188 }
9189
9190 /* Allocate the first page for all buffers */
9191 set_buffer_entries(&tr->array_buffer,
9192 ring_buffer_size(tr->array_buffer.buffer, 0));
9193
9194 return 0;
9195 }
9196
9197 static void free_trace_buffer(struct array_buffer *buf)
9198 {
9199 if (buf->buffer) {
9200 ring_buffer_free(buf->buffer);
9201 buf->buffer = NULL;
9202 free_percpu(buf->data);
9203 buf->data = NULL;
9204 }
9205 }
9206
9207 static int allocate_trace_buffers(struct trace_array *tr, int size)
9208 {
9209 int ret;
9210
9211 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9212 if (ret)
9213 return ret;
9214
9215 #ifdef CONFIG_TRACER_MAX_TRACE
9216 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9217 allocate_snapshot ? size : 1);
9218 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9219 free_trace_buffer(&tr->array_buffer);
9220 return -ENOMEM;
9221 }
9222 tr->allocated_snapshot = allocate_snapshot;
9223
9224 /*
9225 * Only the top level trace array gets its snapshot allocated
9226 * from the kernel command line.
9227 */
9228 allocate_snapshot = false;
9229 #endif
9230
9231 return 0;
9232 }
9233
9234 static void free_trace_buffers(struct trace_array *tr)
9235 {
9236 if (!tr)
9237 return;
9238
9239 free_trace_buffer(&tr->array_buffer);
9240
9241 #ifdef CONFIG_TRACER_MAX_TRACE
9242 free_trace_buffer(&tr->max_buffer);
9243 #endif
9244 }
9245
9246 static void init_trace_flags_index(struct trace_array *tr)
9247 {
9248 int i;
9249
9250 /* Used by the trace options files */
9251 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9252 tr->trace_flags_index[i] = i;
9253 }
9254
9255 static void __update_tracer_options(struct trace_array *tr)
9256 {
9257 struct tracer *t;
9258
9259 for (t = trace_types; t; t = t->next)
9260 add_tracer_options(tr, t);
9261 }
9262
9263 static void update_tracer_options(struct trace_array *tr)
9264 {
9265 mutex_lock(&trace_types_lock);
9266 tracer_options_updated = true;
9267 __update_tracer_options(tr);
9268 mutex_unlock(&trace_types_lock);
9269 }
9270
9271 /* Must have trace_types_lock held */
9272 struct trace_array *trace_array_find(const char *instance)
9273 {
9274 struct trace_array *tr, *found = NULL;
9275
9276 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9277 if (tr->name && strcmp(tr->name, instance) == 0) {
9278 found = tr;
9279 break;
9280 }
9281 }
9282
9283 return found;
9284 }
9285
9286 struct trace_array *trace_array_find_get(const char *instance)
9287 {
9288 struct trace_array *tr;
9289
9290 mutex_lock(&trace_types_lock);
9291 tr = trace_array_find(instance);
9292 if (tr)
9293 tr->ref++;
9294 mutex_unlock(&trace_types_lock);
9295
9296 return tr;
9297 }
9298
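/*
 * Create the tracefs directory for an instance and populate it with the
 * per-instance event files, tracer files and tracer options.
 */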
9299 static int trace_array_create_dir(struct trace_array *tr)
9300 {
9301 int ret;
9302
9303 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9304 if (!tr->dir)
9305 return -EINVAL;
9306
9307 ret = event_trace_add_tracer(tr->dir, tr);
9308 if (ret) {
9309 tracefs_remove(tr->dir);
9310 return ret;
9311 }
9312
9313 init_tracer_tracefs(tr, tr->dir);
9314 __update_tracer_options(tr);
9315
9316 return ret;
9317 }
9318
9319 static struct trace_array *trace_array_create(const char *name)
9320 {
9321 struct trace_array *tr;
9322 int ret;
9323
9324 ret = -ENOMEM;
9325 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9326 if (!tr)
9327 return ERR_PTR(ret);
9328
9329 tr->name = kstrdup(name, GFP_KERNEL);
9330 if (!tr->name)
9331 goto out_free_tr;
9332
9333 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9334 goto out_free_tr;
9335
9336 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9337
9338 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9339
9340 raw_spin_lock_init(&tr->start_lock);
9341
9342 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9343
9344 tr->current_trace = &nop_trace;
9345
9346 INIT_LIST_HEAD(&tr->systems);
9347 INIT_LIST_HEAD(&tr->events);
9348 INIT_LIST_HEAD(&tr->hist_vars);
9349 INIT_LIST_HEAD(&tr->err_log);
9350
9351 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9352 goto out_free_tr;
9353
9354 if (ftrace_allocate_ftrace_ops(tr) < 0)
9355 goto out_free_tr;
9356
9357 ftrace_init_trace_array(tr);
9358
9359 init_trace_flags_index(tr);
9360
9361 if (trace_instance_dir) {
9362 ret = trace_array_create_dir(tr);
9363 if (ret)
9364 goto out_free_tr;
9365 } else
9366 __trace_early_add_events(tr);
9367
9368 list_add(&tr->list, &ftrace_trace_arrays);
9369
9370 tr->ref++;
9371
9372 return tr;
9373
9374 out_free_tr:
9375 ftrace_free_ftrace_ops(tr);
9376 free_trace_buffers(tr);
9377 free_cpumask_var(tr->tracing_cpumask);
9378 kfree(tr->name);
9379 kfree(tr);
9380
9381 return ERR_PTR(ret);
9382 }
9383
9384 static int instance_mkdir(const char *name)
9385 {
9386 struct trace_array *tr;
9387 int ret;
9388
9389 mutex_lock(&event_mutex);
9390 mutex_lock(&trace_types_lock);
9391
9392 ret = -EEXIST;
9393 if (trace_array_find(name))
9394 goto out_unlock;
9395
9396 tr = trace_array_create(name);
9397
9398 ret = PTR_ERR_OR_ZERO(tr);
9399
9400 out_unlock:
9401 mutex_unlock(&trace_types_lock);
9402 mutex_unlock(&event_mutex);
9403 return ret;
9404 }
9405
9406 /**
9407 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9408 * @name: The name of the trace array to be looked up/created.
9409 *
9410 * Returns a pointer to the trace array with the given name, or
9411 * NULL if it cannot be created.
9412 *
9413 * NOTE: This function increments the reference counter associated with the
9414 * trace array returned. This makes sure it cannot be freed while in use.
9415 * Use trace_array_put() once the trace array is no longer needed.
9416 * If the trace_array is to be freed, trace_array_destroy() needs to
9417 * be called after the trace_array_put(), or simply let user space delete
9418 * it from the tracefs instances directory. But until the
9419 * trace_array_put() is called, user space cannot delete it.
9420 *
9421 */
9422 struct trace_array *trace_array_get_by_name(const char *name)
9423 {
9424 struct trace_array *tr;
9425
9426 mutex_lock(&event_mutex);
9427 mutex_lock(&trace_types_lock);
9428
9429 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9430 if (tr->name && strcmp(tr->name, name) == 0)
9431 goto out_unlock;
9432 }
9433
9434 tr = trace_array_create(name);
9435
9436 if (IS_ERR(tr))
9437 tr = NULL;
9438 out_unlock:
9439 if (tr)
9440 tr->ref++;
9441
9442 mutex_unlock(&trace_types_lock);
9443 mutex_unlock(&event_mutex);
9444 return tr;
9445 }
9446 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
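/*
 * Illustrative use by a hypothetical in-kernel caller (not part of this
 * file):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}
 */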
9447
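/* Tear down an instance.  Caller must hold event_mutex and trace_types_lock. */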
9448 static int __remove_instance(struct trace_array *tr)
9449 {
9450 int i;
9451
9452 /* Reference counter for a newly created trace array = 1. */
9453 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9454 return -EBUSY;
9455
9456 list_del(&tr->list);
9457
9458 /* Disable all the flags that were enabled coming in */
9459 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9460 if ((1 << i) & ZEROED_TRACE_FLAGS)
9461 set_tracer_flag(tr, 1 << i, 0);
9462 }
9463
9464 tracing_set_nop(tr);
9465 clear_ftrace_function_probes(tr);
9466 event_trace_del_tracer(tr);
9467 ftrace_clear_pids(tr);
9468 ftrace_destroy_function_files(tr);
9469 tracefs_remove(tr->dir);
9470 free_percpu(tr->last_func_repeats);
9471 free_trace_buffers(tr);
9472
9473 for (i = 0; i < tr->nr_topts; i++) {
9474 kfree(tr->topts[i].topts);
9475 }
9476 kfree(tr->topts);
9477
9478 free_cpumask_var(tr->tracing_cpumask);
9479 kfree(tr->name);
9480 kfree(tr);
9481
9482 return 0;
9483 }
9484
9485 int trace_array_destroy(struct trace_array *this_tr)
9486 {
9487 struct trace_array *tr;
9488 int ret;
9489
9490 if (!this_tr)
9491 return -EINVAL;
9492
9493 mutex_lock(&event_mutex);
9494 mutex_lock(&trace_types_lock);
9495
9496 ret = -ENODEV;
9497
9498 /* Make sure the trace array exists before destroying it. */
9499 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9500 if (tr == this_tr) {
9501 ret = __remove_instance(tr);
9502 break;
9503 }
9504 }
9505
9506 mutex_unlock(&trace_types_lock);
9507 mutex_unlock(&event_mutex);
9508
9509 return ret;
9510 }
9511 EXPORT_SYMBOL_GPL(trace_array_destroy);
9512
9513 static int instance_rmdir(const char *name)
9514 {
9515 struct trace_array *tr;
9516 int ret;
9517
9518 mutex_lock(&event_mutex);
9519 mutex_lock(&trace_types_lock);
9520
9521 ret = -ENODEV;
9522 tr = trace_array_find(name);
9523 if (tr)
9524 ret = __remove_instance(tr);
9525
9526 mutex_unlock(&trace_types_lock);
9527 mutex_unlock(&event_mutex);
9528
9529 return ret;
9530 }
9531
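/*
 * Create the "instances" directory and add directories for any named
 * trace arrays that were created before tracefs was ready.
 */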
9532 static __init void create_trace_instances(struct dentry *d_tracer)
9533 {
9534 struct trace_array *tr;
9535
9536 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9537 instance_mkdir,
9538 instance_rmdir);
9539 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9540 return;
9541
9542 mutex_lock(&event_mutex);
9543 mutex_lock(&trace_types_lock);
9544
9545 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9546 if (!tr->name)
9547 continue;
9548 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9549 "Failed to create instance directory\n"))
9550 break;
9551 }
9552
9553 mutex_unlock(&trace_types_lock);
9554 mutex_unlock(&event_mutex);
9555 }
9556
9557 static void
9558 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9559 {
9560 struct trace_event_file *file;
9561 int cpu;
9562
9563 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9564 tr, &show_traces_fops);
9565
9566 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9567 tr, &set_tracer_fops);
9568
9569 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9570 tr, &tracing_cpumask_fops);
9571
9572 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9573 tr, &tracing_iter_fops);
9574
9575 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9576 tr, &tracing_fops);
9577
9578 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9579 tr, &tracing_pipe_fops);
9580
9581 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9582 tr, &tracing_entries_fops);
9583
9584 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9585 tr, &tracing_total_entries_fops);
9586
9587 trace_create_file("free_buffer", 0200, d_tracer,
9588 tr, &tracing_free_buffer_fops);
9589
9590 trace_create_file("trace_marker", 0220, d_tracer,
9591 tr, &tracing_mark_fops);
9592
9593 file = __find_event_file(tr, "ftrace", "print");
9594 if (file && file->dir)
9595 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9596 file, &event_trigger_fops);
9597 tr->trace_marker_file = file;
9598
9599 trace_create_file("trace_marker_raw", 0220, d_tracer,
9600 tr, &tracing_mark_raw_fops);
9601
9602 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9603 &trace_clock_fops);
9604
9605 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9606 tr, &rb_simple_fops);
9607
9608 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9609 &trace_time_stamp_mode_fops);
9610
9611 tr->buffer_percent = 50;
9612
9613 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9614 tr, &buffer_percent_fops);
9615
9616 create_trace_options_dir(tr);
9617
9618 #ifdef CONFIG_TRACER_MAX_TRACE
9619 trace_create_maxlat_file(tr, d_tracer);
9620 #endif
9621
9622 if (ftrace_create_function_files(tr, d_tracer))
9623 MEM_FAIL(1, "Could not allocate function filter files");
9624
9625 #ifdef CONFIG_TRACER_SNAPSHOT
9626 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9627 tr, &snapshot_fops);
9628 #endif
9629
9630 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9631 tr, &tracing_err_log_fops);
9632
9633 for_each_tracing_cpu(cpu)
9634 tracing_init_tracefs_percpu(tr, cpu);
9635
9636 ftrace_init_tracefs(tr, d_tracer);
9637 }
9638
9639 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9640 {
9641 struct vfsmount *mnt;
9642 struct file_system_type *type;
9643
9644 /*
9645 * To maintain backward compatibility for tools that mount
9646 * debugfs to get to the tracing facility, tracefs is automatically
9647 * mounted to the debugfs/tracing directory.
9648 */
9649 type = get_fs_type("tracefs");
9650 if (!type)
9651 return NULL;
9652 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9653 put_filesystem(type);
9654 if (IS_ERR(mnt))
9655 return NULL;
9656 mntget(mnt);
9657
9658 return mnt;
9659 }
9660
9661 /**
9662 * tracing_init_dentry - initialize top level trace array
9663 *
9664 * This is called when creating files or directories in the tracing
9665 * directory. It is called via fs_initcall() and by the boot-up code, and
9666 * returns 0 on success or a negative error code on failure.
9667 */
9668 int tracing_init_dentry(void)
9669 {
9670 struct trace_array *tr = &global_trace;
9671
9672 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9673 pr_warn("Tracing disabled due to lockdown\n");
9674 return -EPERM;
9675 }
9676
9677 /* The top level trace array uses NULL as parent */
9678 if (tr->dir)
9679 return 0;
9680
9681 if (WARN_ON(!tracefs_initialized()))
9682 return -ENODEV;
9683
9684 /*
9685 * As there may still be users that expect the tracing
9686 * files to exist in debugfs/tracing, we must automount
9687 * the tracefs file system there, so older tools still
9688 * work with the newer kernel.
9689 */
9690 tr->dir = debugfs_create_automount("tracing", NULL,
9691 trace_automount, NULL);
9692
9693 return 0;
9694 }
9695
9696 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9697 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9698
9699 static struct workqueue_struct *eval_map_wq __initdata;
9700 static struct work_struct eval_map_work __initdata;
9701 static struct work_struct tracerfs_init_work __initdata;
9702
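/*
 * The core kernel's eval maps are inserted from a workqueue when one can
 * be allocated; otherwise trace_eval_init() falls back to doing the work
 * synchronously.
 */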
9703 static void __init eval_map_work_func(struct work_struct *work)
9704 {
9705 int len;
9706
9707 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9708 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9709 }
9710
9711 static int __init trace_eval_init(void)
9712 {
9713 INIT_WORK(&eval_map_work, eval_map_work_func);
9714
9715 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9716 if (!eval_map_wq) {
9717 pr_err("Unable to allocate eval_map_wq\n");
9718 /* Do work here */
9719 eval_map_work_func(&eval_map_work);
9720 return -ENOMEM;
9721 }
9722
9723 queue_work(eval_map_wq, &eval_map_work);
9724 return 0;
9725 }
9726
9727 subsys_initcall(trace_eval_init);
9728
9729 static int __init trace_eval_sync(void)
9730 {
9731 /* Make sure the eval map updates are finished */
9732 if (eval_map_wq)
9733 destroy_workqueue(eval_map_wq);
9734 return 0;
9735 }
9736
9737 late_initcall_sync(trace_eval_sync);
9738
9739
9740 #ifdef CONFIG_MODULES
9741 static void trace_module_add_evals(struct module *mod)
9742 {
9743 if (!mod->num_trace_evals)
9744 return;
9745
9746 /*
9747 * Modules with bad taint do not have events created; do
9748 * not bother with their eval maps either.
9749 */
9750 if (trace_module_has_bad_taint(mod))
9751 return;
9752
9753 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9754 }
9755
9756 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9757 static void trace_module_remove_evals(struct module *mod)
9758 {
9759 union trace_eval_map_item *map;
9760 union trace_eval_map_item **last = &trace_eval_maps;
9761
9762 if (!mod->num_trace_evals)
9763 return;
9764
9765 mutex_lock(&trace_eval_mutex);
9766
9767 map = trace_eval_maps;
9768
9769 while (map) {
9770 if (map->head.mod == mod)
9771 break;
9772 map = trace_eval_jmp_to_tail(map);
9773 last = &map->tail.next;
9774 map = map->tail.next;
9775 }
9776 if (!map)
9777 goto out;
9778
9779 *last = trace_eval_jmp_to_tail(map)->tail.next;
9780 kfree(map);
9781 out:
9782 mutex_unlock(&trace_eval_mutex);
9783 }
9784 #else
9785 static inline void trace_module_remove_evals(struct module *mod) { }
9786 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9787
9788 static int trace_module_notify(struct notifier_block *self,
9789 unsigned long val, void *data)
9790 {
9791 struct module *mod = data;
9792
9793 switch (val) {
9794 case MODULE_STATE_COMING:
9795 trace_module_add_evals(mod);
9796 break;
9797 case MODULE_STATE_GOING:
9798 trace_module_remove_evals(mod);
9799 break;
9800 }
9801
9802 return NOTIFY_OK;
9803 }
9804
9805 static struct notifier_block trace_module_nb = {
9806 .notifier_call = trace_module_notify,
9807 .priority = 0,
9808 };
9809 #endif /* CONFIG_MODULES */
9810
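/*
 * Create the top-level tracefs files.  tracer_init_tracefs() queues this
 * on the eval_map workqueue when it exists, otherwise it calls this
 * function directly.
 */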
9811 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9812 {
9813
9814 event_trace_init();
9815
9816 init_tracer_tracefs(&global_trace, NULL);
9817 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9818
9819 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9820 &global_trace, &tracing_thresh_fops);
9821
9822 trace_create_file("README", TRACE_MODE_READ, NULL,
9823 NULL, &tracing_readme_fops);
9824
9825 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9826 NULL, &tracing_saved_cmdlines_fops);
9827
9828 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9829 NULL, &tracing_saved_cmdlines_size_fops);
9830
9831 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9832 NULL, &tracing_saved_tgids_fops);
9833
9834 trace_create_eval_file(NULL);
9835
9836 #ifdef CONFIG_MODULES
9837 register_module_notifier(&trace_module_nb);
9838 #endif
9839
9840 #ifdef CONFIG_DYNAMIC_FTRACE
9841 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9842 NULL, &tracing_dyn_info_fops);
9843 #endif
9844
9845 create_trace_instances(NULL);
9846
9847 update_tracer_options(&global_trace);
9848 }
9849
9850 static __init int tracer_init_tracefs(void)
9851 {
9852 int ret;
9853
9854 trace_access_lock_init();
9855
9856 ret = tracing_init_dentry();
9857 if (ret)
9858 return 0;
9859
9860 if (eval_map_wq) {
9861 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9862 queue_work(eval_map_wq, &tracerfs_init_work);
9863 } else {
9864 tracer_init_tracefs_work_func(NULL);
9865 }
9866
9867 rv_init_interface();
9868
9869 return 0;
9870 }
9871
9872 fs_initcall(tracer_init_tracefs);
9873
9874 static int trace_panic_handler(struct notifier_block *this,
9875 unsigned long event, void *unused)
9876 {
9877 if (ftrace_dump_on_oops)
9878 ftrace_dump(ftrace_dump_on_oops);
9879 return NOTIFY_OK;
9880 }
9881
9882 static struct notifier_block trace_panic_notifier = {
9883 .notifier_call = trace_panic_handler,
9884 .next = NULL,
9885 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9886 };
9887
9888 static int trace_die_handler(struct notifier_block *self,
9889 unsigned long val,
9890 void *data)
9891 {
9892 switch (val) {
9893 case DIE_OOPS:
9894 if (ftrace_dump_on_oops)
9895 ftrace_dump(ftrace_dump_on_oops);
9896 break;
9897 default:
9898 break;
9899 }
9900 return NOTIFY_OK;
9901 }
9902
9903 static struct notifier_block trace_die_notifier = {
9904 .notifier_call = trace_die_handler,
9905 .priority = 200
9906 };
9907
9908 /*
9909 * printk is set to a max of 1024; we really don't need it that big.
9910 * Nothing should be printing 1000 characters anyway.
9911 */
9912 #define TRACE_MAX_PRINT 1000
9913
9914 /*
9915 * Define here KERN_TRACE so that we have one place to modify
9916 * it if we decide to change what log level the ftrace dump
9917 * should be at.
9918 */
9919 #define KERN_TRACE KERN_EMERG
9920
9921 void
9922 trace_printk_seq(struct trace_seq *s)
9923 {
9924 /* Probably should print a warning here. */
9925 if (s->seq.len >= TRACE_MAX_PRINT)
9926 s->seq.len = TRACE_MAX_PRINT;
9927
9928 /*
9929 * More paranoid code. Although the buffer size is set to
9930 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9931 * an extra layer of protection.
9932 */
9933 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9934 s->seq.len = s->seq.size - 1;
9935
9936 /* Should be NUL terminated, but we are paranoid. */
9937 s->buffer[s->seq.len] = 0;
9938
9939 printk(KERN_TRACE "%s", s->buffer);
9940
9941 trace_seq_init(s);
9942 }
9943
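/*
 * Initialize an iterator over the global trace buffer for oops-time
 * dumping.  Static temp/fmt buffers are used because no memory can be
 * allocated in that context.
 */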
9944 void trace_init_global_iter(struct trace_iterator *iter)
9945 {
9946 iter->tr = &global_trace;
9947 iter->trace = iter->tr->current_trace;
9948 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9949 iter->array_buffer = &global_trace.array_buffer;
9950
9951 if (iter->trace && iter->trace->open)
9952 iter->trace->open(iter);
9953
9954 /* Annotate start of buffers if we had overruns */
9955 if (ring_buffer_overruns(iter->array_buffer->buffer))
9956 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9957
9958 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9959 if (trace_clocks[iter->tr->clock_id].in_ns)
9960 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9961
9962 /* Can not use kmalloc for iter.temp and iter.fmt */
9963 iter->temp = static_temp_buf;
9964 iter->temp_size = STATIC_TEMP_BUF_SIZE;
9965 iter->fmt = static_fmt_buf;
9966 iter->fmt_size = STATIC_FMT_BUF_SIZE;
9967 }
9968
9969 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9970 {
9971 /* use static because iter can be a bit big for the stack */
9972 static struct trace_iterator iter;
9973 static atomic_t dump_running;
9974 struct trace_array *tr = &global_trace;
9975 unsigned int old_userobj;
9976 unsigned long flags;
9977 int cnt = 0, cpu;
9978
9979 /* Only allow one dump user at a time. */
9980 if (atomic_inc_return(&dump_running) != 1) {
9981 atomic_dec(&dump_running);
9982 return;
9983 }
9984
9985 /*
9986 * Always turn off tracing when we dump.
9987 * We don't need to show trace output of what happens
9988 * between multiple crashes.
9989 *
9990 * If the user does a sysrq-z, then they can re-enable
9991 * tracing with echo 1 > tracing_on.
9992 */
9993 tracing_off();
9994
9995 local_irq_save(flags);
9996
9997 /* Simulate the iterator */
9998 trace_init_global_iter(&iter);
9999
10000 for_each_tracing_cpu(cpu) {
10001 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10002 }
10003
10004 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10005
10006 /* don't look at user memory in panic mode */
10007 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10008
10009 switch (oops_dump_mode) {
10010 case DUMP_ALL:
10011 iter.cpu_file = RING_BUFFER_ALL_CPUS;
10012 break;
10013 case DUMP_ORIG:
10014 iter.cpu_file = raw_smp_processor_id();
10015 break;
10016 case DUMP_NONE:
10017 goto out_enable;
10018 default:
10019 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10020 iter.cpu_file = RING_BUFFER_ALL_CPUS;
10021 }
10022
10023 printk(KERN_TRACE "Dumping ftrace buffer:\n");
10024
10025 /* Did function tracer already get disabled? */
10026 if (ftrace_is_dead()) {
10027 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10028 printk("# MAY BE MISSING FUNCTION EVENTS\n");
10029 }
10030
10031 /*
10032 * We need to stop all tracing on all CPUs to read
10033 * the next buffer. This is a bit expensive, but is
10034 * not done often. We read everything we can,
10035 * and then release the locks again.
10036 */
10037
10038 while (!trace_empty(&iter)) {
10039
10040 if (!cnt)
10041 printk(KERN_TRACE "---------------------------------\n");
10042
10043 cnt++;
10044
10045 trace_iterator_reset(&iter);
10046 iter.iter_flags |= TRACE_FILE_LAT_FMT;
10047
10048 if (trace_find_next_entry_inc(&iter) != NULL) {
10049 int ret;
10050
10051 ret = print_trace_line(&iter);
10052 if (ret != TRACE_TYPE_NO_CONSUME)
10053 trace_consume(&iter);
10054 }
10055 touch_nmi_watchdog();
10056
10057 trace_printk_seq(&iter.seq);
10058 }
10059
10060 if (!cnt)
10061 printk(KERN_TRACE " (ftrace buffer empty)\n");
10062 else
10063 printk(KERN_TRACE "---------------------------------\n");
10064
10065 out_enable:
10066 tr->trace_flags |= old_userobj;
10067
10068 for_each_tracing_cpu(cpu) {
10069 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10070 }
10071 atomic_dec(&dump_running);
10072 local_irq_restore(flags);
10073 }
10074 EXPORT_SYMBOL_GPL(ftrace_dump);
10075
10076 #define WRITE_BUFSIZE 4096
10077
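/*
 * Copy a user-space write in WRITE_BUFSIZE chunks, split it into
 * newline-terminated commands, strip '#' comments, and pass each
 * complete line to createfn().  Returns the number of bytes consumed
 * or a negative error.
 */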
10078 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10079 size_t count, loff_t *ppos,
10080 int (*createfn)(const char *))
10081 {
10082 char *kbuf, *buf, *tmp;
10083 int ret = 0;
10084 size_t done = 0;
10085 size_t size;
10086
10087 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10088 if (!kbuf)
10089 return -ENOMEM;
10090
10091 while (done < count) {
10092 size = count - done;
10093
10094 if (size >= WRITE_BUFSIZE)
10095 size = WRITE_BUFSIZE - 1;
10096
10097 if (copy_from_user(kbuf, buffer + done, size)) {
10098 ret = -EFAULT;
10099 goto out;
10100 }
10101 kbuf[size] = '\0';
10102 buf = kbuf;
10103 do {
10104 tmp = strchr(buf, '\n');
10105 if (tmp) {
10106 *tmp = '\0';
10107 size = tmp - buf + 1;
10108 } else {
10109 size = strlen(buf);
10110 if (done + size < count) {
10111 if (buf != kbuf)
10112 break;
10113 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10114 pr_warn("Line length is too long: Should be less than %d\n",
10115 WRITE_BUFSIZE - 2);
10116 ret = -EINVAL;
10117 goto out;
10118 }
10119 }
10120 done += size;
10121
10122 /* Remove comments */
10123 tmp = strchr(buf, '#');
10124
10125 if (tmp)
10126 *tmp = '\0';
10127
10128 ret = createfn(buf);
10129 if (ret)
10130 goto out;
10131 buf += size;
10132
10133 } while (done < count);
10134 }
10135 ret = done;
10136
10137 out:
10138 kfree(kbuf);
10139
10140 return ret;
10141 }
10142
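/*
 * Allocate the global trace buffers, register the nop tracer and hook up
 * the panic/die notifiers.  Called from early_trace_init().
 */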
10143 __init static int tracer_alloc_buffers(void)
10144 {
10145 int ring_buf_size;
10146 int ret = -ENOMEM;
10147
10148
10149 if (security_locked_down(LOCKDOWN_TRACEFS)) {
10150 pr_warn("Tracing disabled due to lockdown\n");
10151 return -EPERM;
10152 }
10153
10154 /*
10155 * Make sure we don't accidentally add more trace options
10156 * than we have bits for.
10157 */
10158 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10159
10160 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10161 goto out;
10162
10163 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10164 goto out_free_buffer_mask;
10165
10166 /* Only allocate trace_printk buffers if a trace_printk exists */
10167 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10168 /* Must be called before global_trace.buffer is allocated */
10169 trace_printk_init_buffers();
10170
10171 /* To save memory, keep the ring buffer size to its minimum */
10172 if (ring_buffer_expanded)
10173 ring_buf_size = trace_buf_size;
10174 else
10175 ring_buf_size = 1;
10176
10177 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10178 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10179
10180 raw_spin_lock_init(&global_trace.start_lock);
10181
10182 /*
10183 * The prepare callback allocates some memory for the ring buffer. We
10184 * don't free the buffer if the CPU goes down. If we were to free
10185 * the buffer, then the user would lose any trace that was in the
10186 * buffer. The memory will be removed once the "instance" is removed.
10187 */
10188 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10189 "trace/RB:prepare", trace_rb_cpu_prepare,
10190 NULL);
10191 if (ret < 0)
10192 goto out_free_cpumask;
10193 /* Used for event triggers */
10194 ret = -ENOMEM;
10195 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10196 if (!temp_buffer)
10197 goto out_rm_hp_state;
10198
10199 if (trace_create_savedcmd() < 0)
10200 goto out_free_temp_buffer;
10201
10202 /* TODO: make the number of buffers hot pluggable with CPUs */
10203 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10204 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10205 goto out_free_savedcmd;
10206 }
10207
10208 if (global_trace.buffer_disabled)
10209 tracing_off();
10210
10211 if (trace_boot_clock) {
10212 ret = tracing_set_clock(&global_trace, trace_boot_clock);
10213 if (ret < 0)
10214 pr_warn("Trace clock %s not defined, going back to default\n",
10215 trace_boot_clock);
10216 }
10217
10218 /*
10219 * register_tracer() might reference current_trace, so it
10220 * needs to be set before we register anything. This is
10221 * just a bootstrap of current_trace anyway.
10222 */
10223 global_trace.current_trace = &nop_trace;
10224
10225 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10226
10227 ftrace_init_global_array_ops(&global_trace);
10228
10229 init_trace_flags_index(&global_trace);
10230
10231 register_tracer(&nop_trace);
10232
10233 /* Function tracing may start here (via kernel command line) */
10234 init_function_trace();
10235
10236 /* All seems OK, enable tracing */
10237 tracing_disabled = 0;
10238
10239 atomic_notifier_chain_register(&panic_notifier_list,
10240 &trace_panic_notifier);
10241
10242 register_die_notifier(&trace_die_notifier);
10243
10244 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10245
10246 INIT_LIST_HEAD(&global_trace.systems);
10247 INIT_LIST_HEAD(&global_trace.events);
10248 INIT_LIST_HEAD(&global_trace.hist_vars);
10249 INIT_LIST_HEAD(&global_trace.err_log);
10250 list_add(&global_trace.list, &ftrace_trace_arrays);
10251
10252 apply_trace_boot_options();
10253
10254 register_snapshot_cmd();
10255
10256 test_can_verify();
10257
10258 return 0;
10259
10260 out_free_savedcmd:
10261 free_saved_cmdlines_buffer(savedcmd);
10262 out_free_temp_buffer:
10263 ring_buffer_free(temp_buffer);
10264 out_rm_hp_state:
10265 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10266 out_free_cpumask:
10267 free_cpumask_var(global_trace.tracing_cpumask);
10268 out_free_buffer_mask:
10269 free_cpumask_var(tracing_buffer_mask);
10270 out:
10271 return ret;
10272 }
10273
10274 void __init ftrace_boot_snapshot(void)
10275 {
10276 if (snapshot_at_boot) {
10277 tracing_snapshot();
10278 internal_trace_puts("** Boot snapshot taken **\n");
10279 }
10280 }
10281
10282 void __init early_trace_init(void)
10283 {
10284 if (tracepoint_printk) {
10285 tracepoint_print_iter =
10286 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10287 if (MEM_FAIL(!tracepoint_print_iter,
10288 "Failed to allocate trace iterator\n"))
10289 tracepoint_printk = 0;
10290 else
10291 static_key_enable(&tracepoint_printk_key.key);
10292 }
10293 tracer_alloc_buffers();
10294
10295 init_events();
10296 }
10297
10298 void __init trace_init(void)
10299 {
10300 trace_event_init();
10301 }
10302
10303 __init static void clear_boot_tracer(void)
10304 {
10305 /*
10306 * The default boot-up tracer name points into an init section.
10307 * This function is called at late_initcall time. If we did not
10308 * find the boot tracer, then clear it out, to prevent
10309 * later registration from accessing the buffer that is
10310 * about to be freed.
10311 */
10312 if (!default_bootup_tracer)
10313 return;
10314
10315 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10316 default_bootup_tracer);
10317 default_bootup_tracer = NULL;
10318 }
10319
10320 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10321 __init static void tracing_set_default_clock(void)
10322 {
10323 /* sched_clock_stable() is determined in late_initcall */
10324 if (!trace_boot_clock && !sched_clock_stable()) {
10325 if (security_locked_down(LOCKDOWN_TRACEFS)) {
10326 pr_warn("Can not set tracing clock due to lockdown\n");
10327 return;
10328 }
10329
10330 printk(KERN_WARNING
10331 "Unstable clock detected, switching default tracing clock to \"global\"\n"
10332 "If you want to keep using the local clock, then add:\n"
10333 " \"trace_clock=local\"\n"
10334 "on the kernel command line\n");
10335 tracing_set_clock(&global_trace, "global");
10336 }
10337 }
10338 #else
10339 static inline void tracing_set_default_clock(void) { }
10340 #endif
10341
10342 __init static int late_trace_init(void)
10343 {
10344 if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10345 static_key_disable(&tracepoint_printk_key.key);
10346 tracepoint_printk = 0;
10347 }
10348
10349 tracing_set_default_clock();
10350 clear_boot_tracer();
10351 return 0;
10352 }
10353
10354 late_initcall_sync(late_trace_init);
10355