1 // SPDX-License-Identifier: GPL-2.0
2
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
5
6 #include <linux/fs.h>
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/trace.h>
15 #include <linux/hw_breakpoint.h>
16 #include <linux/trace_seq.h>
17 #include <linux/trace_events.h>
18 #include <linux/compiler.h>
19 #include <linux/glob.h>
20 #include <linux/irq_work.h>
21 #include <linux/workqueue.h>
22 #include <linux/ctype.h>
23 #include <linux/once_lite.h>
24
25 #include "pid_list.h"
26
27 #ifdef CONFIG_FTRACE_SYSCALLS
28 #include <asm/unistd.h> /* For NR_syscalls */
29 #include <asm/syscall.h> /* some archs define it here */
30 #endif
31
32 #define TRACE_MODE_WRITE 0640
33 #define TRACE_MODE_READ 0440
34
35 enum trace_type {
36 __TRACE_FIRST_TYPE = 0,
37
38 TRACE_FN,
39 TRACE_CTX,
40 TRACE_WAKE,
41 TRACE_STACK,
42 TRACE_PRINT,
43 TRACE_BPRINT,
44 TRACE_MMIO_RW,
45 TRACE_MMIO_MAP,
46 TRACE_BRANCH,
47 TRACE_GRAPH_RET,
48 TRACE_GRAPH_ENT,
49 TRACE_USER_STACK,
50 TRACE_BLK,
51 TRACE_BPUTS,
52 TRACE_HWLAT,
53 TRACE_OSNOISE,
54 TRACE_TIMERLAT,
55 TRACE_RAW_DATA,
56 TRACE_FUNC_REPEATS,
57
58 __TRACE_LAST_TYPE,
59 };
60
61
62 #undef __field
63 #define __field(type, item) type item;
64
65 #undef __field_fn
66 #define __field_fn(type, item) type item;
67
68 #undef __field_struct
69 #define __field_struct(type, item) __field(type, item)
70
71 #undef __field_desc
72 #define __field_desc(type, container, item)
73
74 #undef __field_packed
75 #define __field_packed(type, container, item)
76
77 #undef __array
78 #define __array(type, item, size) type item[size];
79
80 #undef __array_desc
81 #define __array_desc(type, container, item, size)
82
83 #undef __dynamic_array
84 #define __dynamic_array(type, item) type item[];
85
86 #undef __rel_dynamic_array
87 #define __rel_dynamic_array(type, item) type item[];
88
89 #undef F_STRUCT
90 #define F_STRUCT(args...) args
91
92 #undef FTRACE_ENTRY
93 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
94 struct struct_name { \
95 struct trace_entry ent; \
96 tstruct \
97 }
98
99 #undef FTRACE_ENTRY_DUP
100 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
101
102 #undef FTRACE_ENTRY_REG
103 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
104 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
105
106 #undef FTRACE_ENTRY_PACKED
107 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
108 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
109
110 #include "trace_entries.h"
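/*
 * Illustrative sketch (abridged) of how the macros above are consumed by
 * trace_entries.h; see that file for the real entries. "print_args" below
 * stands in for the omitted print-format arguments. An entry such as:
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field_fn(unsigned long, ip)
 *			__field_fn(unsigned long, parent_ip)
 *		),
 *		print_args)
 *
 * expands, with the definitions above, into roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry ent;
 *		unsigned long ip;
 *		unsigned long parent_ip;
 *	};
 */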
111
112 /* Use this for memory failure errors */
113 #define MEM_FAIL(condition, fmt, ...) \
114 DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
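/*
 * Example (illustrative): report a failed allocation once instead of
 * flooding the log. MEM_FAIL() evaluates to the condition, so it can be
 * used directly in an if ():
 *
 *	if (MEM_FAIL(!buf, "Failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */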
115
116 /*
117 * Syscalls are special and need special handling; this is why
118 * they are not included in trace_entries.h.
119 */
120 struct syscall_trace_enter {
121 struct trace_entry ent;
122 int nr;
123 unsigned long args[];
124 };
125
126 struct syscall_trace_exit {
127 struct trace_entry ent;
128 int nr;
129 long ret;
130 };
131
132 struct kprobe_trace_entry_head {
133 struct trace_entry ent;
134 unsigned long ip;
135 };
136
137 struct eprobe_trace_entry_head {
138 struct trace_entry ent;
139 };
140
141 struct kretprobe_trace_entry_head {
142 struct trace_entry ent;
143 unsigned long func;
144 unsigned long ret_ip;
145 };
146
147 #define TRACE_BUF_SIZE 1024
148
149 struct trace_array;
150
151 /*
152 * The CPU trace array - it consists of thousands of trace entries
153 * plus some other descriptor data (for example, which task started
154 * the trace).
155 */
156 struct trace_array_cpu {
157 atomic_t disabled;
158 void *buffer_page; /* ring buffer spare */
159
160 unsigned long entries;
161 unsigned long saved_latency;
162 unsigned long critical_start;
163 unsigned long critical_end;
164 unsigned long critical_sequence;
165 unsigned long nice;
166 unsigned long policy;
167 unsigned long rt_priority;
168 unsigned long skipped_entries;
169 u64 preempt_timestamp;
170 pid_t pid;
171 kuid_t uid;
172 char comm[TASK_COMM_LEN];
173
174 #ifdef CONFIG_FUNCTION_TRACER
175 int ftrace_ignore_pid;
176 #endif
177 bool ignore_pid;
178 };
179
180 struct tracer;
181 struct trace_option_dentry;
182
183 struct array_buffer {
184 struct trace_array *tr;
185 struct trace_buffer *buffer;
186 struct trace_array_cpu __percpu *data;
187 u64 time_start;
188 int cpu;
189 };
190
191 #define TRACE_FLAGS_MAX_SIZE 32
192
193 struct trace_options {
194 struct tracer *tracer;
195 struct trace_option_dentry *topts;
196 };
197
198 struct trace_pid_list *trace_pid_list_alloc(void);
199 void trace_pid_list_free(struct trace_pid_list *pid_list);
200 bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
201 int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
202 int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
203 int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
204 int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
205 unsigned int *next);
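/*
 * Usage sketch (illustrative; do_something_with() is a placeholder):
 * walk every pid currently set in a pid list. Both helpers return 0
 * when a pid was found and non-zero once the walk is done.
 *
 *	unsigned int pid;
 *	int ret;
 *
 *	for (ret = trace_pid_list_first(pid_list, &pid); !ret;
 *	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
 *		do_something_with(pid);
 */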
206
207 enum {
208 TRACE_PIDS = BIT(0),
209 TRACE_NO_PIDS = BIT(1),
210 };
211
212 static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
213 struct trace_pid_list *no_pid_list)
214 {
215 /* Return true if the pid list in type has pids */
216 return ((type & TRACE_PIDS) && pid_list) ||
217 ((type & TRACE_NO_PIDS) && no_pid_list);
218 }
219
220 static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
221 struct trace_pid_list *no_pid_list)
222 {
223 /*
224 * Turning off what is in @type, return true if the "other"
225 * pid list still has pids in it.
226 */
227 return (!(type & TRACE_PIDS) && pid_list) ||
228 (!(type & TRACE_NO_PIDS) && no_pid_list);
229 }
230
231 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
232
233 /**
234 * struct cond_snapshot - conditional snapshot data and callback
235 *
236 * The cond_snapshot structure encapsulates a callback function and
237 * data associated with the snapshot for a given tracing instance.
238 *
239 * When a snapshot is taken conditionally, by invoking
240 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
241 * passed in turn to the cond_snapshot.update() function. That data
242 * can be compared by the update() implementation with the cond_data
243 * contained within the struct cond_snapshot instance associated with
244 * the trace_array. Because the tr->max_lock is held throughout the
245 * update() call, the update() function can directly retrieve the
246 * cond_snapshot and cond_data associated with the per-instance
247 * snapshot associated with the trace_array.
248 *
249 * The cond_snapshot.update() implementation can save data to be
250 * associated with the snapshot if it decides to, and returns 'true'
251 * in that case, or it returns 'false' if the conditional snapshot
252 * shouldn't be taken.
253 *
254 * The cond_snapshot instance is created and associated with the
255 * user-defined cond_data by tracing_cond_snapshot_enable().
256 * Likewise, the cond_snapshot instance is destroyed and is no longer
257 * associated with the trace instance by
258 * tracing_cond_snapshot_disable().
259 *
260 * The method below is required.
261 *
262 * @update: When a conditional snapshot is invoked, the update()
263 * callback function is invoked with the tr->max_lock held. The
264 * update() implementation signals whether or not to actually
265 * take the snapshot, by returning 'true' if so, 'false' if no
266 * snapshot should be taken. Because the max_lock is held for
267 * the duration of update(), the implementation can safely
268 * retrieve and save any implementation data it needs
269 * in association with the snapshot.
270 */
271 struct cond_snapshot {
272 void *cond_data;
273 cond_update_fn_t update;
274 };
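/*
 * Minimal sketch of an update() callback (hypothetical, not an in-tree
 * user): only take the snapshot when the cond_data passed to
 * tracing_snapshot_cond() matches the data registered with the
 * cond_snapshot instance.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct cond_snapshot *cs = tr->cond_snapshot;
 *
 *		return cs && cs->cond_data == cond_data;
 *	}
 */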
275
276 /*
277 * struct trace_func_repeats - used to keep track of the consecutive
278 * (on the same CPU) calls of a single function.
279 */
280 struct trace_func_repeats {
281 unsigned long ip;
282 unsigned long parent_ip;
283 unsigned long count;
284 u64 ts_last_call;
285 };
286
287 /*
288 * The trace array - an array of per-CPU trace arrays. This is the
289 * highest level data structure that individual tracers deal with.
290 * They have on/off state as well:
291 */
292 struct trace_array {
293 struct list_head list;
294 char *name;
295 struct array_buffer array_buffer;
296 #ifdef CONFIG_TRACER_MAX_TRACE
297 /*
298 * The max_buffer is used to snapshot the trace when a maximum
299 * latency is reached, or when the user initiates a snapshot.
300 * Some tracers will use this to store a maximum trace while
301 * it continues examining live traces.
302 *
303 * The buffers for the max_buffer are set up the same as the array_buffer.
304 * When a snapshot is taken, the buffer of the max_buffer is swapped
305 * with the buffer of the array_buffer and the buffers are reset for
306 * the array_buffer so the tracing can continue.
307 */
308 struct array_buffer max_buffer;
309 bool allocated_snapshot;
310 #endif
311 #ifdef CONFIG_TRACER_MAX_TRACE
312 unsigned long max_latency;
313 #ifdef CONFIG_FSNOTIFY
314 struct dentry *d_max_latency;
315 struct work_struct fsnotify_work;
316 struct irq_work fsnotify_irqwork;
317 #endif
318 #endif
319 struct trace_pid_list __rcu *filtered_pids;
320 struct trace_pid_list __rcu *filtered_no_pids;
321 /*
322 * max_lock is used to protect the swapping of buffers
323 * when taking a max snapshot. The buffers themselves are
324 * protected by per_cpu spinlocks. But the action of the swap
325 * needs its own lock.
326 *
327 * This is defined as an arch_spinlock_t in order to help
328 * with performance when lockdep debugging is enabled.
329 *
330 * It is also used in other places outside of update_max_tr(),
331 * so it needs to be defined outside of the
332 * CONFIG_TRACER_MAX_TRACE block.
333 */
334 arch_spinlock_t max_lock;
335 int buffer_disabled;
336 #ifdef CONFIG_FTRACE_SYSCALLS
337 int sys_refcount_enter;
338 int sys_refcount_exit;
339 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
340 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
341 #endif
342 int stop_count;
343 int clock_id;
344 int nr_topts;
345 bool clear_trace;
346 int buffer_percent;
347 unsigned int n_err_log_entries;
348 struct tracer *current_trace;
349 unsigned int trace_flags;
350 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
351 unsigned int flags;
352 raw_spinlock_t start_lock;
353 struct list_head err_log;
354 struct dentry *dir;
355 struct dentry *options;
356 struct dentry *percpu_dir;
357 struct dentry *event_dir;
358 struct trace_options *topts;
359 struct list_head systems;
360 struct list_head events;
361 struct trace_event_file *trace_marker_file;
362 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
363 int ref;
364 int trace_ref;
365 #ifdef CONFIG_FUNCTION_TRACER
366 struct ftrace_ops *ops;
367 struct trace_pid_list __rcu *function_pids;
368 struct trace_pid_list __rcu *function_no_pids;
369 #ifdef CONFIG_DYNAMIC_FTRACE
370 /* All of these are protected by the ftrace_lock */
371 struct list_head func_probes;
372 struct list_head mod_trace;
373 struct list_head mod_notrace;
374 #endif
375 /* function tracing enabled */
376 int function_enabled;
377 #endif
378 int no_filter_buffering_ref;
379 struct list_head hist_vars;
380 #ifdef CONFIG_TRACER_SNAPSHOT
381 struct cond_snapshot *cond_snapshot;
382 #endif
383 struct trace_func_repeats __percpu *last_func_repeats;
384 };
385
386 enum {
387 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
388 };
389
390 extern struct list_head ftrace_trace_arrays;
391
392 extern struct mutex trace_types_lock;
393
394 extern int trace_array_get(struct trace_array *tr);
395 extern int tracing_check_open_get_tr(struct trace_array *tr);
396 extern struct trace_array *trace_array_find(const char *instance);
397 extern struct trace_array *trace_array_find_get(const char *instance);
398
399 extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
400 extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
401 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
402
403 extern bool trace_clock_in_ns(struct trace_array *tr);
404
405 /*
406 * The global tracer (top) should be the first trace array added,
407 * but we check the flag anyway.
408 */
409 static inline struct trace_array *top_trace_array(void)
410 {
411 struct trace_array *tr;
412
413 if (list_empty(&ftrace_trace_arrays))
414 return NULL;
415
416 tr = list_entry(ftrace_trace_arrays.prev,
417 typeof(*tr), list);
418 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
419 return tr;
420 }
421
422 #define FTRACE_CMP_TYPE(var, type) \
423 __builtin_types_compatible_p(typeof(var), type *)
424
425 #undef IF_ASSIGN
426 #define IF_ASSIGN(var, entry, etype, id) \
427 if (FTRACE_CMP_TYPE(var, etype)) { \
428 var = (typeof(var))(entry); \
429 WARN_ON(id != 0 && (entry)->type != id); \
430 break; \
431 }
432
433 /* Will cause compile errors if type is not found. */
434 extern void __ftrace_bad_type(void);
435
436 /*
437 * The trace_assign_type is a verifier that the entry type is
438 * the same as the type being assigned. To add new types simply
439 * add a line with the following format:
440 *
441 * IF_ASSIGN(var, ent, type, id);
442 *
443 * Where "type" is the trace type that includes the trace_entry
444 * as the "ent" item. And "id" is the trace identifier that is
445 * used in the trace_type enum.
446 *
447 * If the type can have more than one id, then use zero.
448 */
449 #define trace_assign_type(var, ent) \
450 do { \
451 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
452 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
453 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
454 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
455 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
456 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
457 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
458 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
459 IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
460 IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
461 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
462 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
463 TRACE_MMIO_RW); \
464 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
465 TRACE_MMIO_MAP); \
466 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
467 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
468 TRACE_GRAPH_ENT); \
469 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
470 TRACE_GRAPH_RET); \
471 IF_ASSIGN(var, ent, struct func_repeats_entry, \
472 TRACE_FUNC_REPEATS); \
473 __ftrace_bad_type(); \
474 } while (0)
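/*
 * Example (illustrative): typical use from a print callback, where the
 * generic entry is converted to its specific type after its type id has
 * been verified:
 *
 *	struct trace_entry *entry = iter->ent;
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 */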
475
476 /*
477 * An option specific to a tracer. This is a boolean value.
478 * The bit is the bit index that sets its value on the
479 * flags value in struct tracer_flags.
480 */
481 struct tracer_opt {
482 const char *name; /* Will appear on the trace_options file */
483 u32 bit; /* Mask assigned in val field in tracer_flags */
484 };
485
486 /*
487 * The set of specific options for a tracer. Your tracer
488 * has to set the initial value of the flags val.
489 */
490 struct tracer_flags {
491 u32 val;
492 struct tracer_opt *opts;
493 struct tracer *trace;
494 };
495
496 /* Makes it easier to define a tracer opt */
497 #define TRACER_OPT(s, b) .name = #s, .bit = b
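/*
 * Sketch of how a tracer defines its private options (hypothetical
 * names; see trace_functions_graph.c for a real example). The opts
 * array is terminated by an empty entry and .val holds the initial
 * value of the option bits:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_option, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val = 0,
 *		.opts = my_opts,
 *	};
 */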
498
499
500 struct trace_option_dentry {
501 struct tracer_opt *opt;
502 struct tracer_flags *flags;
503 struct trace_array *tr;
504 struct dentry *entry;
505 };
506
507 /**
508 * struct tracer - a specific tracer and its callbacks to interact with tracefs
509 * @name: the name chosen to select it on the available_tracers file
510 * @init: called when one switches to this tracer (echo name > current_tracer)
511 * @reset: called when one switches to another tracer
512 * @start: called when tracing is unpaused (echo 1 > tracing_on)
513 * @stop: called when tracing is paused (echo 0 > tracing_on)
514 * @update_thresh: called when tracing_thresh is updated
515 * @open: called when the trace file is opened
516 * @pipe_open: called when the trace_pipe file is opened
517 * @close: called when the trace file is released
518 * @pipe_close: called when the trace_pipe file is released
519 * @read: override the default read callback on trace_pipe
520 * @splice_read: override the default splice_read callback on trace_pipe
521 * @selftest: selftest to run on boot (see trace_selftest.c)
522 * @print_header: override the first lines that describe your columns
523 * @print_line: callback that prints a trace
524 * @set_flag: signals one of your private flags changed (trace_options file)
525 * @flags: your private flags
526 */
527 struct tracer {
528 const char *name;
529 int (*init)(struct trace_array *tr);
530 void (*reset)(struct trace_array *tr);
531 void (*start)(struct trace_array *tr);
532 void (*stop)(struct trace_array *tr);
533 int (*update_thresh)(struct trace_array *tr);
534 void (*open)(struct trace_iterator *iter);
535 void (*pipe_open)(struct trace_iterator *iter);
536 void (*close)(struct trace_iterator *iter);
537 void (*pipe_close)(struct trace_iterator *iter);
538 ssize_t (*read)(struct trace_iterator *iter,
539 struct file *filp, char __user *ubuf,
540 size_t cnt, loff_t *ppos);
541 ssize_t (*splice_read)(struct trace_iterator *iter,
542 struct file *filp,
543 loff_t *ppos,
544 struct pipe_inode_info *pipe,
545 size_t len,
546 unsigned int flags);
547 #ifdef CONFIG_FTRACE_STARTUP_TEST
548 int (*selftest)(struct tracer *trace,
549 struct trace_array *tr);
550 #endif
551 void (*print_header)(struct seq_file *m);
552 enum print_line_t (*print_line)(struct trace_iterator *iter);
553 /* If you handled the flag setting, return 0 */
554 int (*set_flag)(struct trace_array *tr,
555 u32 old_flags, u32 bit, int set);
556 /* Return 0 if OK with change, else return non-zero */
557 int (*flag_changed)(struct trace_array *tr,
558 u32 mask, int set);
559 struct tracer *next;
560 struct tracer_flags *flags;
561 int enabled;
562 bool print_max;
563 bool allow_instances;
564 #ifdef CONFIG_TRACER_MAX_TRACE
565 bool use_max_tr;
566 #endif
567 /* True if tracer cannot be enabled in kernel param */
568 bool noboot;
569 };
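/*
 * Minimal tracer skeleton (illustrative; "my_tracer" and its init
 * callback are hypothetical, and __tracer_data is defined further down
 * in this header). A tracer fills in only the callbacks it needs and
 * registers itself from an __init function:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name = "my_tracer",
 *		.init = my_tracer_init,
 *		.allow_instances = true,
 *	};
 *
 *	ret = register_tracer(&my_tracer);
 */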
570
571 static inline struct ring_buffer_iter *
572 trace_buffer_iter(struct trace_iterator *iter, int cpu)
573 {
574 return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
575 }
576
577 int tracer_init(struct tracer *t, struct trace_array *tr);
578 int tracing_is_enabled(void);
579 void tracing_reset_online_cpus(struct array_buffer *buf);
580 void tracing_reset_current(int cpu);
581 void tracing_reset_all_online_cpus(void);
582 void tracing_reset_all_online_cpus_unlocked(void);
583 int tracing_open_generic(struct inode *inode, struct file *filp);
584 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
585 bool tracing_is_disabled(void);
586 bool tracer_tracing_is_on(struct trace_array *tr);
587 void tracer_tracing_on(struct trace_array *tr);
588 void tracer_tracing_off(struct trace_array *tr);
589 struct dentry *trace_create_file(const char *name,
590 umode_t mode,
591 struct dentry *parent,
592 void *data,
593 const struct file_operations *fops);
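/*
 * Illustrative example (hypothetical names): create a read-only tracefs
 * file with the TRACE_MODE_READ permissions defined near the top of
 * this header:
 *
 *	trace_create_file("my_stats", TRACE_MODE_READ, d_tracer,
 *			  tr, &my_stats_fops);
 */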
594
595 int tracing_init_dentry(void);
596
597 struct ring_buffer_event;
598
599 struct ring_buffer_event *
600 trace_buffer_lock_reserve(struct trace_buffer *buffer,
601 int type,
602 unsigned long len,
603 unsigned int trace_ctx);
604
605 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
606 struct trace_array_cpu *data);
607
608 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
609 int *ent_cpu, u64 *ent_ts);
610
611 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
612 struct ring_buffer_event *event);
613
614 bool trace_is_tracepoint_string(const char *str);
615 const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
616 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
617 va_list ap);
618
619 int trace_empty(struct trace_iterator *iter);
620
621 void *trace_find_next_entry_inc(struct trace_iterator *iter);
622
623 void trace_init_global_iter(struct trace_iterator *iter);
624
625 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
626
627 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
628 unsigned long trace_total_entries(struct trace_array *tr);
629
630 void trace_function(struct trace_array *tr,
631 unsigned long ip,
632 unsigned long parent_ip,
633 unsigned int trace_ctx);
634 void trace_graph_function(struct trace_array *tr,
635 unsigned long ip,
636 unsigned long parent_ip,
637 unsigned int trace_ctx);
638 void trace_latency_header(struct seq_file *m);
639 void trace_default_header(struct seq_file *m);
640 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
641
642 void trace_graph_return(struct ftrace_graph_ret *trace);
643 int trace_graph_entry(struct ftrace_graph_ent *trace);
644 void set_graph_array(struct trace_array *tr);
645
646 void tracing_start_cmdline_record(void);
647 void tracing_stop_cmdline_record(void);
648 void tracing_start_tgid_record(void);
649 void tracing_stop_tgid_record(void);
650
651 int register_tracer(struct tracer *type);
652 int is_tracing_stopped(void);
653
654 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
655
656 extern cpumask_var_t __read_mostly tracing_buffer_mask;
657
658 #define for_each_tracing_cpu(cpu) \
659 for_each_cpu(cpu, tracing_buffer_mask)
660
661 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
662
663 extern unsigned long tracing_thresh;
664
665 /* PID filtering */
666
667 extern int pid_max;
668
669 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
670 pid_t search_pid);
671 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
672 struct trace_pid_list *filtered_no_pids,
673 struct task_struct *task);
674 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
675 struct task_struct *self,
676 struct task_struct *task);
677 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
678 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
679 int trace_pid_show(struct seq_file *m, void *v);
680 void trace_free_pid_list(struct trace_pid_list *pid_list);
681 int trace_pid_write(struct trace_pid_list *filtered_pids,
682 struct trace_pid_list **new_pid_list,
683 const char __user *ubuf, size_t cnt);
684
685 #ifdef CONFIG_TRACER_MAX_TRACE
686 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
687 void *cond_data);
688 void update_max_tr_single(struct trace_array *tr,
689 struct task_struct *tsk, int cpu);
690
691 #ifdef CONFIG_FSNOTIFY
692 #define LATENCY_FS_NOTIFY
693 #endif
694 #endif /* CONFIG_TRACER_MAX_TRACE */
695
696 #ifdef LATENCY_FS_NOTIFY
697 void latency_fsnotify(struct trace_array *tr);
698 #else
699 static inline void latency_fsnotify(struct trace_array *tr) { }
700 #endif
701
702 #ifdef CONFIG_STACKTRACE
703 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
704 #else
705 static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
706 int skip)
707 {
708 }
709 #endif /* CONFIG_STACKTRACE */
710
711 void trace_last_func_repeats(struct trace_array *tr,
712 struct trace_func_repeats *last_info,
713 unsigned int trace_ctx);
714
715 extern u64 ftrace_now(int cpu);
716
717 extern void trace_find_cmdline(int pid, char comm[]);
718 extern int trace_find_tgid(int pid);
719 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
720
721 #ifdef CONFIG_DYNAMIC_FTRACE
722 extern unsigned long ftrace_update_tot_cnt;
723 extern unsigned long ftrace_number_of_pages;
724 extern unsigned long ftrace_number_of_groups;
725 void ftrace_init_trace_array(struct trace_array *tr);
726 #else
727 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
728 #endif
729 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
730 extern int DYN_FTRACE_TEST_NAME(void);
731 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
732 extern int DYN_FTRACE_TEST_NAME2(void);
733
734 extern bool ring_buffer_expanded;
735 extern bool tracing_selftest_disabled;
736
737 #ifdef CONFIG_FTRACE_STARTUP_TEST
738 extern void __init disable_tracing_selftest(const char *reason);
739
740 extern int trace_selftest_startup_function(struct tracer *trace,
741 struct trace_array *tr);
742 extern int trace_selftest_startup_function_graph(struct tracer *trace,
743 struct trace_array *tr);
744 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
745 struct trace_array *tr);
746 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
747 struct trace_array *tr);
748 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
749 struct trace_array *tr);
750 extern int trace_selftest_startup_wakeup(struct tracer *trace,
751 struct trace_array *tr);
752 extern int trace_selftest_startup_nop(struct tracer *trace,
753 struct trace_array *tr);
754 extern int trace_selftest_startup_branch(struct tracer *trace,
755 struct trace_array *tr);
756 /*
757 * Tracer data references selftest functions that only occur
758 * on boot up. These can be __init functions. Thus, when selftests
759 * are enabled, the tracers need to reference __init functions.
760 */
761 #define __tracer_data __refdata
762 #else
763 static inline void __init disable_tracing_selftest(const char *reason)
764 {
765 }
766 /* Tracers are seldom changed. Optimize when selftests are disabled. */
767 #define __tracer_data __read_mostly
768 #endif /* CONFIG_FTRACE_STARTUP_TEST */
769
770 extern void *head_page(struct trace_array_cpu *data);
771 extern unsigned long long ns2usecs(u64 nsec);
772 extern int
773 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
774 extern int
775 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
776 extern int
777 trace_array_vprintk(struct trace_array *tr,
778 unsigned long ip, const char *fmt, va_list args);
779 int trace_array_printk_buf(struct trace_buffer *buffer,
780 unsigned long ip, const char *fmt, ...);
781 void trace_printk_seq(struct trace_seq *s);
782 enum print_line_t print_trace_line(struct trace_iterator *iter);
783
784 extern char trace_find_mark(unsigned long long duration);
785
786 struct ftrace_hash;
787
788 struct ftrace_mod_load {
789 struct list_head list;
790 char *func;
791 char *module;
792 int enable;
793 };
794
795 enum {
796 FTRACE_HASH_FL_MOD = (1 << 0),
797 };
798
799 struct ftrace_hash {
800 unsigned long size_bits;
801 struct hlist_head *buckets;
802 unsigned long count;
803 unsigned long flags;
804 struct rcu_head rcu;
805 };
806
807 struct ftrace_func_entry *
808 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
809
810 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
811 {
812 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
813 }
814
815 /* Standard output formatting function used for function return traces */
816 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
817
818 /* Flag options */
819 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
820 #define TRACE_GRAPH_PRINT_CPU 0x2
821 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
822 #define TRACE_GRAPH_PRINT_PROC 0x8
823 #define TRACE_GRAPH_PRINT_DURATION 0x10
824 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
825 #define TRACE_GRAPH_PRINT_REL_TIME 0x40
826 #define TRACE_GRAPH_PRINT_IRQS 0x80
827 #define TRACE_GRAPH_PRINT_TAIL 0x100
828 #define TRACE_GRAPH_SLEEP_TIME 0x200
829 #define TRACE_GRAPH_GRAPH_TIME 0x400
830 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
831 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
832
833 extern void ftrace_graph_sleep_time_control(bool enable);
834
835 #ifdef CONFIG_FUNCTION_PROFILER
836 extern void ftrace_graph_graph_time_control(bool enable);
837 #else
838 static inline void ftrace_graph_graph_time_control(bool enable) { }
839 #endif
840
841 extern enum print_line_t
842 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
843 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
844 extern void
845 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
846 extern void graph_trace_open(struct trace_iterator *iter);
847 extern void graph_trace_close(struct trace_iterator *iter);
848 extern int __trace_graph_entry(struct trace_array *tr,
849 struct ftrace_graph_ent *trace,
850 unsigned int trace_ctx);
851 extern void __trace_graph_return(struct trace_array *tr,
852 struct ftrace_graph_ret *trace,
853 unsigned int trace_ctx);
854
855 #ifdef CONFIG_DYNAMIC_FTRACE
856 extern struct ftrace_hash __rcu *ftrace_graph_hash;
857 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
858
859 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
860 {
861 unsigned long addr = trace->func;
862 int ret = 0;
863 struct ftrace_hash *hash;
864
865 preempt_disable_notrace();
866
867 /*
868 * Have to open code "rcu_dereference_sched()" because the
869 * function graph tracer can be called when RCU is not
870 * "watching".
871 * Protected with schedule_on_each_cpu(ftrace_sync)
872 */
873 hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
874
875 if (ftrace_hash_empty(hash)) {
876 ret = 1;
877 goto out;
878 }
879
880 if (ftrace_lookup_ip(hash, addr)) {
881
882 /*
883 * This needs to be cleared on the return functions
884 * when the depth is zero.
885 */
886 trace_recursion_set(TRACE_GRAPH_BIT);
887 trace_recursion_set_depth(trace->depth);
888
889 /*
890 * If no irqs are to be traced, but a set_graph_function
891 * is set, and called by an interrupt handler, we still
892 * want to trace it.
893 */
894 if (in_hardirq())
895 trace_recursion_set(TRACE_IRQ_BIT);
896 else
897 trace_recursion_clear(TRACE_IRQ_BIT);
898 ret = 1;
899 }
900
901 out:
902 preempt_enable_notrace();
903 return ret;
904 }
905
906 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
907 {
908 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
909 trace->depth == trace_recursion_depth())
910 trace_recursion_clear(TRACE_GRAPH_BIT);
911 }
912
913 static inline int ftrace_graph_notrace_addr(unsigned long addr)
914 {
915 int ret = 0;
916 struct ftrace_hash *notrace_hash;
917
918 preempt_disable_notrace();
919
920 /*
921 * Have to open code "rcu_dereference_sched()" because the
922 * function graph tracer can be called when RCU is not
923 * "watching".
924 * Protected with schedule_on_each_cpu(ftrace_sync)
925 */
926 notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
927 !preemptible());
928
929 if (ftrace_lookup_ip(notrace_hash, addr))
930 ret = 1;
931
932 preempt_enable_notrace();
933 return ret;
934 }
935 #else
936 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
937 {
938 return 1;
939 }
940
941 static inline int ftrace_graph_notrace_addr(unsigned long addr)
942 {
943 return 0;
944 }
945 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
946 { }
947 #endif /* CONFIG_DYNAMIC_FTRACE */
948
949 extern unsigned int fgraph_max_depth;
950
951 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
952 {
953 /* Trace it when it is nested in an already-traced function or is itself an enabled function. */
954 return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
955 ftrace_graph_addr(trace)) ||
956 (trace->depth < 0) ||
957 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
958 }
959
960 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
961 static inline enum print_line_t
962 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
963 {
964 return TRACE_TYPE_UNHANDLED;
965 }
966 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
967
968 extern struct list_head ftrace_pids;
969
970 #ifdef CONFIG_FUNCTION_TRACER
971
972 #define FTRACE_PID_IGNORE -1
973 #define FTRACE_PID_TRACE -2
974
975 struct ftrace_func_command {
976 struct list_head list;
977 char *name;
978 int (*func)(struct trace_array *tr,
979 struct ftrace_hash *hash,
980 char *func, char *cmd,
981 char *params, int enable);
982 };
983 extern bool ftrace_filter_param __initdata;
984 static inline int ftrace_trace_task(struct trace_array *tr)
985 {
986 return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
987 FTRACE_PID_IGNORE;
988 }
989 extern int ftrace_is_dead(void);
990 int ftrace_create_function_files(struct trace_array *tr,
991 struct dentry *parent);
992 void ftrace_destroy_function_files(struct trace_array *tr);
993 int ftrace_allocate_ftrace_ops(struct trace_array *tr);
994 void ftrace_free_ftrace_ops(struct trace_array *tr);
995 void ftrace_init_global_array_ops(struct trace_array *tr);
996 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
997 void ftrace_reset_array_ops(struct trace_array *tr);
998 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
999 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1000 struct dentry *d_tracer);
1001 void ftrace_clear_pids(struct trace_array *tr);
1002 int init_function_trace(void);
1003 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1004 #else
1005 static inline int ftrace_trace_task(struct trace_array *tr)
1006 {
1007 return 1;
1008 }
1009 static inline int ftrace_is_dead(void) { return 0; }
1010 static inline int
1011 ftrace_create_function_files(struct trace_array *tr,
1012 struct dentry *parent)
1013 {
1014 return 0;
1015 }
1016 static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
1017 {
1018 return 0;
1019 }
1020 static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
1021 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1022 static inline __init void
1023 ftrace_init_global_array_ops(struct trace_array *tr) { }
1024 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1025 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1026 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1027 static inline void ftrace_clear_pids(struct trace_array *tr) { }
1028 static inline int init_function_trace(void) { return 0; }
1029 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1030 /* ftrace_func_t type is not defined, use macro instead of static inline */
1031 #define ftrace_init_array_ops(tr, func) do { } while (0)
1032 #endif /* CONFIG_FUNCTION_TRACER */
1033
1034 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1035
1036 struct ftrace_probe_ops {
1037 void (*func)(unsigned long ip,
1038 unsigned long parent_ip,
1039 struct trace_array *tr,
1040 struct ftrace_probe_ops *ops,
1041 void *data);
1042 int (*init)(struct ftrace_probe_ops *ops,
1043 struct trace_array *tr,
1044 unsigned long ip, void *init_data,
1045 void **data);
1046 void (*free)(struct ftrace_probe_ops *ops,
1047 struct trace_array *tr,
1048 unsigned long ip, void *data);
1049 int (*print)(struct seq_file *m,
1050 unsigned long ip,
1051 struct ftrace_probe_ops *ops,
1052 void *data);
1053 };
1054
1055 struct ftrace_func_mapper;
1056 typedef int (*ftrace_mapper_func)(void *data);
1057
1058 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1059 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1060 unsigned long ip);
1061 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1062 unsigned long ip, void *data);
1063 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1064 unsigned long ip);
1065 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1066 ftrace_mapper_func free_func);
1067
1068 extern int
1069 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1070 struct ftrace_probe_ops *ops, void *data);
1071 extern int
1072 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1073 struct ftrace_probe_ops *ops);
1074 extern void clear_ftrace_function_probes(struct trace_array *tr);
1075
1076 int register_ftrace_command(struct ftrace_func_command *cmd);
1077 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1078
1079 void ftrace_create_filter_files(struct ftrace_ops *ops,
1080 struct dentry *parent);
1081 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1082
1083 extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1084 int len, int reset);
1085 extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1086 int len, int reset);
1087 #else
1088 struct ftrace_func_command;
1089
1090 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1091 {
1092 return -EINVAL;
1093 }
1094 static inline __init int unregister_ftrace_command(char *cmd_name)
1095 {
1096 return -EINVAL;
1097 }
1098 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1099 {
1100 }
1101
1102 /*
1103 * The ops parameter passed in is usually undefined.
1104 * This must be a macro.
1105 */
1106 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1107 #define ftrace_destroy_filter_files(ops) do { } while (0)
1108 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1109
1110 bool ftrace_event_is_function(struct trace_event_call *call);
1111
1112 /*
1113 * struct trace_parser - serves for reading the user input separated by spaces
1114 * @cont: set if the input is not complete - no final space char was found
1115 * @buffer: holds the parsed user input
1116 * @idx: user input length
1117 * @size: buffer size
1118 */
1119 struct trace_parser {
1120 bool cont;
1121 char *buffer;
1122 unsigned idx;
1123 unsigned size;
1124 };
1125
1126 static inline bool trace_parser_loaded(struct trace_parser *parser)
1127 {
1128 return (parser->idx != 0);
1129 }
1130
1131 static inline bool trace_parser_cont(struct trace_parser *parser)
1132 {
1133 return parser->cont;
1134 }
1135
1136 static inline void trace_parser_clear(struct trace_parser *parser)
1137 {
1138 parser->cont = false;
1139 parser->idx = 0;
1140 }
1141
1142 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1143 extern void trace_parser_put(struct trace_parser *parser);
1144 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1145 size_t cnt, loff_t *ppos);
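/*
 * Typical parser loop (illustrative; PARSER_BUF_SIZE and handle_token()
 * are placeholders): read one space-separated token from a user buffer,
 * e.g. in a tracefs write() handler:
 *
 *	struct trace_parser parser;
 *	ssize_t read, ret = 0;
 *
 *	if (trace_parser_get_init(&parser, PARSER_BUF_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		ret = handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return ret ? ret : read;
 */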
1146
1147 /*
1148 * Only create function graph options if function graph is configured.
1149 */
1150 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1151 # define FGRAPH_FLAGS \
1152 C(DISPLAY_GRAPH, "display-graph"),
1153 #else
1154 # define FGRAPH_FLAGS
1155 #endif
1156
1157 #ifdef CONFIG_BRANCH_TRACER
1158 # define BRANCH_FLAGS \
1159 C(BRANCH, "branch"),
1160 #else
1161 # define BRANCH_FLAGS
1162 #endif
1163
1164 #ifdef CONFIG_FUNCTION_TRACER
1165 # define FUNCTION_FLAGS \
1166 C(FUNCTION, "function-trace"), \
1167 C(FUNC_FORK, "function-fork"),
1168 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1169 #else
1170 # define FUNCTION_FLAGS
1171 # define FUNCTION_DEFAULT_FLAGS 0UL
1172 # define TRACE_ITER_FUNC_FORK 0UL
1173 #endif
1174
1175 #ifdef CONFIG_STACKTRACE
1176 # define STACK_FLAGS \
1177 C(STACKTRACE, "stacktrace"),
1178 #else
1179 # define STACK_FLAGS
1180 #endif
1181
1182 /*
1183 * trace_iterator_flags is an enumeration that defines bit
1184 * positions into trace_flags that controls the output.
1185 *
1186 * NOTE: These bits must match the trace_options array in
1187 * trace.c (this macro guarantees it).
1188 */
1189 #define TRACE_FLAGS \
1190 C(PRINT_PARENT, "print-parent"), \
1191 C(SYM_OFFSET, "sym-offset"), \
1192 C(SYM_ADDR, "sym-addr"), \
1193 C(VERBOSE, "verbose"), \
1194 C(RAW, "raw"), \
1195 C(HEX, "hex"), \
1196 C(BIN, "bin"), \
1197 C(BLOCK, "block"), \
1198 C(PRINTK, "trace_printk"), \
1199 C(ANNOTATE, "annotate"), \
1200 C(USERSTACKTRACE, "userstacktrace"), \
1201 C(SYM_USEROBJ, "sym-userobj"), \
1202 C(PRINTK_MSGONLY, "printk-msg-only"), \
1203 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1204 C(LATENCY_FMT, "latency-format"), \
1205 C(RECORD_CMD, "record-cmd"), \
1206 C(RECORD_TGID, "record-tgid"), \
1207 C(OVERWRITE, "overwrite"), \
1208 C(STOP_ON_FREE, "disable_on_free"), \
1209 C(IRQ_INFO, "irq-info"), \
1210 C(MARKERS, "markers"), \
1211 C(EVENT_FORK, "event-fork"), \
1212 C(PAUSE_ON_TRACE, "pause-on-trace"), \
1213 C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
1214 FUNCTION_FLAGS \
1215 FGRAPH_FLAGS \
1216 STACK_FLAGS \
1217 BRANCH_FLAGS
1218
1219 /*
1220 * By defining C, we can make TRACE_FLAGS a list of bit names
1221 * that will define the bits for the flag masks.
1222 */
1223 #undef C
1224 #define C(a, b) TRACE_ITER_##a##_BIT
1225
1226 enum trace_iterator_bits {
1227 TRACE_FLAGS
1228 /* Make sure we don't go more than we have bits for */
1229 TRACE_ITER_LAST_BIT
1230 };
1231
1232 /*
1233 * By redefining C, we can make TRACE_FLAGS a list of masks that
1234 * use the bits as defined above.
1235 */
1236 #undef C
1237 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1238
1239 enum trace_iterator_flags { TRACE_FLAGS };
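/*
 * For example, the C(PRINT_PARENT, "print-parent") entry in TRACE_FLAGS
 * expands to the bit TRACE_ITER_PRINT_PARENT_BIT in the first enum above
 * and to the mask TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 * in the second, while trace.c redefines C() once more to build the
 * matching "print-parent" string for its trace_options[] array.
 */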
1240
1241 /*
1242 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1243 * control the output of kernel symbols.
1244 */
1245 #define TRACE_ITER_SYM_MASK \
1246 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1247
1248 extern struct tracer nop_trace;
1249
1250 #ifdef CONFIG_BRANCH_TRACER
1251 extern int enable_branch_tracing(struct trace_array *tr);
1252 extern void disable_branch_tracing(void);
1253 static inline int trace_branch_enable(struct trace_array *tr)
1254 {
1255 if (tr->trace_flags & TRACE_ITER_BRANCH)
1256 return enable_branch_tracing(tr);
1257 return 0;
1258 }
1259 static inline void trace_branch_disable(void)
1260 {
1261 /* due to races, always disable */
1262 disable_branch_tracing();
1263 }
1264 #else
1265 static inline int trace_branch_enable(struct trace_array *tr)
1266 {
1267 return 0;
1268 }
1269 static inline void trace_branch_disable(void)
1270 {
1271 }
1272 #endif /* CONFIG_BRANCH_TRACER */
1273
1274 /* set ring buffers to default size if not already done */
1275 int tracing_update_buffers(void);
1276
1277 struct ftrace_event_field {
1278 struct list_head link;
1279 const char *name;
1280 const char *type;
1281 int filter_type;
1282 int offset;
1283 int size;
1284 int is_signed;
1285 };
1286
1287 struct prog_entry;
1288
1289 struct event_filter {
1290 struct prog_entry __rcu *prog;
1291 char *filter_string;
1292 };
1293
1294 struct event_subsystem {
1295 struct list_head list;
1296 const char *name;
1297 struct event_filter *filter;
1298 int ref_count;
1299 };
1300
1301 struct trace_subsystem_dir {
1302 struct list_head list;
1303 struct event_subsystem *subsystem;
1304 struct trace_array *tr;
1305 struct dentry *entry;
1306 int ref_count;
1307 int nr_events;
1308 };
1309
1310 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1311 struct trace_buffer *buffer,
1312 struct ring_buffer_event *event);
1313
1314 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1315 struct trace_buffer *buffer,
1316 struct ring_buffer_event *event,
1317 unsigned int trace_ctx,
1318 struct pt_regs *regs);
1319
1320 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1321 struct trace_buffer *buffer,
1322 struct ring_buffer_event *event,
1323 unsigned int trace_ctx)
1324 {
1325 trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
1326 }
1327
1328 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1329 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1330 void trace_buffered_event_disable(void);
1331 void trace_buffered_event_enable(void);
1332
1333 static inline void
1334 __trace_event_discard_commit(struct trace_buffer *buffer,
1335 struct ring_buffer_event *event)
1336 {
1337 if (this_cpu_read(trace_buffered_event) == event) {
1338 /* Simply release the temp buffer and enable preemption */
1339 this_cpu_dec(trace_buffered_event_cnt);
1340 preempt_enable_notrace();
1341 return;
1342 }
1343 /* ring_buffer_discard_commit() enables preemption */
1344 ring_buffer_discard_commit(buffer, event);
1345 }
1346
1347 /*
1348 * Helper function for event_trigger_unlock_commit{_regs}().
1349 * If there are event triggers attached to this event that require
1350 * filtering against its fields, then they will be called as the
1351 * entry already holds the field information of the current event.
1352 *
1353 * It also checks if the event should be discarded or not.
1354 * It is to be discarded if the event is soft disabled and the
1355 * event was only recorded to process triggers, or if the event
1356 * filter is active and this event did not match the filters.
1357 *
1358 * Returns true if the event is discarded, false otherwise.
1359 */
1360 static inline bool
1361 __event_trigger_test_discard(struct trace_event_file *file,
1362 struct trace_buffer *buffer,
1363 struct ring_buffer_event *event,
1364 void *entry,
1365 enum event_trigger_type *tt)
1366 {
1367 unsigned long eflags = file->flags;
1368
1369 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1370 *tt = event_triggers_call(file, buffer, entry, event);
1371
1372 if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
1373 EVENT_FILE_FL_FILTERED |
1374 EVENT_FILE_FL_PID_FILTER))))
1375 return false;
1376
1377 if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
1378 goto discard;
1379
1380 if (file->flags & EVENT_FILE_FL_FILTERED &&
1381 !filter_match_preds(file->filter, entry))
1382 goto discard;
1383
1384 if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
1385 trace_event_ignore_this_pid(file))
1386 goto discard;
1387
1388 return false;
1389 discard:
1390 __trace_event_discard_commit(buffer, event);
1391 return true;
1392 }
1393
1394 /**
1395 * event_trigger_unlock_commit - handle triggers and finish event commit
1396 * @file: The file pointer associated with the event
1397 * @buffer: The ring buffer that the event is being written to
1398 * @event: The event meta data in the ring buffer
1399 * @entry: The event itself
1400 * @trace_ctx: The tracing context flags.
1401 *
1402 * This is a helper function to handle triggers that require data
1403 * from the event itself. It also tests the event against filters, and
1404 * checks whether the event is soft disabled and should be discarded.
1405 */
1406 static inline void
1407 event_trigger_unlock_commit(struct trace_event_file *file,
1408 struct trace_buffer *buffer,
1409 struct ring_buffer_event *event,
1410 void *entry, unsigned int trace_ctx)
1411 {
1412 enum event_trigger_type tt = ETT_NONE;
1413
1414 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1415 trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
1416
1417 if (tt)
1418 event_triggers_post_call(file, tt);
1419 }
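/*
 * Simplified sketch (illustrative; "struct my_entry", its field and
 * "value" are hypothetical) of an event commit path that uses the
 * helper above:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	event_trigger_unlock_commit(file, buffer, event, entry, trace_ctx);
 */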
1420
1421 #define FILTER_PRED_INVALID ((unsigned short)-1)
1422 #define FILTER_PRED_IS_RIGHT (1 << 15)
1423 #define FILTER_PRED_FOLD (1 << 15)
1424
1425 /*
1426 * The maximum number of preds is limited by the size of an
1427 * unsigned short with two flags at the MSBs. One bit is used for
1428 * both the IS_RIGHT and FOLD flags. The other is reserved.
1429 *
1430 * 2^14 preds is way more than enough.
1431 */
1432 #define MAX_FILTER_PRED 16384
1433
1434 struct filter_pred;
1435 struct regex;
1436
1437 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1438
1439 enum regex_type {
1440 MATCH_FULL = 0,
1441 MATCH_FRONT_ONLY,
1442 MATCH_MIDDLE_ONLY,
1443 MATCH_END_ONLY,
1444 MATCH_GLOB,
1445 MATCH_INDEX,
1446 };
1447
1448 struct regex {
1449 char pattern[MAX_FILTER_STR_VAL];
1450 int len;
1451 int field_len;
1452 regex_match_func match;
1453 };
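/*
 * filter_parse_regex() (declared below) classifies a pattern into one
 * of the regex_type values above. Illustrative examples:
 *
 *	"func"      -> MATCH_FULL
 *	"func*"     -> MATCH_FRONT_ONLY
 *	"*func"     -> MATCH_END_ONLY
 *	"*func*"    -> MATCH_MIDDLE_ONLY
 *	"fun[cd]"   -> MATCH_GLOB
 *
 * A pattern starting with a digit selects MATCH_INDEX.
 */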
1454
1455 static inline bool is_string_field(struct ftrace_event_field *field)
1456 {
1457 return field->filter_type == FILTER_DYN_STRING ||
1458 field->filter_type == FILTER_RDYN_STRING ||
1459 field->filter_type == FILTER_STATIC_STRING ||
1460 field->filter_type == FILTER_PTR_STRING ||
1461 field->filter_type == FILTER_COMM;
1462 }
1463
1464 static inline bool is_function_field(struct ftrace_event_field *field)
1465 {
1466 return field->filter_type == FILTER_TRACE_FN;
1467 }
1468
1469 extern enum regex_type
1470 filter_parse_regex(char *buff, int len, char **search, int *not);
1471 extern void print_event_filter(struct trace_event_file *file,
1472 struct trace_seq *s);
1473 extern int apply_event_filter(struct trace_event_file *file,
1474 char *filter_string);
1475 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1476 char *filter_string);
1477 extern void print_subsystem_event_filter(struct event_subsystem *system,
1478 struct trace_seq *s);
1479 extern int filter_assign_type(const char *type);
1480 extern int create_event_filter(struct trace_array *tr,
1481 struct trace_event_call *call,
1482 char *filter_str, bool set_str,
1483 struct event_filter **filterp);
1484 extern void free_event_filter(struct event_filter *filter);
1485
1486 struct ftrace_event_field *
1487 trace_find_event_field(struct trace_event_call *call, char *name);
1488
1489 extern void trace_event_enable_cmd_record(bool enable);
1490 extern void trace_event_enable_tgid_record(bool enable);
1491
1492 extern int event_trace_init(void);
1493 extern int init_events(void);
1494 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1495 extern int event_trace_del_tracer(struct trace_array *tr);
1496 extern void __trace_early_add_events(struct trace_array *tr);
1497
1498 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1499 const char *system,
1500 const char *event);
1501 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1502 const char *system,
1503 const char *event);
1504
1505 static inline void *event_file_data(struct file *filp)
1506 {
1507 return READ_ONCE(file_inode(filp)->i_private);
1508 }
1509
1510 extern struct mutex event_mutex;
1511 extern struct list_head ftrace_events;
1512
1513 extern const struct file_operations event_trigger_fops;
1514 extern const struct file_operations event_hist_fops;
1515 extern const struct file_operations event_hist_debug_fops;
1516 extern const struct file_operations event_inject_fops;
1517
1518 #ifdef CONFIG_HIST_TRIGGERS
1519 extern int register_trigger_hist_cmd(void);
1520 extern int register_trigger_hist_enable_disable_cmds(void);
1521 #else
1522 static inline int register_trigger_hist_cmd(void) { return 0; }
1523 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1524 #endif
1525
1526 extern int register_trigger_cmds(void);
1527 extern void clear_event_triggers(struct trace_array *tr);
1528
1529 enum {
1530 EVENT_TRIGGER_FL_PROBE = BIT(0),
1531 };
1532
1533 struct event_trigger_data {
1534 unsigned long count;
1535 int ref;
1536 int flags;
1537 struct event_trigger_ops *ops;
1538 struct event_command *cmd_ops;
1539 struct event_filter __rcu *filter;
1540 char *filter_str;
1541 void *private_data;
1542 bool paused;
1543 bool paused_tmp;
1544 struct list_head list;
1545 char *name;
1546 struct list_head named_list;
1547 struct event_trigger_data *named_data;
1548 };
1549
1550 /* Avoid typos */
1551 #define ENABLE_EVENT_STR "enable_event"
1552 #define DISABLE_EVENT_STR "disable_event"
1553 #define ENABLE_HIST_STR "enable_hist"
1554 #define DISABLE_HIST_STR "disable_hist"
1555
1556 struct enable_trigger_data {
1557 struct trace_event_file *file;
1558 bool enable;
1559 bool hist;
1560 };
1561
1562 extern int event_enable_trigger_print(struct seq_file *m,
1563 struct event_trigger_data *data);
1564 extern void event_enable_trigger_free(struct event_trigger_data *data);
1565 extern int event_enable_trigger_parse(struct event_command *cmd_ops,
1566 struct trace_event_file *file,
1567 char *glob, char *cmd,
1568 char *param_and_filter);
1569 extern int event_enable_register_trigger(char *glob,
1570 struct event_trigger_data *data,
1571 struct trace_event_file *file);
1572 extern void event_enable_unregister_trigger(char *glob,
1573 struct event_trigger_data *test,
1574 struct trace_event_file *file);
1575 extern void trigger_data_free(struct event_trigger_data *data);
1576 extern int event_trigger_init(struct event_trigger_data *data);
1577 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1578 int trigger_enable);
1579 extern void update_cond_flag(struct trace_event_file *file);
1580 extern int set_trigger_filter(char *filter_str,
1581 struct event_trigger_data *trigger_data,
1582 struct trace_event_file *file);
1583 extern struct event_trigger_data *find_named_trigger(const char *name);
1584 extern bool is_named_trigger(struct event_trigger_data *test);
1585 extern int save_named_trigger(const char *name,
1586 struct event_trigger_data *data);
1587 extern void del_named_trigger(struct event_trigger_data *data);
1588 extern void pause_named_trigger(struct event_trigger_data *data);
1589 extern void unpause_named_trigger(struct event_trigger_data *data);
1590 extern void set_named_trigger_data(struct event_trigger_data *data,
1591 struct event_trigger_data *named_data);
1592 extern struct event_trigger_data *
1593 get_named_trigger_data(struct event_trigger_data *data);
1594 extern int register_event_command(struct event_command *cmd);
1595 extern int unregister_event_command(struct event_command *cmd);
1596 extern int register_trigger_hist_enable_disable_cmds(void);
1597 extern bool event_trigger_check_remove(const char *glob);
1598 extern bool event_trigger_empty_param(const char *param);
1599 extern int event_trigger_separate_filter(char *param_and_filter, char **param,
1600 char **filter, bool param_required);
1601 extern struct event_trigger_data *
1602 event_trigger_alloc(struct event_command *cmd_ops,
1603 char *cmd,
1604 char *param,
1605 void *private_data);
1606 extern int event_trigger_parse_num(char *trigger,
1607 struct event_trigger_data *trigger_data);
1608 extern int event_trigger_set_filter(struct event_command *cmd_ops,
1609 struct trace_event_file *file,
1610 char *param,
1611 struct event_trigger_data *trigger_data);
1612 extern void event_trigger_reset_filter(struct event_command *cmd_ops,
1613 struct event_trigger_data *trigger_data);
1614 extern int event_trigger_register(struct event_command *cmd_ops,
1615 struct trace_event_file *file,
1616 char *glob,
1617 struct event_trigger_data *trigger_data);
1618 extern void event_trigger_unregister(struct event_command *cmd_ops,
1619 struct trace_event_file *file,
1620 char *glob,
1621 struct event_trigger_data *trigger_data);
1622
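/*
 * For orientation, the helpers above are meant to be strung together
 * by an event_command @parse() implementation.  The sketch below is
 * illustrative only (my_trigger_parse() is hypothetical and error
 * handling is trimmed, e.g. event_trigger_reset_filter() on partial
 * failure); see trace_events_trigger.c for the canonical flow.
 *
 *	static int my_trigger_parse(struct event_command *cmd_ops,
 *				    struct trace_event_file *file,
 *				    char *glob, char *cmd,
 *				    char *param_and_filter)
 *	{
 *		struct event_trigger_data *trigger_data;
 *		char *param, *filter;
 *		bool remove;
 *		int ret;
 *
 *		remove = event_trigger_check_remove(glob);
 *
 *		ret = event_trigger_separate_filter(param_and_filter,
 *						    &param, &filter, false);
 *		if (ret)
 *			return ret;
 *
 *		trigger_data = event_trigger_alloc(cmd_ops, cmd, param, NULL);
 *		if (!trigger_data)
 *			return -ENOMEM;
 *
 *		if (remove) {
 *			event_trigger_unregister(cmd_ops, file, glob,
 *						 trigger_data);
 *			trigger_data_free(trigger_data);
 *			return 0;
 *		}
 *
 *		ret = event_trigger_parse_num(param, trigger_data);
 *		if (!ret)
 *			ret = event_trigger_set_filter(cmd_ops, file,
 *						       filter, trigger_data);
 *		if (!ret)
 *			ret = event_trigger_register(cmd_ops, file, glob,
 *						     trigger_data);
 *		if (ret)
 *			trigger_data_free(trigger_data);
 *		return ret;
 *	}
 */
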
1623 /**
1624 * struct event_trigger_ops - callbacks for trace event triggers
1625 *
1626 * The methods in this structure provide per-event trigger hooks for
1627 * various trigger operations.
1628 *
1629 * The @init and @free methods are used during trigger setup and
1630 * teardown, typically called from an event_command's @parse()
1631 * function implementation.
1632 *
1633 * The @print method is used to print the trigger spec.
1634 *
1635 * The @trigger method is the function that actually implements the
1636 * trigger and is called in the context of the triggering event
1637 * whenever that event occurs.
1638 *
1639 * All the methods below, except for @init() and @free(), must be
1640 * implemented.
1641 *
1642 * @trigger: The trigger 'probe' function called when the triggering
1643 * event occurs. The data passed into this callback is the data
1644 * that was supplied to the event_command @reg() function that
1645 * registered the trigger (see struct event_command) along with
1646 * the trace record, rec.
1647 *
1648 * @init: An optional initialization function called for the trigger
1649 * when the trigger is registered (via the event_command reg()
1650 * function). This can be used to perform per-trigger
1651 * initialization such as incrementing a per-trigger reference
1652 * count, for instance. This is usually implemented by the
1653 * generic utility function @event_trigger_init() (see
1654 * trace_events_trigger.c).
1655 *
1656 * @free: An optional de-initialization function called for the
1657 * trigger when the trigger is unregistered (via the
1658 * event_command @reg() function). This can be used to perform
1659 * per-trigger de-initialization such as decrementing a
1660 * per-trigger reference count and freeing corresponding trigger
1661 * data, for instance. This is usually implemented by the
1662 * generic utility function @event_trigger_free() (see
1663 * trace_events_trigger.c).
1664 *
1665 * @print: The callback function invoked to have the trigger print
1666 * itself. This is usually implemented by a wrapper function
1667 * that calls the generic utility function @event_trigger_print()
1668 * (see trace_events_trigger.c).
1669 */
1670 struct event_trigger_ops {
1671 void (*trigger)(struct event_trigger_data *data,
1672 struct trace_buffer *buffer,
1673 void *rec,
1674 struct ring_buffer_event *rbe);
1675 int (*init)(struct event_trigger_data *data);
1676 void (*free)(struct event_trigger_data *data);
1677 int (*print)(struct seq_file *m,
1678 struct event_trigger_data *data);
1679 };
1680
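/*
 * A minimal (purely illustrative) set of trigger ops; the my_*
 * callbacks below are hypothetical.  @trigger performs the action
 * when the event fires, @print reports the trigger in the 'trigger'
 * file, and the optional @init slot can reuse the generic
 * event_trigger_init() declared above.
 *
 *	static void my_trigger(struct event_trigger_data *data,
 *			       struct trace_buffer *buffer, void *rec,
 *			       struct ring_buffer_event *rbe)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *		... perform the trigger action here ...
 *	}
 *
 *	static int my_trigger_print(struct seq_file *m,
 *				    struct event_trigger_data *data)
 *	{
 *		seq_puts(m, "my_trigger");
 *		if (data->count != -1)
 *			seq_printf(m, ":count=%ld", data->count);
 *		seq_putc(m, '\n');
 *		return 0;
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.trigger	= my_trigger,
 *		.print		= my_trigger_print,
 *		.init		= event_trigger_init,
 *	};
 */
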
1681 /**
1682 * struct event_command - callbacks and data members for event commands
1683 *
1684 * Event commands are invoked by users by writing the command name
1685 * into the 'trigger' file associated with a trace event. The
1686 * parameters associated with a specific invocation of an event
1687 * command are used to create an event trigger instance, which is
1688 * added to the list of trigger instances associated with that trace
1689 * event. When the event is hit, the set of triggers associated with
1690 * that event is invoked.
1691 *
1692 * The data members in this structure provide per-event command data
1693 * for various event commands.
1694 *
1695 * All the data members below, except for @flags, must be set
1696 * for each event command.
1697 *
1698 * @name: The unique name that identifies the event command. This is
1699 * the name used when setting triggers via trigger files.
1700 *
1701 * @trigger_type: A unique id that identifies the event command
1702 * 'type'. This value has two purposes: the first is to ensure that
1703 * only one trigger of the same type can be set at a given time
1704 * for a particular event; e.g. it doesn't make sense to have both
1705 * a traceon and traceoff trigger attached to a single event at
1706 * the same time, so traceon and traceoff have the same type
1707 * though they have different names. The @trigger_type value is
1708 * also used as a bit value for deferring the actual trigger
1709 * action until after the current event is finished. Some
1710 * commands need to do this if they themselves log to the trace
1711 * buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below). @trigger_type
1712 * values are defined by adding new values to the trigger_type
1713 * enum in include/linux/trace_events.h.
1714 *
1715 * @flags: See the enum event_command_flags below.
1716 *
1717 * All the methods below, except for @set_filter() and @unreg_all(),
1718 * must be implemented.
1719 *
1720 * @parse: The callback function responsible for parsing and
1721 * registering the trigger written to the 'trigger' file by the
1722 * user. It allocates the trigger instance and registers it with
1723 * the appropriate trace event. It makes use of the other
1724 * event_command callback functions to orchestrate this, and is
1725 * usually implemented by the generic utility function
1726 * @event_trigger_callback() (see trace_events_trigger.c).
1727 *
1728 * @reg: Adds the trigger to the list of triggers associated with the
1729 * event, and enables the event trigger itself, after
1730 * initializing it (via the event_trigger_ops @init() function).
1731 * This is also where commands can use the @trigger_type value to
1732 * make the decision as to whether or not multiple instances of
1733 * the trigger should be allowed. This is usually implemented by
1734 * the generic utility function @register_trigger() (see
1735 * trace_events_trigger.c).
1736 *
1737 * @unreg: Removes the trigger from the list of triggers associated
1738 * with the event, and disables the event trigger itself, after
1739 * releasing it (via the event_trigger_ops @free() function).
1740 * This is usually implemented by the generic utility function
1741 * @unregister_trigger() (see trace_events_trigger.c).
1742 *
1743 * @unreg_all: An optional function called to remove all the triggers
1744 * from the list of triggers associated with the event. Called
1745 * when a trigger file is opened in truncate mode.
1746 *
1747 * @set_filter: An optional function called to parse and set a filter
1748 * for the trigger. If no @set_filter() method is set for the
1749 * event command, filters set by the user for the command will be
1750 * ignored. This is usually implemented by the generic utility
1751 * function @set_trigger_filter() (see trace_events_trigger.c).
1752 *
1753 * @get_trigger_ops: The callback function invoked to retrieve the
1754 * event_trigger_ops implementation associated with the command.
1755 * This callback function allows a single event_command to
1756 * support multiple trigger implementations via different sets of
1757 * event_trigger_ops, depending on the value of the @param
1758 * string.
1759 */
1760 struct event_command {
1761 struct list_head list;
1762 char *name;
1763 enum event_trigger_type trigger_type;
1764 int flags;
1765 int (*parse)(struct event_command *cmd_ops,
1766 struct trace_event_file *file,
1767 char *glob, char *cmd,
1768 char *param_and_filter);
1769 int (*reg)(char *glob,
1770 struct event_trigger_data *data,
1771 struct trace_event_file *file);
1772 void (*unreg)(char *glob,
1773 struct event_trigger_data *data,
1774 struct trace_event_file *file);
1775 void (*unreg_all)(struct trace_event_file *file);
1776 int (*set_filter)(char *filter_str,
1777 struct event_trigger_data *data,
1778 struct trace_event_file *file);
1779 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1780 };
1781
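/*
 * Wiring it together (illustrative sketch: symbols prefixed my_ are
 * hypothetical, ETT_TRACE_ONOFF merely stands in for a value from
 * enum event_trigger_type, and @reg/@unreg would normally be the
 * generic register_trigger()/unregister_trigger() helpers in
 * trace_events_trigger.c): a command is defined once and registered
 * at init time, after which "my_trigger" can be written to any
 * event's 'trigger' file.
 *
 *	static struct event_command my_trigger_cmd = {
 *		.name			= "my_trigger",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.flags			= EVENT_CMD_FL_NEEDS_REC,
 *		.parse			= my_trigger_parse,
 *		.reg			= my_trigger_reg,
 *		.unreg			= my_trigger_unreg,
 *		.set_filter		= set_trigger_filter,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *	};
 *
 *	static __init int my_trigger_cmd_init(void)
 *	{
 *		return register_event_command(&my_trigger_cmd);
 *	}
 */
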
1782 /**
1783 * enum event_command_flags - flags for struct event_command
1784 *
1785 * @POST_TRIGGER: A flag that says whether or not this command needs
1786 * to have its action delayed until after the current event has
1787 * been closed. Some triggers need to avoid being invoked while
1788 * an event is currently in the process of being logged, since
1789 * the trigger may itself log data into the trace buffer. Thus
1790 * we make sure the current event is committed before invoking
1791 * those triggers. To do that, the trigger invocation is split
1792 * in two - the first part checks the filter using the current
1793 * trace record; if a command has the @post_trigger flag set, it
1794 * sets a bit for itself in the return value, otherwise it
1795 * directly invokes the trigger. Once all commands have been
1796 * either invoked or set their return flag, the current record is
1797 * either committed or discarded. At that point, if any commands
1798 * have deferred their triggers, those commands are finally
1799 * invoked following the close of the current event. In other
1800 * words, if the event_trigger_ops @trigger() probe implementation
1801 * itself logs to the trace buffer, this flag should be set,
1802 * otherwise it can be left unspecified.
1803 *
1804 * @NEEDS_REC: A flag that says whether or not this command needs
1805 * access to the trace record in order to perform its function,
1806 * regardless of whether or not it has a filter associated with
1807 * it (filters make a trigger require access to the trace record
1808 * but are not always present).
1809 */
1810 enum event_command_flags {
1811 EVENT_CMD_FL_POST_TRIGGER = 1,
1812 EVENT_CMD_FL_NEEDS_REC = 2,
1813 };
1814
1815 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1816 {
1817 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1818 }
1819
1820 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1821 {
1822 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1823 }
1824
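/*
 * These helpers are consulted when an event fires: a command flagged
 * EVENT_CMD_FL_POST_TRIGGER only contributes its @trigger_type bit to
 * a deferred mask and runs after the record is committed; everything
 * else runs immediately.  Roughly (simplified from the trigger
 * dispatch in trace_events_trigger.c):
 *
 *	if (event_command_post_trigger(data->cmd_ops))
 *		tt |= data->cmd_ops->trigger_type;
 *	else
 *		data->ops->trigger(data, buffer, rec, rbe);
 */
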
1825 extern int trace_event_enable_disable(struct trace_event_file *file,
1826 int enable, int soft_disable);
1827 extern int tracing_alloc_snapshot(void);
1828 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1829 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1830
1831 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1832 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1833
1834 extern const char *__start___trace_bprintk_fmt[];
1835 extern const char *__stop___trace_bprintk_fmt[];
1836
1837 extern const char *__start___tracepoint_str[];
1838 extern const char *__stop___tracepoint_str[];
1839
1840 void trace_printk_control(bool enabled);
1841 void trace_printk_start_comm(void);
1842 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1843 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1844
1845 /* Used from boot time tracer */
1846 extern int trace_set_options(struct trace_array *tr, char *option);
1847 extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1848 extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1849 unsigned long size, int cpu_id);
1850 extern int tracing_set_cpumask(struct trace_array *tr,
1851 cpumask_var_t tracing_cpumask_new);
1852
1853
1854 #define MAX_EVENT_NAME_LEN 64
1855
1856 extern ssize_t trace_parse_run_command(struct file *file,
1857 const char __user *buffer, size_t count, loff_t *ppos,
1858 int (*createfn)(const char *));
1859
1860 extern unsigned int err_pos(char *cmd, const char *str);
1861 extern void tracing_log_err(struct trace_array *tr,
1862 const char *loc, const char *cmd,
1863 const char **errs, u8 type, u16 pos);
1864
1865 /*
1866 * Normal trace_printk() and friends allocate special buffers
1867 * to do the manipulation, as well as save the print formats
1868 * into sections for display. But the trace infrastructure wants
1869 * to use these without the added overhead, at the price of being
1870 * a bit slower (they are used mainly for warnings, where we don't
1871 * care about performance). internal_trace_puts() exists for such
1872 * a purpose.
1873 */
1874 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
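
/*
 * For example (illustrative): report an internal problem without the
 * usual trace_printk() buffer setup:
 *
 *	internal_trace_puts("*** ring buffer resize failed ***\n");
 */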
1875
1876 #undef FTRACE_ENTRY
1877 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
1878 extern struct trace_event_call \
1879 __aligned(4) event_##call;
1880 #undef FTRACE_ENTRY_DUP
1881 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
1882 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1883 #undef FTRACE_ENTRY_PACKED
1884 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1885 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1886
1887 #include "trace_entries.h"
1888
1889 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1890 int perf_ftrace_event_register(struct trace_event_call *call,
1891 enum trace_reg type, void *data);
1892 #else
1893 #define perf_ftrace_event_register NULL
1894 #endif
1895
1896 #ifdef CONFIG_FTRACE_SYSCALLS
1897 void init_ftrace_syscalls(void);
1898 const char *get_syscall_name(int syscall);
1899 #else
1900 static inline void init_ftrace_syscalls(void) { }
1901 static inline const char *get_syscall_name(int syscall)
1902 {
1903 return NULL;
1904 }
1905 #endif
1906
1907 #ifdef CONFIG_EVENT_TRACING
1908 void trace_event_init(void);
1909 void trace_event_eval_update(struct trace_eval_map **map, int len);
1910 /* Used from boot time tracer */
1911 extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
1912 extern int trigger_process_regex(struct trace_event_file *file, char *buff);
1913 #else
1914 static inline void __init trace_event_init(void) { }
1915 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1916 #endif
1917
1918 #ifdef CONFIG_TRACER_SNAPSHOT
1919 void tracing_snapshot_instance(struct trace_array *tr);
1920 int tracing_alloc_snapshot_instance(struct trace_array *tr);
1921 #else
1922 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1923 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1924 {
1925 return 0;
1926 }
1927 #endif
1928
1929 #ifdef CONFIG_PREEMPT_TRACER
1930 void tracer_preempt_on(unsigned long a0, unsigned long a1);
1931 void tracer_preempt_off(unsigned long a0, unsigned long a1);
1932 #else
1933 static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1934 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1935 #endif
1936 #ifdef CONFIG_IRQSOFF_TRACER
1937 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1938 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1939 #else
1940 static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1941 static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1942 #endif
1943
1944 extern struct trace_iterator *tracepoint_print_iter;
1945
1946 /*
1947 * Reset the state of the trace_iterator so that it can read consumed data.
1948 * Normally, the trace_iterator is used for reading the data when it is not
1949 * consumed, and must retain state.
1950 */
1951 static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1952 {
1953 memset_startat(iter, 0, seq);
1954 iter->pos = -1;
1955 }
1956
1957 /* Check the name is good for event/group/fields */
1958 static inline bool __is_good_name(const char *name, bool hash_ok)
1959 {
1960 if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
1961 return false;
1962 while (*++name != '\0') {
1963 if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
1964 (!hash_ok || *name != '-'))
1965 return false;
1966 }
1967 return true;
1968 }
1969
1970 /* Check the name is good for event/group/fields */
1971 static inline bool is_good_name(const char *name)
1972 {
1973 return __is_good_name(name, false);
1974 }
1975
1976 /* Check the name is good for system */
1977 static inline bool is_good_system_name(const char *name)
1978 {
1979 return __is_good_name(name, true);
1980 }
1981
1982 /* Convert certain expected symbols into '_' when generating event names */
1983 static inline void sanitize_event_name(char *name)
1984 {
1985 while (*name++ != '\0')
1986 if (*name == ':' || *name == '.')
1987 *name = '_';
1988 }
1989
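/*
 * For example (illustrative only):
 *
 *	is_good_name("sched_switch")	 -> true
 *	is_good_name("9to5")		 -> false (leading digit)
 *	is_good_name("foo-bar")		 -> false ('-' is only valid in
 *						   system names)
 *	is_good_system_name("foo-bar")	 -> true
 *
 *	char buf[] = "usb:gadget.ep0";
 *	sanitize_event_name(buf);	(buf becomes "usb_gadget_ep0")
 */
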
1990 /*
1991 * This is a generic way to read and write a u64 value from a file in tracefs.
1992 *
1993 * The value is stored in the variable pointed to by *val. The value needs
1994 * to be at least *min and at most *max. The write is protected by an
1995 * existing *lock.
1996 */
1997 struct trace_min_max_param {
1998 struct mutex *lock;
1999 u64 *val;
2000 u64 *min;
2001 u64 *max;
2002 };
2003
2004 #define U64_STR_SIZE 24 /* 20 digits max */
2005
2006 extern const struct file_operations trace_min_max_fops;
2007
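/*
 * A typical use (illustrative sketch; my_* names are hypothetical and
 * 'parent' stands for whatever tracefs dentry the caller owns): expose
 * a clamped u64 knob backed by trace_min_max_fops.
 *
 *	static u64 my_val;
 *	static u64 my_min = 1;
 *	static u64 my_max = 1000000;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	tracefs_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			    &my_param, &trace_min_max_fops);
 */
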
2008 #ifdef CONFIG_RV
2009 extern int rv_init_interface(void);
2010 #else
2011 static inline int rv_init_interface(void)
2012 {
2013 return 0;
2014 }
2015 #endif
2016
2017 #endif /* _LINUX_KERNEL_TRACE_H */
2018