// SPDX-License-Identifier: GPL-2.0
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE;

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->buf);

	return trace_handle_return(s);
}

const char *
trace_print_flags_seq(struct trace_seq *p, const char *delim,
		      unsigned long flags,
		      const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for leftover flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq);
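/*
 * Illustrative sketch (not part of this file): the usual way to reach
 * trace_print_flags_seq() is via the __print_flags() helper used in an
 * event's TP_printk().  Assuming an event that records a "flags" field,
 * something like
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->flags, "|",
 *				{ (1UL << 0), "READ" },
 *				{ (1UL << 1), "SYNC" }))
 *
 * builds a NULL-terminated trace_print_flags array and hands it to this
 * function, so any bits without a name end up as the trailing hex value.
 */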

const char *
trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq);
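/*
 * Illustrative sketch (not part of this file): __print_symbolic() in a
 * TP_printk() is the common front end for trace_print_symbols_seq(),
 * mapping a single value to a name, e.g. with hypothetical states
 *
 *	TP_printk("state=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" },
 *				   { 1, "RUNNING" }))
 *
 * A value with no matching entry falls through to the "0x%lx" output.
 */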

#if BITS_PER_LONG == 32
const char *
trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
			  unsigned long long flags,
			  const struct trace_print_flags_u64 *flag_array)
{
	unsigned long long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for leftover flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%llx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq_u64);

const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
#endif

const char *
trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
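/*
 * Illustrative sketch (an assumption about the usual caller, not part of
 * this file): events that record a bitmask with the __bitmask() /
 * __get_bitmask() TP_printk() helpers end up here, with @bitmask_size
 * being the size of the saved mask in bytes, e.g. something along the
 * lines of
 *
 *	TP_STRUCT__entry(__bitmask(cpus, num_possible_cpus()))
 *	TP_printk("cpus=%s", __get_bitmask(cpus))
 *
 * which renders the mask via trace_seq_bitmask() in the usual bitmap
 * ("%*pb") hex format.
 */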

/**
 * trace_print_hex_seq - print buffer as hex sequence
 * @p: trace seq struct to write to
 * @buf: The buffer to print
 * @buf_len: Length of @buf in bytes
 * @concatenate: Print @buf as single hex string or with spacing
 *
 * Prints the passed buffer as a hex sequence, either as a single
 * contiguous hex string if @concatenate is true, or with a space
 * after each byte if @concatenate is false.
 */
const char *
trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
		    bool concatenate)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);
	const char *fmt = concatenate ? "%*phN" : "%*ph";

	for (i = 0; i < buf_len; i += 16)
		trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_hex_seq);
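/*
 * Illustrative sketch (not part of this file): the two TP_printk()
 * helpers commonly used to reach trace_print_hex_seq() are __print_hex()
 * (spaced, @concatenate == false) and __print_hex_str() (one contiguous
 * string, @concatenate == true), e.g.
 *
 *	TP_printk("payload=%s", __print_hex(__entry->buf, __entry->len))
 *
 * would render a 3-byte buffer as "de ad ff", while __print_hex_str()
 * would render it as "deadff".
 */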

const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			el_size = 1;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);
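/*
 * Illustrative sketch (not part of this file): __print_array() in a
 * TP_printk() is the usual caller, passing the element count and element
 * size recorded with the event, e.g. for a hypothetical array of four
 * u32 values
 *
 *	TP_printk("vals=%s",
 *		  __print_array(__get_dynamic_array(vals), 4, sizeof(u32)))
 *
 * which would print something like "{0x1,0x2,0x3,0x4}".  Element sizes
 * other than 1, 2, 4 or 8 fall into the "BAD SIZE" branch above.
 */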

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_putc(p, '\n');
	trace_seq_hex_dump(p, prefix_str, prefix_type,
			   rowsize, groupsize, buf, len, ascii);
	trace_seq_putc(p, 0);
	return ret;
}
EXPORT_SYMBOL(trace_print_hex_dump_seq);
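/*
 * Illustrative sketch (not part of this file): __print_hex_dump() in a
 * TP_printk() forwards its arguments here; they mirror print_hex_dump(),
 * e.g. with a hypothetical recorded buffer
 *
 *	TP_printk("%s", __print_hex_dump("", DUMP_PREFIX_OFFSET, 16, 1,
 *					 __entry->buf, __entry->len, false))
 *
 * so the event line is followed by a classic offset-prefixed hex dump of
 * the buffer.
 */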

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *trace_event)
{
	struct trace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct trace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", trace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(trace_raw_output_prep);

void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
	va_end(ap);
}
EXPORT_SYMBOL(trace_event_printf);

static int trace_output_raw(struct trace_iterator *iter, char *name,
			    char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);

	return trace_handle_return(s);
}

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);

static inline const char *kretprobed(const char *name, unsigned long addr)
{
	if (is_kretprobe_trampoline(addr))
		return "[unknown/kretprobe'd]";
	return name;
}

void
trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	if (offset)
		sprint_symbol(str, address);
	else
		kallsyms_lookup(address, NULL, NULL, NULL, str);
	name = kretprobed(str, address);

	if (name && strlen(name)) {
		trace_seq_puts(s, name);
		return;
	}
#endif
	trace_seq_printf(s, "0x%08lx", address);
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		mmap_read_unlock(mm);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		trace_seq_putc(s, '0');
		goto out;
	}

	trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <" IP_FMT ">", ip);

out:
	return !trace_seq_has_overflowed(s);
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic latency-format fields: whether irqs are off,
 * hard/soft interrupt context, need-resched state and the preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int bh_off;
	int nmi;

	nmi = entry->flags & TRACE_FLAG_NMI;
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	bh_off = entry->flags & TRACE_FLAG_BH_OFF;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		bh_off ? 'b' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(nmi && hardirq)     ? 'Z' :
		nmi                  ? 'z' :
		(hardirq && softirq) ? 'H' :
		hardirq              ? 'h' :
		softirq              ? 's' :
		                       '.' ;

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	if (entry->preempt_count & 0xf)
		trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
	else
		trace_seq_putc(s, '.');

	if (entry->preempt_count & 0xf0)
		trace_seq_printf(s, "%x", entry->preempt_count >> 4);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}
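/*
 * For reference, a sketch of how the resulting column reads (derived from
 * the code above, not a quote from the documentation): an entry printed
 * as "dNh1." means irqs are disabled ('d'), both TRACE_FLAG_NEED_RESCHED
 * and TRACE_FLAG_PREEMPT_RESCHED are set ('N'), the event happened in
 * hard irq context ('h'), the low preempt-count nibble is 1 and the high
 * nibble is 0 ('.').
 */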

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-7d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}

#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK

char trace_find_mark(unsigned long long d)
{
	int i;
	int size = ARRAY_SIZE(mark);

	for (i = 0; i < size; i++) {
		if (d > mark[i].val)
			break;
	}

	return (i == size) ? ' ' : mark[i].sym;
}
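/*
 * Worked example (derived from the table above): a delta of 150 usecs
 * (150000 ns) is larger than the 100 usec threshold but none of the
 * bigger ones, so trace_find_mark() returns '!'; a 5 usec delta matches
 * nothing and gets a plain ' '.
 */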

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	struct trace_array *tr = iter->tr;
	unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}

static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
			     unsigned long long ts)
{
	unsigned long secs, usec_rem;
	unsigned long long t;

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu", ts);
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);

	if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
		unsigned int tgid = trace_find_tgid(entry->pid);

		if (!tgid)
			trace_seq_printf(s, "(-------) ");
		else
			trace_seq_printf(s, "(%7d) ", tgid);
	}

	trace_seq_printf(s, "[%03d] ", iter->cpu);

	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
		trace_print_lat_fmt(s, entry);

	trace_print_time(s, iter, iter->ts);
	trace_seq_puts(s, ": ");

	return !trace_seq_has_overflowed(s);
}
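/*
 * Putting the pieces together, a default trace line header produced by
 * this function looks roughly like (illustrative, column widths as per
 * the format strings above):
 *
 *            bash-1234    [002] d.h1.   601.020424: ...
 *
 * i.e. comm-pid, an optional "(tgid)" column, "[cpu]", the latency-format
 * flags when TRACE_ITER_IRQ_INFO is set, and the timestamp.
 */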

int trace_print_lat_context(struct trace_iterator *iter)
{
	struct trace_entry *entry, *next_entry;
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
	u64 next_ts;

	next_entry = trace_find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;

	/* trace_find_next_entry() may change iter->ent */
	entry = iter->ent;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %7d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count & 0xf, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e = NULL, *iter;
	int next = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return next;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(iter, &ftrace_event_list, list) {
		if (iter->type != next) {
			e = iter;
			break;
		}
		next++;
	}

	/* Did we use up all 65 thousand events??? */
	if (next > TRACE_EVENT_TYPE_MAX)
		return 0;

	if (e)
		*list = &e->list;
	else
		*list = &ftrace_event_list;
	return next;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > TRACE_EVENT_TYPE_MAX) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (WARN(event->type > __TRACE_LAST_TYPE,
			"Need to add type to trace.h")) {
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);
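/*
 * Illustrative sketch (not from this file): a typical dynamic user leaves
 * ->type zero so a number is assigned, provides at least a ->trace()
 * callback, and pairs the call with unregister_trace_event() on teardown:
 *
 *	static enum print_line_t my_output(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_puts(&iter->seq, "my event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 *
 *	static struct trace_event_functions my_funcs = { .trace = my_output };
 *	static struct trace_event my_event = { .funcs = &my_funcs };
 *
 *	if (!register_trace_event(&my_event))
 *		return -ENODEV;		(a return value of zero means failure)
 */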

/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_trace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_trace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_trace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);

	return trace_handle_return(&iter->seq);
}

static void print_fn_trace(struct trace_seq *s, unsigned long ip,
			   unsigned long parent_ip, int flags)
{
	seq_print_ip_sym(s, ip, flags);

	if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
		trace_seq_puts(s, " <-");
		seq_print_ip_sym(s, parent_ip, flags);
	}
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	print_fn_trace(s, field->ip, field->parent_ip, flags);
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "%lx %lx\n",
			 field->ip,
			 field->parent_ip);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->ip);
	SEQ_PUT_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_index_to_char(field->next_state);
	S = task_index_to_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	trace_seq_printf(&iter->seq,
			 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
			 field->prev_pid,
			 field->prev_prio,
			 S, delim,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T, comm);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, " +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}


static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_ip_sym(s, *p, flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;
	struct mm_struct *mm = NULL;
	unsigned int i;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");

	if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = field->caller[i];

		if (!ip || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_user_ip(s, mm, ip, flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_HWLAT */
static enum print_line_t
trace_hwlat_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct hwlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
			 field->seqnum,
			 field->duration,
			 field->outer_duration,
			 (long long)field->timestamp.tv_sec,
			 field->timestamp.tv_nsec, field->count);

	if (field->nmi_count) {
		/*
		 * The generic sched_clock() is not NMI safe, thus
		 * we only record the count and not the time.
		 */
		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
			trace_seq_printf(s, " nmi-total:%llu",
					 field->nmi_total_ts);
		trace_seq_printf(s, " nmi-count:%u",
				 field->nmi_count);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct hwlat_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
			 field->duration,
			 field->outer_duration,
			 (long long)field->timestamp.tv_sec,
			 field->timestamp.tv_nsec,
			 field->seqnum);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_hwlat_funcs = {
	.trace		= trace_hwlat_print,
	.raw		= trace_hwlat_raw,
};

static struct trace_event trace_hwlat_event = {
	.type		= TRACE_HWLAT,
	.funcs		= &trace_hwlat_funcs,
};

/* TRACE_OSNOISE */
static enum print_line_t
trace_osnoise_print(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct osnoise_entry *field;
	u64 ratio, ratio_dec;
	u64 net_runtime;

	trace_assign_type(field, entry);

	/*
	 * compute the available % of cpu time.
	 */
	net_runtime = field->runtime - field->noise;
	ratio = net_runtime * 10000000;
	do_div(ratio, field->runtime);
	ratio_dec = do_div(ratio, 100000);

	trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
			 field->runtime,
			 field->noise,
			 ratio, ratio_dec,
			 field->max_sample);

	trace_seq_printf(s, " %6u", field->hw_count);
	trace_seq_printf(s, " %6u", field->nmi_count);
	trace_seq_printf(s, " %6u", field->irq_count);
	trace_seq_printf(s, " %6u", field->softirq_count);
	trace_seq_printf(s, " %6u", field->thread_count);

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
trace_osnoise_raw(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct osnoise_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
			 field->runtime,
			 field->noise,
			 field->max_sample,
			 field->hw_count,
			 field->nmi_count,
			 field->irq_count,
			 field->softirq_count,
			 field->thread_count);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_osnoise_funcs = {
	.trace		= trace_osnoise_print,
	.raw		= trace_osnoise_raw,
};

static struct trace_event trace_osnoise_event = {
	.type		= TRACE_OSNOISE,
	.funcs		= &trace_osnoise_funcs,
};

/* TRACE_TIMERLAT */
static enum print_line_t
trace_timerlat_print(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct timerlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
			 field->seqnum,
			 field->context ? "thread" : "irq",
			 field->timer_latency);

	return trace_handle_return(s);
}

static enum print_line_t
trace_timerlat_raw(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct timerlat_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%u %d %llu\n",
			 field->seqnum,
			 field->context,
			 field->timer_latency);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_timerlat_funcs = {
	.trace		= trace_timerlat_print,
	.raw		= trace_timerlat_raw,
};

static struct trace_event trace_timerlat_event = {
	.type		= TRACE_TIMERLAT,
	.funcs		= &trace_timerlat_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}


static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}


static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_printf(s, ": %s", field->buf);

	return trace_handle_return(s);
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct raw_data_entry *field;
	int i;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %x buf:", field->id);

	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
		trace_seq_printf(&iter->seq, " %02x",
				 (unsigned char)field->buf[i]);

	trace_seq_putc(&iter->seq, '\n');

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_raw_data_funcs = {
	.trace		= trace_raw_data,
	.raw		= trace_raw_data,
};

static struct trace_event trace_raw_data_event = {
	.type		= TRACE_RAW_DATA,
	.funcs		= &trace_raw_data_funcs,
};

static enum print_line_t
trace_func_repeats_raw(struct trace_iterator *iter, int flags,
		       struct trace_event *event)
{
	struct func_repeats_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%lu %lu %u %llu\n",
			 field->ip,
			 field->parent_ip,
			 field->count,
			 FUNC_REPEATS_GET_DELTA_TS(field));

	return trace_handle_return(s);
}

static enum print_line_t
trace_func_repeats_print(struct trace_iterator *iter, int flags,
			 struct trace_event *event)
{
	struct func_repeats_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	print_fn_trace(s, field->ip, field->parent_ip, flags);
	trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
	trace_print_time(s, iter,
			 iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
	trace_seq_puts(s, ")\n");

	return trace_handle_return(s);
}

static struct trace_event_functions trace_func_repeats_funcs = {
	.trace		= trace_func_repeats_print,
	.raw		= trace_func_repeats_raw,
};

static struct trace_event trace_func_repeats_event = {
	.type		= TRACE_FUNC_REPEATS,
	.funcs		= &trace_func_repeats_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	&trace_hwlat_event,
	&trace_osnoise_event,
	&trace_timerlat_event,
	&trace_raw_data_event,
	&trace_func_repeats_event,
	NULL
};

__init int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];
		ret = register_trace_event(event);
		WARN_ONCE(!ret, "event %d failed to register", event->type);
	}

	return 0;
}