1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_synth - synthetic trace events
4 *
5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20
21 #include "trace_synth.h"
22
23 #undef ERRORS
24 #define ERRORS \
25 C(BAD_NAME, "Illegal name"), \
26 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
27 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
28 C(EVENT_EXISTS, "Event already exists"), \
29 C(TOO_MANY_FIELDS, "Too many fields"), \
30 C(INCOMPLETE_TYPE, "Incomplete type"), \
31 C(INVALID_TYPE, "Invalid type"), \
32 C(INVALID_FIELD, "Invalid field"), \
33 C(INVALID_ARRAY_SPEC, "Invalid array specification"),
34
35 #undef C
36 #define C(a, b) SYNTH_ERR_##a
37
38 enum { ERRORS };
39
40 #undef C
41 #define C(a, b) b
42
43 static const char *err_text[] = { ERRORS };
44
45 static char *last_cmd;
46
static int errpos(const char *str)
48 {
49 if (!str || !last_cmd)
50 return 0;
51
52 return err_pos(last_cmd, str);
53 }
54
static void last_cmd_set(const char *str)
56 {
57 if (!str)
58 return;
59
60 kfree(last_cmd);
61
62 last_cmd = kstrdup(str, GFP_KERNEL);
63 }
64
static void synth_err(u8 err_type, u16 err_pos)
66 {
67 if (!last_cmd)
68 return;
69
70 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
71 err_type, err_pos);
72 }
73
74 static int create_synth_event(const char *raw_command);
75 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
76 static int synth_event_release(struct dyn_event *ev);
77 static bool synth_event_is_busy(struct dyn_event *ev);
78 static bool synth_event_match(const char *system, const char *event,
79 int argc, const char **argv, struct dyn_event *ev);
80
81 static struct dyn_event_operations synth_event_ops = {
82 .create = create_synth_event,
83 .show = synth_event_show,
84 .is_busy = synth_event_is_busy,
85 .free = synth_event_release,
86 .match = synth_event_match,
87 };
88
static bool is_synth_event(struct dyn_event *ev)
90 {
91 return ev->ops == &synth_event_ops;
92 }
93
static struct synth_event *to_synth_event(struct dyn_event *ev)
95 {
96 return container_of(ev, struct synth_event, devent);
97 }
98
static bool synth_event_is_busy(struct dyn_event *ev)
100 {
101 struct synth_event *event = to_synth_event(ev);
102
103 return event->ref != 0;
104 }
105
static bool synth_event_match(const char *system, const char *event,
107 int argc, const char **argv, struct dyn_event *ev)
108 {
109 struct synth_event *sev = to_synth_event(ev);
110
111 return strcmp(sev->name, event) == 0 &&
112 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
113 }
114
115 struct synth_trace_event {
116 struct trace_entry ent;
117 u64 fields[];
118 };
119
static int synth_event_define_fields(struct trace_event_call *call)
121 {
122 struct synth_trace_event trace;
123 int offset = offsetof(typeof(trace), fields);
124 struct synth_event *event = call->data;
125 unsigned int i, size, n_u64;
126 char *name, *type;
127 bool is_signed;
128 int ret = 0;
129
130 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
131 size = event->fields[i]->size;
132 is_signed = event->fields[i]->is_signed;
133 type = event->fields[i]->type;
134 name = event->fields[i]->name;
135 ret = trace_define_field(call, type, name, offset, size,
136 is_signed, FILTER_OTHER);
137 if (ret)
138 break;
139
140 event->fields[i]->offset = n_u64;
141
142 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
143 offset += STR_VAR_LEN_MAX;
144 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
145 } else {
146 offset += sizeof(u64);
147 n_u64++;
148 }
149 }
150
151 event->n_u64 = n_u64;
152
153 return ret;
154 }
155
static bool synth_field_signed(char *type)
157 {
158 if (str_has_prefix(type, "u"))
159 return false;
160 if (strcmp(type, "gfp_t") == 0)
161 return false;
162
163 return true;
164 }
165
static int synth_field_is_string(char *type)
167 {
168 if (strstr(type, "char[") != NULL)
169 return true;
170
171 return false;
172 }
173
static int synth_field_string_size(char *type)
175 {
176 char buf[4], *end, *start;
177 unsigned int len;
178 int size, err;
179
180 start = strstr(type, "char[");
181 if (start == NULL)
182 return -EINVAL;
183 start += sizeof("char[") - 1;
184
185 end = strchr(type, ']');
186 if (!end || end < start || type + strlen(type) > end + 1)
187 return -EINVAL;
188
189 len = end - start;
190 if (len > 3)
191 return -EINVAL;
192
193 if (len == 0)
194 return 0; /* variable-length string */
195
196 strncpy(buf, start, len);
197 buf[len] = '\0';
198
199 err = kstrtouint(buf, 0, &size);
200 if (err)
201 return err;
202
203 if (size > STR_VAR_LEN_MAX)
204 return -EINVAL;
205
206 return size;
207 }
208
static int synth_field_size(char *type)
210 {
211 int size = 0;
212
213 if (strcmp(type, "s64") == 0)
214 size = sizeof(s64);
215 else if (strcmp(type, "u64") == 0)
216 size = sizeof(u64);
217 else if (strcmp(type, "s32") == 0)
218 size = sizeof(s32);
219 else if (strcmp(type, "u32") == 0)
220 size = sizeof(u32);
221 else if (strcmp(type, "s16") == 0)
222 size = sizeof(s16);
223 else if (strcmp(type, "u16") == 0)
224 size = sizeof(u16);
225 else if (strcmp(type, "s8") == 0)
226 size = sizeof(s8);
227 else if (strcmp(type, "u8") == 0)
228 size = sizeof(u8);
229 else if (strcmp(type, "char") == 0)
230 size = sizeof(char);
231 else if (strcmp(type, "unsigned char") == 0)
232 size = sizeof(unsigned char);
233 else if (strcmp(type, "int") == 0)
234 size = sizeof(int);
235 else if (strcmp(type, "unsigned int") == 0)
236 size = sizeof(unsigned int);
237 else if (strcmp(type, "long") == 0)
238 size = sizeof(long);
239 else if (strcmp(type, "unsigned long") == 0)
240 size = sizeof(unsigned long);
241 else if (strcmp(type, "bool") == 0)
242 size = sizeof(bool);
243 else if (strcmp(type, "pid_t") == 0)
244 size = sizeof(pid_t);
245 else if (strcmp(type, "gfp_t") == 0)
246 size = sizeof(gfp_t);
247 else if (synth_field_is_string(type))
248 size = synth_field_string_size(type);
249
250 return size;
251 }
252
static const char *synth_field_fmt(char *type)
254 {
255 const char *fmt = "%llu";
256
257 if (strcmp(type, "s64") == 0)
258 fmt = "%lld";
259 else if (strcmp(type, "u64") == 0)
260 fmt = "%llu";
261 else if (strcmp(type, "s32") == 0)
262 fmt = "%d";
263 else if (strcmp(type, "u32") == 0)
264 fmt = "%u";
265 else if (strcmp(type, "s16") == 0)
266 fmt = "%d";
267 else if (strcmp(type, "u16") == 0)
268 fmt = "%u";
269 else if (strcmp(type, "s8") == 0)
270 fmt = "%d";
271 else if (strcmp(type, "u8") == 0)
272 fmt = "%u";
273 else if (strcmp(type, "char") == 0)
274 fmt = "%d";
275 else if (strcmp(type, "unsigned char") == 0)
276 fmt = "%u";
277 else if (strcmp(type, "int") == 0)
278 fmt = "%d";
279 else if (strcmp(type, "unsigned int") == 0)
280 fmt = "%u";
281 else if (strcmp(type, "long") == 0)
282 fmt = "%ld";
283 else if (strcmp(type, "unsigned long") == 0)
284 fmt = "%lu";
285 else if (strcmp(type, "bool") == 0)
286 fmt = "%d";
287 else if (strcmp(type, "pid_t") == 0)
288 fmt = "%d";
289 else if (strcmp(type, "gfp_t") == 0)
290 fmt = "%x";
291 else if (synth_field_is_string(type))
292 fmt = "%.*s";
293
294 return fmt;
295 }
296
static void print_synth_event_num_val(struct trace_seq *s,
298 char *print_fmt, char *name,
299 int size, u64 val, char *space)
300 {
301 switch (size) {
302 case 1:
303 trace_seq_printf(s, print_fmt, name, (u8)val, space);
304 break;
305
306 case 2:
307 trace_seq_printf(s, print_fmt, name, (u16)val, space);
308 break;
309
310 case 4:
311 trace_seq_printf(s, print_fmt, name, (u32)val, space);
312 break;
313
314 default:
315 trace_seq_printf(s, print_fmt, name, val, space);
316 break;
317 }
318 }
319
static enum print_line_t print_synth_event(struct trace_iterator *iter,
321 int flags,
322 struct trace_event *event)
323 {
324 struct trace_array *tr = iter->tr;
325 struct trace_seq *s = &iter->seq;
326 struct synth_trace_event *entry;
327 struct synth_event *se;
328 unsigned int i, n_u64;
329 char print_fmt[32];
330 const char *fmt;
331
332 entry = (struct synth_trace_event *)iter->ent;
333 se = container_of(event, struct synth_event, call.event);
334
335 trace_seq_printf(s, "%s: ", se->name);
336
337 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
338 if (trace_seq_has_overflowed(s))
339 goto end;
340
341 fmt = synth_field_fmt(se->fields[i]->type);
342
343 /* parameter types */
344 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
345 trace_seq_printf(s, "%s ", fmt);
346
347 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
348
349 /* parameter values */
350 if (se->fields[i]->is_string) {
351 if (se->fields[i]->is_dynamic) {
352 u32 offset, data_offset;
353 char *str_field;
354
355 offset = (u32)entry->fields[n_u64];
356 data_offset = offset & 0xffff;
357
358 str_field = (char *)entry + data_offset;
359
360 trace_seq_printf(s, print_fmt, se->fields[i]->name,
361 STR_VAR_LEN_MAX,
362 str_field,
363 i == se->n_fields - 1 ? "" : " ");
364 n_u64++;
365 } else {
366 trace_seq_printf(s, print_fmt, se->fields[i]->name,
367 STR_VAR_LEN_MAX,
368 (char *)&entry->fields[n_u64],
369 i == se->n_fields - 1 ? "" : " ");
370 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
371 }
372 } else {
373 struct trace_print_flags __flags[] = {
374 __def_gfpflag_names, {-1, NULL} };
375 char *space = (i == se->n_fields - 1 ? "" : " ");
376
377 print_synth_event_num_val(s, print_fmt,
378 se->fields[i]->name,
379 se->fields[i]->size,
380 entry->fields[n_u64],
381 space);
382
383 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
384 trace_seq_puts(s, " (");
385 trace_print_flags_seq(s, "|",
386 entry->fields[n_u64],
387 __flags);
388 trace_seq_putc(s, ')');
389 }
390 n_u64++;
391 }
392 }
393 end:
394 trace_seq_putc(s, '\n');
395
396 return trace_handle_return(s);
397 }
398
399 static struct trace_event_functions synth_event_funcs = {
400 .trace = print_synth_event
401 };
402
static unsigned int trace_string(struct synth_trace_event *entry,
404 struct synth_event *event,
405 char *str_val,
406 bool is_dynamic,
407 unsigned int data_size,
408 unsigned int *n_u64)
409 {
410 unsigned int len = 0;
411 char *str_field;
412
413 if (is_dynamic) {
414 u32 data_offset;
415
416 data_offset = offsetof(typeof(*entry), fields);
417 data_offset += event->n_u64 * sizeof(u64);
418 data_offset += data_size;
419
420 str_field = (char *)entry + data_offset;
421
422 len = strlen(str_val) + 1;
423 strscpy(str_field, str_val, len);
424
425 data_offset |= len << 16;
426 *(u32 *)&entry->fields[*n_u64] = data_offset;
427
428 (*n_u64)++;
429 } else {
430 str_field = (char *)&entry->fields[*n_u64];
431
432 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
433 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
434 }
435
436 return len;
437 }
438
static notrace void trace_event_raw_event_synth(void *__data,
440 u64 *var_ref_vals,
441 unsigned int *var_ref_idx)
442 {
443 unsigned int i, n_u64, val_idx, len, data_size = 0;
444 struct trace_event_file *trace_file = __data;
445 struct synth_trace_event *entry;
446 struct trace_event_buffer fbuffer;
447 struct trace_buffer *buffer;
448 struct synth_event *event;
449 int fields_size = 0;
450
451 event = trace_file->event_call->data;
452
453 if (trace_trigger_soft_disabled(trace_file))
454 return;
455
456 fields_size = event->n_u64 * sizeof(u64);
457
458 for (i = 0; i < event->n_dynamic_fields; i++) {
459 unsigned int field_pos = event->dynamic_fields[i]->field_pos;
460 char *str_val;
461
462 val_idx = var_ref_idx[field_pos];
463 str_val = (char *)(long)var_ref_vals[val_idx];
464
465 len = strlen(str_val) + 1;
466
467 fields_size += len;
468 }
469
470 /*
471 * Avoid ring buffer recursion detection, as this event
472 * is being performed within another event.
473 */
474 buffer = trace_file->tr->array_buffer.buffer;
475 ring_buffer_nest_start(buffer);
476
477 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
478 sizeof(*entry) + fields_size);
479 if (!entry)
480 goto out;
481
482 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
483 val_idx = var_ref_idx[i];
484 if (event->fields[i]->is_string) {
485 char *str_val = (char *)(long)var_ref_vals[val_idx];
486
487 len = trace_string(entry, event, str_val,
488 event->fields[i]->is_dynamic,
489 data_size, &n_u64);
490 data_size += len; /* only dynamic string increments */
491 } else {
492 struct synth_field *field = event->fields[i];
493 u64 val = var_ref_vals[val_idx];
494
495 switch (field->size) {
496 case 1:
497 *(u8 *)&entry->fields[n_u64] = (u8)val;
498 break;
499
500 case 2:
501 *(u16 *)&entry->fields[n_u64] = (u16)val;
502 break;
503
504 case 4:
505 *(u32 *)&entry->fields[n_u64] = (u32)val;
506 break;
507
508 default:
509 entry->fields[n_u64] = val;
510 break;
511 }
512 n_u64++;
513 }
514 }
515
516 trace_event_buffer_commit(&fbuffer);
517 out:
518 ring_buffer_nest_end(buffer);
519 }
520
static void free_synth_event_print_fmt(struct trace_event_call *call)
522 {
523 if (call) {
524 kfree(call->print_fmt);
525 call->print_fmt = NULL;
526 }
527 }
528
static int __set_synth_event_print_fmt(struct synth_event *event,
530 char *buf, int len)
531 {
532 const char *fmt;
533 int pos = 0;
534 int i;
535
536 /* When len=0, we just calculate the needed length */
537 #define LEN_OR_ZERO (len ? len - pos : 0)
538
539 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
540 for (i = 0; i < event->n_fields; i++) {
541 fmt = synth_field_fmt(event->fields[i]->type);
542 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
543 event->fields[i]->name, fmt,
544 i == event->n_fields - 1 ? "" : ", ");
545 }
546 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
547
548 for (i = 0; i < event->n_fields; i++) {
549 if (event->fields[i]->is_string &&
550 event->fields[i]->is_dynamic)
551 pos += snprintf(buf + pos, LEN_OR_ZERO,
552 ", __get_str(%s)", event->fields[i]->name);
553 else
554 pos += snprintf(buf + pos, LEN_OR_ZERO,
555 ", REC->%s", event->fields[i]->name);
556 }
557
558 #undef LEN_OR_ZERO
559
560 /* return the length of print_fmt */
561 return pos;
562 }
563
static int set_synth_event_print_fmt(struct trace_event_call *call)
565 {
566 struct synth_event *event = call->data;
567 char *print_fmt;
568 int len;
569
570 /* First: called with 0 length to calculate the needed length */
571 len = __set_synth_event_print_fmt(event, NULL, 0);
572
573 print_fmt = kmalloc(len + 1, GFP_KERNEL);
574 if (!print_fmt)
575 return -ENOMEM;
576
577 /* Second: actually write the @print_fmt */
578 __set_synth_event_print_fmt(event, print_fmt, len + 1);
579 call->print_fmt = print_fmt;
580
581 return 0;
582 }
583
static void free_synth_field(struct synth_field *field)
585 {
586 kfree(field->type);
587 kfree(field->name);
588 kfree(field);
589 }
590
static int check_field_version(const char *prefix, const char *field_type,
592 const char *field_name)
593 {
594 /*
595 * For backward compatibility, the old synthetic event command
596 * format did not require semicolons, and in order to not
597 * break user space, that old format must still work. If a new
598 * feature is added, then the format that uses the new feature
599 * will be required to have semicolons, as nothing that uses
600 * the old format would be using the new, yet to be created,
601 * feature. When a new feature is added, this will detect it,
602 * and return a number greater than 1, and require the format
603 * to use semicolons.
604 */
605 return 1;
606 }
607
static struct synth_field *parse_synth_field(int argc, char **argv,
609 int *consumed, int *field_version)
610 {
611 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
612 struct synth_field *field;
613 int len, ret = -ENOMEM;
614 struct seq_buf s;
615 ssize_t size;
616
617 if (!strcmp(field_type, "unsigned")) {
618 if (argc < 3) {
619 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
620 return ERR_PTR(-EINVAL);
621 }
622 prefix = "unsigned ";
623 field_type = argv[1];
624 field_name = argv[2];
625 *consumed += 3;
626 } else {
627 field_name = argv[1];
628 *consumed += 2;
629 }
630
631 if (!field_name) {
632 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
633 return ERR_PTR(-EINVAL);
634 }
635
636 *field_version = check_field_version(prefix, field_type, field_name);
637
638 field = kzalloc(sizeof(*field), GFP_KERNEL);
639 if (!field)
640 return ERR_PTR(-ENOMEM);
641
642 len = strlen(field_name);
643 array = strchr(field_name, '[');
644 if (array)
645 len -= strlen(array);
646
647 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
648 if (!field->name)
649 goto free;
650
651 if (!is_good_name(field->name)) {
652 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
653 ret = -EINVAL;
654 goto free;
655 }
656
657 len = strlen(field_type) + 1;
658
659 if (array)
660 len += strlen(array);
661
662 if (prefix)
663 len += strlen(prefix);
664
665 field->type = kzalloc(len, GFP_KERNEL);
666 if (!field->type)
667 goto free;
668
669 seq_buf_init(&s, field->type, len);
670 if (prefix)
671 seq_buf_puts(&s, prefix);
672 seq_buf_puts(&s, field_type);
673 if (array)
674 seq_buf_puts(&s, array);
675 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
676 goto free;
677
678 s.buffer[s.len] = '\0';
679
680 size = synth_field_size(field->type);
681 if (size < 0) {
682 if (array)
683 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
684 else
685 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
686 ret = -EINVAL;
687 goto free;
688 } else if (size == 0) {
689 if (synth_field_is_string(field->type)) {
690 char *type;
691
692 len = sizeof("__data_loc ") + strlen(field->type) + 1;
693 type = kzalloc(len, GFP_KERNEL);
694 if (!type)
695 goto free;
696
697 seq_buf_init(&s, type, len);
698 seq_buf_puts(&s, "__data_loc ");
699 seq_buf_puts(&s, field->type);
700
701 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
702 goto free;
703 s.buffer[s.len] = '\0';
704
705 kfree(field->type);
706 field->type = type;
707
708 field->is_dynamic = true;
709 size = sizeof(u64);
710 } else {
711 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
712 ret = -EINVAL;
713 goto free;
714 }
715 }
716 field->size = size;
717
718 if (synth_field_is_string(field->type))
719 field->is_string = true;
720
721 field->is_signed = synth_field_signed(field->type);
722 out:
723 return field;
724 free:
725 free_synth_field(field);
726 field = ERR_PTR(ret);
727 goto out;
728 }
729
static void free_synth_tracepoint(struct tracepoint *tp)
731 {
732 if (!tp)
733 return;
734
735 kfree(tp->name);
736 kfree(tp);
737 }
738
static struct tracepoint *alloc_synth_tracepoint(char *name)
740 {
741 struct tracepoint *tp;
742
743 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
744 if (!tp)
745 return ERR_PTR(-ENOMEM);
746
747 tp->name = kstrdup(name, GFP_KERNEL);
748 if (!tp->name) {
749 kfree(tp);
750 return ERR_PTR(-ENOMEM);
751 }
752
753 return tp;
754 }
755
struct synth_event *find_synth_event(const char *name)
757 {
758 struct dyn_event *pos;
759 struct synth_event *event;
760
761 for_each_dyn_event(pos) {
762 if (!is_synth_event(pos))
763 continue;
764 event = to_synth_event(pos);
765 if (strcmp(event->name, name) == 0)
766 return event;
767 }
768
769 return NULL;
770 }
771
772 static struct trace_event_fields synth_event_fields_array[] = {
773 { .type = TRACE_FUNCTION_TYPE,
774 .define_fields = synth_event_define_fields },
775 {}
776 };
777
static int register_synth_event(struct synth_event *event)
779 {
780 struct trace_event_call *call = &event->call;
781 int ret = 0;
782
783 event->call.class = &event->class;
784 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
785 if (!event->class.system) {
786 ret = -ENOMEM;
787 goto out;
788 }
789
790 event->tp = alloc_synth_tracepoint(event->name);
791 if (IS_ERR(event->tp)) {
792 ret = PTR_ERR(event->tp);
793 event->tp = NULL;
794 goto out;
795 }
796
797 INIT_LIST_HEAD(&call->class->fields);
798 call->event.funcs = &synth_event_funcs;
799 call->class->fields_array = synth_event_fields_array;
800
801 ret = register_trace_event(&call->event);
802 if (!ret) {
803 ret = -ENODEV;
804 goto out;
805 }
806 call->flags = TRACE_EVENT_FL_TRACEPOINT;
807 call->class->reg = trace_event_reg;
808 call->class->probe = trace_event_raw_event_synth;
809 call->data = event;
810 call->tp = event->tp;
811
812 ret = trace_add_event_call(call);
813 if (ret) {
814 pr_warn("Failed to register synthetic event: %s\n",
815 trace_event_name(call));
816 goto err;
817 }
818
819 ret = set_synth_event_print_fmt(call);
820 if (ret < 0) {
821 trace_remove_event_call(call);
822 goto err;
823 }
824 out:
825 return ret;
826 err:
827 unregister_trace_event(&call->event);
828 goto out;
829 }
830
static int unregister_synth_event(struct synth_event *event)
832 {
833 struct trace_event_call *call = &event->call;
834 int ret;
835
836 ret = trace_remove_event_call(call);
837
838 return ret;
839 }
840
static void free_synth_event(struct synth_event *event)
842 {
843 unsigned int i;
844
845 if (!event)
846 return;
847
848 for (i = 0; i < event->n_fields; i++)
849 free_synth_field(event->fields[i]);
850
851 kfree(event->fields);
852 kfree(event->dynamic_fields);
853 kfree(event->name);
854 kfree(event->class.system);
855 free_synth_tracepoint(event->tp);
856 free_synth_event_print_fmt(&event->call);
857 kfree(event);
858 }
859
static struct synth_event *alloc_synth_event(const char *name, int n_fields,
861 struct synth_field **fields)
862 {
863 unsigned int i, j, n_dynamic_fields = 0;
864 struct synth_event *event;
865
866 event = kzalloc(sizeof(*event), GFP_KERNEL);
867 if (!event) {
868 event = ERR_PTR(-ENOMEM);
869 goto out;
870 }
871
872 event->name = kstrdup(name, GFP_KERNEL);
873 if (!event->name) {
874 kfree(event);
875 event = ERR_PTR(-ENOMEM);
876 goto out;
877 }
878
879 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
880 if (!event->fields) {
881 free_synth_event(event);
882 event = ERR_PTR(-ENOMEM);
883 goto out;
884 }
885
886 for (i = 0; i < n_fields; i++)
887 if (fields[i]->is_dynamic)
888 n_dynamic_fields++;
889
890 if (n_dynamic_fields) {
891 event->dynamic_fields = kcalloc(n_dynamic_fields,
892 sizeof(*event->dynamic_fields),
893 GFP_KERNEL);
894 if (!event->dynamic_fields) {
895 free_synth_event(event);
896 event = ERR_PTR(-ENOMEM);
897 goto out;
898 }
899 }
900
901 dyn_event_init(&event->devent, &synth_event_ops);
902
903 for (i = 0, j = 0; i < n_fields; i++) {
904 fields[i]->field_pos = i;
905 event->fields[i] = fields[i];
906
907 if (fields[i]->is_dynamic)
908 event->dynamic_fields[j++] = fields[i];
909 }
910 event->n_dynamic_fields = j;
911 event->n_fields = n_fields;
912 out:
913 return event;
914 }
915
static int synth_event_check_arg_fn(void *data)
917 {
918 struct dynevent_arg_pair *arg_pair = data;
919 int size;
920
921 size = synth_field_size((char *)arg_pair->lhs);
922 if (size == 0) {
923 if (strstr((char *)arg_pair->lhs, "["))
924 return 0;
925 }
926
927 return size ? 0 : -EINVAL;
928 }
929
930 /**
931 * synth_event_add_field - Add a new field to a synthetic event cmd
932 * @cmd: A pointer to the dynevent_cmd struct representing the new event
933 * @type: The type of the new field to add
934 * @name: The name of the new field to add
935 *
936 * Add a new field to a synthetic event cmd object. Field ordering is in
937 * the same order the fields are added.
938 *
939 * See synth_field_size() for available types. If field_name contains
940 * [n] the field is considered to be an array.
941 *
942 * Return: 0 if successful, error otherwise.
943 */
int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
945 const char *name)
946 {
947 struct dynevent_arg_pair arg_pair;
948 int ret;
949
950 if (cmd->type != DYNEVENT_TYPE_SYNTH)
951 return -EINVAL;
952
953 if (!type || !name)
954 return -EINVAL;
955
956 dynevent_arg_pair_init(&arg_pair, 0, ';');
957
958 arg_pair.lhs = type;
959 arg_pair.rhs = name;
960
961 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
962 if (ret)
963 return ret;
964
965 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
966 ret = -EINVAL;
967
968 return ret;
969 }
970 EXPORT_SYMBOL_GPL(synth_event_add_field);
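
/*
 * Illustrative sketch (not compiled here): building a command
 * incrementally with synth_event_add_field().  The event name, field
 * names and error handling style are examples only; the command is
 * assumed to have been initialized with synth_event_cmd_init() and
 * started with the synth_event_gen_cmd_start() wrapper described
 * below.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "test_latency", THIS_MODULE);
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */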
971
972 /**
973 * synth_event_add_field_str - Add a new field to a synthetic event cmd
974 * @cmd: A pointer to the dynevent_cmd struct representing the new event
975 * @type_name: The type and name of the new field to add, as a single string
976 *
977 * Add a new field to a synthetic event cmd object, as a single
978 * string. The @type_name string is expected to be of the form 'type
 * name', to which a ';' will be appended. No sanity checking is done -
980 * what's passed in is assumed to already be well-formed. Field
981 * ordering is in the same order the fields are added.
982 *
983 * See synth_field_size() for available types. If field_name contains
984 * [n] the field is considered to be an array.
985 *
986 * Return: 0 if successful, error otherwise.
987 */
int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
989 {
990 struct dynevent_arg arg;
991 int ret;
992
993 if (cmd->type != DYNEVENT_TYPE_SYNTH)
994 return -EINVAL;
995
996 if (!type_name)
997 return -EINVAL;
998
999 dynevent_arg_init(&arg, ';');
1000
1001 arg.str = type_name;
1002
1003 ret = dynevent_arg_add(cmd, &arg, NULL);
1004 if (ret)
1005 return ret;
1006
1007 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1008 ret = -EINVAL;
1009
1010 return ret;
1011 }
1012 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
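
/*
 * Illustrative sketch: the single-string variant appends an already
 * well-formed "type name" pair (field names are examples only).
 *
 *	ret = synth_event_add_field_str(&cmd, "unsigned int cpu");
 *	if (!ret)
 *		ret = synth_event_add_field_str(&cmd, "char[16] comm");
 */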
1013
1014 /**
1015 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1016 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1017 * @fields: An array of type/name field descriptions
1018 * @n_fields: The number of field descriptions contained in the fields array
1019 *
1020 * Add a new set of fields to a synthetic event cmd object. The event
1021 * fields that will be defined for the event should be passed in as an
1022 * array of struct synth_field_desc, and the number of elements in the
1023 * array passed in as n_fields. Field ordering will retain the
1024 * ordering given in the fields array.
1025 *
1026 * See synth_field_size() for available types. If field_name contains
1027 * [n] the field is considered to be an array.
1028 *
1029 * Return: 0 if successful, error otherwise.
1030 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
1032 struct synth_field_desc *fields,
1033 unsigned int n_fields)
1034 {
1035 unsigned int i;
1036 int ret = 0;
1037
1038 for (i = 0; i < n_fields; i++) {
1039 if (fields[i].type == NULL || fields[i].name == NULL) {
1040 ret = -EINVAL;
1041 break;
1042 }
1043
1044 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1045 if (ret)
1046 break;
1047 }
1048
1049 return ret;
1050 }
1051 EXPORT_SYMBOL_GPL(synth_event_add_fields);
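
/*
 * Illustrative sketch: adding several fields at once from a
 * synth_field_desc array (field names are examples only).
 *
 *	static struct synth_field_desc extra_fields[] = {
 *		{ .type = "s64",	.name = "delta" },
 *		{ .type = "char[]",	.name = "msg" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, extra_fields,
 *				     ARRAY_SIZE(extra_fields));
 */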
1052
1053 /**
1054 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1055 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1056 * @name: The name of the synthetic event
1057 * @mod: The module creating the event, NULL if not created from a module
1058 * @args: Variable number of arg (pairs), one pair for each field
1059 *
1060 * NOTE: Users normally won't want to call this function directly, but
1061 * rather use the synth_event_gen_cmd_start() wrapper, which
1062 * automatically adds a NULL to the end of the arg list. If this
1063 * function is used directly, make sure the last arg in the variable
1064 * arg list is NULL.
1065 *
1066 * Generate a synthetic event command to be executed by
1067 * synth_event_gen_cmd_end(). This function can be used to generate
1068 * the complete command or only the first part of it; in the latter
1069 * case, synth_event_add_field(), synth_event_add_field_str(), or
1070 * synth_event_add_fields() can be used to add more fields following
1071 * this.
1072 *
 * There should be an even number of variable args, each pair consisting
1074 * of a type followed by a field name.
1075 *
1076 * See synth_field_size() for available types. If field_name contains
1077 * [n] the field is considered to be an array.
1078 *
1079 * Return: 0 if successful, error otherwise.
1080 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1082 struct module *mod, ...)
1083 {
1084 struct dynevent_arg arg;
1085 va_list args;
1086 int ret;
1087
1088 cmd->event_name = name;
1089 cmd->private_data = mod;
1090
1091 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1092 return -EINVAL;
1093
1094 dynevent_arg_init(&arg, 0);
1095 arg.str = name;
1096 ret = dynevent_arg_add(cmd, &arg, NULL);
1097 if (ret)
1098 return ret;
1099
1100 va_start(args, mod);
1101 for (;;) {
1102 const char *type, *name;
1103
1104 type = va_arg(args, const char *);
1105 if (!type)
1106 break;
1107 name = va_arg(args, const char *);
1108 if (!name)
1109 break;
1110
1111 if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1112 ret = -EINVAL;
1113 break;
1114 }
1115
1116 ret = synth_event_add_field(cmd, type, name);
1117 if (ret)
1118 break;
1119 }
1120 va_end(args);
1121
1122 return ret;
1123 }
1124 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
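
/*
 * Illustrative sketch: generating the complete command in one call via
 * the synth_event_gen_cmd_start() wrapper (which NULL-terminates the
 * type/name pairs for this function).  Event and field names are
 * examples only.
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid",
 *					"u64", "ts_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */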
1125
1126 /**
1127 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1128 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1129 * @name: The name of the synthetic event
1130 * @fields: An array of type/name field descriptions
1131 * @n_fields: The number of field descriptions contained in the fields array
1132 *
1133 * Generate a synthetic event command to be executed by
1134 * synth_event_gen_cmd_end(). This function can be used to generate
1135 * the complete command or only the first part of it; in the latter
1136 * case, synth_event_add_field(), synth_event_add_field_str(), or
1137 * synth_event_add_fields() can be used to add more fields following
1138 * this.
1139 *
1140 * The event fields that will be defined for the event should be
1141 * passed in as an array of struct synth_field_desc, and the number of
1142 * elements in the array passed in as n_fields. Field ordering will
1143 * retain the ordering given in the fields array.
1144 *
1145 * See synth_field_size() for available types. If field_name contains
1146 * [n] the field is considered to be an array.
1147 *
1148 * Return: 0 if successful, error otherwise.
1149 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1151 struct module *mod,
1152 struct synth_field_desc *fields,
1153 unsigned int n_fields)
1154 {
1155 struct dynevent_arg arg;
1156 unsigned int i;
1157 int ret = 0;
1158
1159 cmd->event_name = name;
1160 cmd->private_data = mod;
1161
1162 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1163 return -EINVAL;
1164
1165 if (n_fields > SYNTH_FIELDS_MAX)
1166 return -EINVAL;
1167
1168 dynevent_arg_init(&arg, 0);
1169 arg.str = name;
1170 ret = dynevent_arg_add(cmd, &arg, NULL);
1171 if (ret)
1172 return ret;
1173
1174 for (i = 0; i < n_fields; i++) {
1175 if (fields[i].type == NULL || fields[i].name == NULL)
1176 return -EINVAL;
1177
1178 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1179 if (ret)
1180 break;
1181 }
1182
1183 return ret;
1184 }
1185 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
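
/*
 * Illustrative sketch: starting the command from a field description
 * array instead of a variable argument list (names are examples only).
 *
 *	static struct synth_field_desc sched_fields[] = {
 *		{ .type = "pid_t",	.name = "next_pid" },
 *		{ .type = "char[16]",	.name = "next_comm" },
 *		{ .type = "u64",	.name = "ts_ns" },
 *	};
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "schedtest", THIS_MODULE,
 *					      sched_fields,
 *					      ARRAY_SIZE(sched_fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */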
1186
static int __create_synth_event(const char *name, const char *raw_fields)
1188 {
1189 char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1190 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1191 int consumed, cmd_version = 1, n_fields_this_loop;
1192 int i, argc, n_fields = 0, ret = 0;
1193 struct synth_event *event = NULL;
1194
1195 /*
1196 * Argument syntax:
1197 * - Add synthetic event: <event_name> field[;field] ...
1198 * - Remove synthetic event: !<event_name> field[;field] ...
1199 * where 'field' = type field_name
1200 */
1201
1202 if (name[0] == '\0') {
1203 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1204 return -EINVAL;
1205 }
1206
1207 if (!is_good_name(name)) {
1208 synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1209 return -EINVAL;
1210 }
1211
1212 mutex_lock(&event_mutex);
1213
1214 event = find_synth_event(name);
1215 if (event) {
1216 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1217 ret = -EEXIST;
1218 goto err;
1219 }
1220
1221 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1222 if (!tmp_fields) {
1223 ret = -ENOMEM;
1224 goto err;
1225 }
1226
1227 while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1228 argv = argv_split(GFP_KERNEL, field_str, &argc);
1229 if (!argv) {
1230 ret = -ENOMEM;
1231 goto err;
1232 }
1233
1234 if (!argc) {
1235 argv_free(argv);
1236 continue;
1237 }
1238
1239 n_fields_this_loop = 0;
1240 consumed = 0;
1241 while (argc > consumed) {
1242 int field_version;
1243
1244 field = parse_synth_field(argc - consumed,
1245 argv + consumed, &consumed,
1246 &field_version);
1247 if (IS_ERR(field)) {
1248 ret = PTR_ERR(field);
1249 goto err_free_arg;
1250 }
1251
1252 /*
1253 * Track the highest version of any field we
1254 * found in the command.
1255 */
1256 if (field_version > cmd_version)
1257 cmd_version = field_version;
1258
1259 /*
1260 * Now sort out what is and isn't valid for
1261 * each supported version.
1262 *
1263 * If we see more than 1 field per loop, it
1264 * means we have multiple fields between
1265 * semicolons, and that's something we no
1266 * longer support in a version 2 or greater
1267 * command.
1268 */
1269 if (cmd_version > 1 && n_fields_this_loop >= 1) {
1270 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1271 ret = -EINVAL;
1272 goto err_free_arg;
1273 }
1274
1275 fields[n_fields++] = field;
1276 if (n_fields == SYNTH_FIELDS_MAX) {
1277 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1278 ret = -EINVAL;
1279 goto err_free_arg;
1280 }
1281
1282 n_fields_this_loop++;
1283 }
1284 argv_free(argv);
1285
1286 if (consumed < argc) {
1287 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1288 ret = -EINVAL;
1289 goto err;
1290 }
1291
1292 }
1293
1294 if (n_fields == 0) {
1295 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1296 ret = -EINVAL;
1297 goto err;
1298 }
1299
1300 event = alloc_synth_event(name, n_fields, fields);
1301 if (IS_ERR(event)) {
1302 ret = PTR_ERR(event);
1303 event = NULL;
1304 goto err;
1305 }
1306 ret = register_synth_event(event);
1307 if (!ret)
1308 dyn_event_add(&event->devent, &event->call);
1309 else
1310 free_synth_event(event);
1311 out:
1312 mutex_unlock(&event_mutex);
1313
1314 kfree(saved_fields);
1315
1316 return ret;
1317 err_free_arg:
1318 argv_free(argv);
1319 err:
1320 for (i = 0; i < n_fields; i++)
1321 free_synth_field(fields[i]);
1322
1323 goto out;
1324 }
1325
1326 /**
1327 * synth_event_create - Create a new synthetic event
1328 * @name: The name of the new synthetic event
1329 * @fields: An array of type/name field descriptions
1330 * @n_fields: The number of field descriptions contained in the fields array
1331 * @mod: The module creating the event, NULL if not created from a module
1332 *
1333 * Create a new synthetic event with the given name under the
1334 * trace/events/synthetic/ directory. The event fields that will be
1335 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in as
1337 * n_fields. Field ordering will retain the ordering given in the
1338 * fields array.
1339 *
1340 * If the new synthetic event is being created from a module, the mod
1341 * param must be non-NULL. This will ensure that the trace buffer
1342 * won't contain unreadable events.
1343 *
 * The new synth event should be deleted using the synth_event_delete()
1345 * function. The new synthetic event can be generated from modules or
1346 * other kernel code using trace_synth_event() and related functions.
1347 *
1348 * Return: 0 if successful, error otherwise.
1349 */
int synth_event_create(const char *name, struct synth_field_desc *fields,
1351 unsigned int n_fields, struct module *mod)
1352 {
1353 struct dynevent_cmd cmd;
1354 char *buf;
1355 int ret;
1356
1357 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1358 if (!buf)
1359 return -ENOMEM;
1360
1361 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1362
1363 ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1364 fields, n_fields);
1365 if (ret)
1366 goto out;
1367
1368 ret = synth_event_gen_cmd_end(&cmd);
1369 out:
1370 kfree(buf);
1371
1372 return ret;
1373 }
1374 EXPORT_SYMBOL_GPL(synth_event_create);
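
/*
 * Illustrative sketch: creating a complete synthetic event in one
 * call, e.g. from a module init path.  The event and field names are
 * examples only; the same fields reappear in the tracing sketches
 * further down.
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "delay_ns" },
 *		{ .type = "char[16]",	.name = "comm" },
 *	};
 *
 *	ret = synth_event_create("wakeup_lat_test", wakeup_fields,
 *				 ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 */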
1375
static int destroy_synth_event(struct synth_event *se)
1377 {
1378 int ret;
1379
1380 if (se->ref)
1381 return -EBUSY;
1382
1383 if (trace_event_dyn_busy(&se->call))
1384 return -EBUSY;
1385
1386 ret = unregister_synth_event(se);
1387 if (!ret) {
1388 dyn_event_remove(&se->devent);
1389 free_synth_event(se);
1390 }
1391
1392 return ret;
1393 }
1394
1395 /**
1396 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
1398 *
1399 * Delete a synthetic event that was created with synth_event_create().
1400 *
1401 * Return: 0 if successful, error otherwise.
1402 */
int synth_event_delete(const char *event_name)
1404 {
1405 struct synth_event *se = NULL;
1406 struct module *mod = NULL;
1407 int ret = -ENOENT;
1408
1409 mutex_lock(&event_mutex);
1410 se = find_synth_event(event_name);
1411 if (se) {
1412 mod = se->mod;
1413 ret = destroy_synth_event(se);
1414 }
1415 mutex_unlock(&event_mutex);
1416
1417 if (mod) {
1418 mutex_lock(&trace_types_lock);
1419 /*
1420 * It is safest to reset the ring buffer if the module
1421 * being unloaded registered any events that were
1422 * used. The only worry is if a new module gets
1423 * loaded, and takes on the same id as the events of
1424 * this module. When printing out the buffer, traced
1425 * events left over from this module may be passed to
1426 * the new module events and unexpected results may
1427 * occur.
1428 */
1429 tracing_reset_all_online_cpus();
1430 mutex_unlock(&trace_types_lock);
1431 }
1432
1433 return ret;
1434 }
1435 EXPORT_SYMBOL_GPL(synth_event_delete);
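
/*
 * Illustrative sketch: removing the event created above, e.g. from a
 * module exit path (the event name is an example only).
 *
 *	ret = synth_event_delete("wakeup_lat_test");
 */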
1436
static int check_command(const char *raw_command)
1438 {
1439 char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1440 int argc, ret = 0;
1441
1442 cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1443 if (!cmd)
1444 return -ENOMEM;
1445
1446 name_and_field = strsep(&cmd, ";");
1447 if (!name_and_field) {
1448 ret = -EINVAL;
1449 goto free;
1450 }
1451
1452 if (name_and_field[0] == '!')
1453 goto free;
1454
1455 argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1456 if (!argv) {
1457 ret = -ENOMEM;
1458 goto free;
1459 }
1460 argv_free(argv);
1461
1462 if (argc < 3)
1463 ret = -EINVAL;
1464 free:
1465 kfree(saved_cmd);
1466
1467 return ret;
1468 }
1469
static int create_or_delete_synth_event(const char *raw_command)
1471 {
1472 char *name = NULL, *fields, *p;
1473 int ret = 0;
1474
1475 raw_command = skip_spaces(raw_command);
1476 if (raw_command[0] == '\0')
1477 return ret;
1478
1479 last_cmd_set(raw_command);
1480
1481 ret = check_command(raw_command);
1482 if (ret) {
1483 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1484 return ret;
1485 }
1486
1487 p = strpbrk(raw_command, " \t");
1488 if (!p && raw_command[0] != '!') {
1489 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1490 ret = -EINVAL;
1491 goto free;
1492 }
1493
1494 name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1495 if (!name)
1496 return -ENOMEM;
1497
1498 if (name[0] == '!') {
1499 ret = synth_event_delete(name + 1);
1500 goto free;
1501 }
1502
1503 fields = skip_spaces(p);
1504
1505 ret = __create_synth_event(name, fields);
1506 free:
1507 kfree(name);
1508
1509 return ret;
1510 }
1511
static int synth_event_run_command(struct dynevent_cmd *cmd)
1513 {
1514 struct synth_event *se;
1515 int ret;
1516
1517 ret = create_or_delete_synth_event(cmd->seq.buffer);
1518 if (ret)
1519 return ret;
1520
1521 se = find_synth_event(cmd->event_name);
1522 if (WARN_ON(!se))
1523 return -ENOENT;
1524
1525 se->mod = cmd->private_data;
1526
1527 return ret;
1528 }
1529
1530 /**
1531 * synth_event_cmd_init - Initialize a synthetic event command object
1532 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1533 * @buf: A pointer to the buffer used to build the command
1534 * @maxlen: The length of the buffer passed in @buf
1535 *
1536 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
1538 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1540 {
1541 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1542 synth_event_run_command);
1543 }
1544 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1545
1546 static inline int
__synth_event_trace_init(struct trace_event_file *file,
1548 struct synth_event_trace_state *trace_state)
1549 {
1550 int ret = 0;
1551
1552 memset(trace_state, '\0', sizeof(*trace_state));
1553
1554 /*
1555 * Normal event tracing doesn't get called at all unless the
1556 * ENABLED bit is set (which attaches the probe thus allowing
1557 * this code to be called, etc). Because this is called
1558 * directly by the user, we don't have that but we still need
1559 * to honor not logging when disabled. For the iterated
1560 * trace case, we save the enabled state upon start and just
1561 * ignore the following data calls.
1562 */
1563 if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1564 trace_trigger_soft_disabled(file)) {
1565 trace_state->disabled = true;
1566 ret = -ENOENT;
1567 goto out;
1568 }
1569
1570 trace_state->event = file->event_call->data;
1571 out:
1572 return ret;
1573 }
1574
1575 static inline int
__synth_event_trace_start(struct trace_event_file *file,
1577 struct synth_event_trace_state *trace_state,
1578 int dynamic_fields_size)
1579 {
1580 int entry_size, fields_size = 0;
1581 int ret = 0;
1582
1583 fields_size = trace_state->event->n_u64 * sizeof(u64);
1584 fields_size += dynamic_fields_size;
1585
1586 /*
1587 * Avoid ring buffer recursion detection, as this event
1588 * is being performed within another event.
1589 */
1590 trace_state->buffer = file->tr->array_buffer.buffer;
1591 ring_buffer_nest_start(trace_state->buffer);
1592
1593 entry_size = sizeof(*trace_state->entry) + fields_size;
1594 trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1595 file,
1596 entry_size);
1597 if (!trace_state->entry) {
1598 ring_buffer_nest_end(trace_state->buffer);
1599 ret = -EINVAL;
1600 }
1601
1602 return ret;
1603 }
1604
1605 static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
1607 {
1608 trace_event_buffer_commit(&trace_state->fbuffer);
1609
1610 ring_buffer_nest_end(trace_state->buffer);
1611 }
1612
1613 /**
1614 * synth_event_trace - Trace a synthetic event
1615 * @file: The trace_event_file representing the synthetic event
1616 * @n_vals: The number of values in vals
1617 * @args: Variable number of args containing the event values
1618 *
1619 * Trace a synthetic event using the values passed in the variable
1620 * argument list.
1621 *
 * The argument list should be a list of 'n_vals' u64 values. The number
 * of vals must match the number of fields in the synthetic event, and
1624 * must be in the same order as the synthetic event fields.
1625 *
1626 * All vals should be cast to u64, and string vals are just pointers
1627 * to strings, cast to u64. Strings will be copied into space
1628 * reserved in the event for the string, using these pointers.
1629 *
1630 * Return: 0 on success, err otherwise.
1631 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1633 {
1634 unsigned int i, n_u64, len, data_size = 0;
1635 struct synth_event_trace_state state;
1636 va_list args;
1637 int ret;
1638
1639 ret = __synth_event_trace_init(file, &state);
1640 if (ret) {
1641 if (ret == -ENOENT)
1642 ret = 0; /* just disabled, not really an error */
1643 return ret;
1644 }
1645
1646 if (state.event->n_dynamic_fields) {
1647 va_start(args, n_vals);
1648
1649 for (i = 0; i < state.event->n_fields; i++) {
1650 u64 val = va_arg(args, u64);
1651
1652 if (state.event->fields[i]->is_string &&
1653 state.event->fields[i]->is_dynamic) {
1654 char *str_val = (char *)(long)val;
1655
1656 data_size += strlen(str_val) + 1;
1657 }
1658 }
1659
1660 va_end(args);
1661 }
1662
1663 ret = __synth_event_trace_start(file, &state, data_size);
1664 if (ret)
1665 return ret;
1666
1667 if (n_vals != state.event->n_fields) {
1668 ret = -EINVAL;
1669 goto out;
1670 }
1671
1672 data_size = 0;
1673
1674 va_start(args, n_vals);
1675 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1676 u64 val;
1677
1678 val = va_arg(args, u64);
1679
1680 if (state.event->fields[i]->is_string) {
1681 char *str_val = (char *)(long)val;
1682
1683 len = trace_string(state.entry, state.event, str_val,
1684 state.event->fields[i]->is_dynamic,
1685 data_size, &n_u64);
1686 data_size += len; /* only dynamic string increments */
1687 } else {
1688 struct synth_field *field = state.event->fields[i];
1689
1690 switch (field->size) {
1691 case 1:
1692 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1693 break;
1694
1695 case 2:
1696 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1697 break;
1698
1699 case 4:
1700 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1701 break;
1702
1703 default:
1704 state.entry->fields[n_u64] = val;
1705 break;
1706 }
1707 n_u64++;
1708 }
1709 }
1710 va_end(args);
1711 out:
1712 __synth_event_trace_end(&state);
1713
1714 return ret;
1715 }
1716 EXPORT_SYMBOL_GPL(synth_event_trace);
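
/*
 * Illustrative sketch: emitting one instance of the example event
 * created above.  'file' is assumed to be the event's
 * trace_event_file (e.g. as returned by trace_get_event_file());
 * string values are passed as pointers cast to u64, and the values
 * are examples only.
 *
 *	ret = synth_event_trace(file, 3,		/* number of values */
 *				(u64)444,		/* pid */
 *				(u64)1000000,		/* delay_ns */
 *				(u64)(long)"foo-task");	/* comm */
 */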
1717
1718 /**
1719 * synth_event_trace_array - Trace a synthetic event from an array
1720 * @file: The trace_event_file representing the synthetic event
1721 * @vals: Array of values
1722 * @n_vals: The number of values in vals
1723 *
1724 * Trace a synthetic event using the values passed in as 'vals'.
1725 *
1726 * The 'vals' array is just an array of 'n_vals' u64. The number of
 * vals must match the number of fields in the synthetic event, and
1728 * must be in the same order as the synthetic event fields.
1729 *
1730 * All vals should be cast to u64, and string vals are just pointers
1731 * to strings, cast to u64. Strings will be copied into space
1732 * reserved in the event for the string, using these pointers.
1733 *
1734 * Return: 0 on success, err otherwise.
1735 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1737 unsigned int n_vals)
1738 {
1739 unsigned int i, n_u64, field_pos, len, data_size = 0;
1740 struct synth_event_trace_state state;
1741 char *str_val;
1742 int ret;
1743
1744 ret = __synth_event_trace_init(file, &state);
1745 if (ret) {
1746 if (ret == -ENOENT)
1747 ret = 0; /* just disabled, not really an error */
1748 return ret;
1749 }
1750
1751 if (state.event->n_dynamic_fields) {
1752 for (i = 0; i < state.event->n_dynamic_fields; i++) {
1753 field_pos = state.event->dynamic_fields[i]->field_pos;
1754 str_val = (char *)(long)vals[field_pos];
1755 len = strlen(str_val) + 1;
1756 data_size += len;
1757 }
1758 }
1759
1760 ret = __synth_event_trace_start(file, &state, data_size);
1761 if (ret)
1762 return ret;
1763
1764 if (n_vals != state.event->n_fields) {
1765 ret = -EINVAL;
1766 goto out;
1767 }
1768
1769 data_size = 0;
1770
1771 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1772 if (state.event->fields[i]->is_string) {
1773 char *str_val = (char *)(long)vals[i];
1774
1775 len = trace_string(state.entry, state.event, str_val,
1776 state.event->fields[i]->is_dynamic,
1777 data_size, &n_u64);
1778 data_size += len; /* only dynamic string increments */
1779 } else {
1780 struct synth_field *field = state.event->fields[i];
1781 u64 val = vals[i];
1782
1783 switch (field->size) {
1784 case 1:
1785 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1786 break;
1787
1788 case 2:
1789 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1790 break;
1791
1792 case 4:
1793 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1794 break;
1795
1796 default:
1797 state.entry->fields[n_u64] = val;
1798 break;
1799 }
1800 n_u64++;
1801 }
1802 }
1803 out:
1804 __synth_event_trace_end(&state);
1805
1806 return ret;
1807 }
1808 EXPORT_SYMBOL_GPL(synth_event_trace_array);
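
/*
 * Illustrative sketch: the same trace as above, with the values
 * passed in as an array (values are examples only).
 *
 *	u64 vals[3];
 *
 *	vals[0] = 444;				/* pid */
 *	vals[1] = 1000000;			/* delay_ns */
 *	vals[2] = (u64)(long)"foo-task";	/* comm */
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */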
1809
1810 /**
1811 * synth_event_trace_start - Start piecewise synthetic event trace
1812 * @file: The trace_event_file representing the synthetic event
1813 * @trace_state: A pointer to object tracking the piecewise trace state
1814 *
1815 * Start the trace of a synthetic event field-by-field rather than all
1816 * at once.
1817 *
1818 * This function 'opens' an event trace, which means space is reserved
1819 * for the event in the trace buffer, after which the event's
1820 * individual field values can be set through either
1821 * synth_event_add_next_val() or synth_event_add_val().
1822 *
1823 * A pointer to a trace_state object is passed in, which will keep
1824 * track of the current event trace state until the event trace is
1825 * closed (and the event finally traced) using
1826 * synth_event_trace_end().
1827 *
1828 * Note that synth_event_trace_end() must be called after all values
1829 * have been added for each event trace, regardless of whether adding
1830 * all field values succeeded or not.
1831 *
1832 * Note also that for a given event trace, all fields must be added
1833 * using either synth_event_add_next_val() or synth_event_add_val()
1834 * but not both together or interleaved.
1835 *
1836 * Return: 0 on success, err otherwise.
1837 */
int synth_event_trace_start(struct trace_event_file *file,
1839 struct synth_event_trace_state *trace_state)
1840 {
1841 int ret;
1842
1843 if (!trace_state)
1844 return -EINVAL;
1845
1846 ret = __synth_event_trace_init(file, trace_state);
1847 if (ret) {
1848 if (ret == -ENOENT)
1849 ret = 0; /* just disabled, not really an error */
1850 return ret;
1851 }
1852
1853 if (trace_state->event->n_dynamic_fields)
1854 return -ENOTSUPP;
1855
1856 ret = __synth_event_trace_start(file, trace_state, 0);
1857
1858 return ret;
1859 }
1860 EXPORT_SYMBOL_GPL(synth_event_trace_start);
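
/*
 * Illustrative sketch: tracing the example event piecewise, using the
 * in-order variant (values are examples only).  synth_event_trace_end()
 * is called even if adding a value fails.
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	err = synth_event_add_next_val(444, &state);		/* pid */
 *	if (!err)
 *		err = synth_event_add_next_val(1000000, &state);	/* delay_ns */
 *	if (!err)
 *		err = synth_event_add_next_val((u64)(long)"foo-task", &state);
 *
 *	ret = synth_event_trace_end(&state);
 */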
1861
static int __synth_event_add_val(const char *field_name, u64 val,
1863 struct synth_event_trace_state *trace_state)
1864 {
1865 struct synth_field *field = NULL;
1866 struct synth_trace_event *entry;
1867 struct synth_event *event;
1868 int i, ret = 0;
1869
1870 if (!trace_state) {
1871 ret = -EINVAL;
1872 goto out;
1873 }
1874
1875 /* can't mix add_next_synth_val() with add_synth_val() */
1876 if (field_name) {
1877 if (trace_state->add_next) {
1878 ret = -EINVAL;
1879 goto out;
1880 }
1881 trace_state->add_name = true;
1882 } else {
1883 if (trace_state->add_name) {
1884 ret = -EINVAL;
1885 goto out;
1886 }
1887 trace_state->add_next = true;
1888 }
1889
1890 if (trace_state->disabled)
1891 goto out;
1892
1893 event = trace_state->event;
1894 if (trace_state->add_name) {
1895 for (i = 0; i < event->n_fields; i++) {
1896 field = event->fields[i];
1897 if (strcmp(field->name, field_name) == 0)
1898 break;
1899 }
1900 if (!field) {
1901 ret = -EINVAL;
1902 goto out;
1903 }
1904 } else {
1905 if (trace_state->cur_field >= event->n_fields) {
1906 ret = -EINVAL;
1907 goto out;
1908 }
1909 field = event->fields[trace_state->cur_field++];
1910 }
1911
1912 entry = trace_state->entry;
1913 if (field->is_string) {
1914 char *str_val = (char *)(long)val;
1915 char *str_field;
1916
1917 if (field->is_dynamic) { /* add_val can't do dynamic strings */
1918 ret = -EINVAL;
1919 goto out;
1920 }
1921
1922 if (!str_val) {
1923 ret = -EINVAL;
1924 goto out;
1925 }
1926
1927 str_field = (char *)&entry->fields[field->offset];
1928 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
1929 } else {
1930 switch (field->size) {
1931 case 1:
1932 *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
1933 break;
1934
1935 case 2:
1936 *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
1937 break;
1938
1939 case 4:
1940 *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
1941 break;
1942
1943 default:
1944 trace_state->entry->fields[field->offset] = val;
1945 break;
1946 }
1947 }
1948 out:
1949 return ret;
1950 }
1951
/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set. If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);
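
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * set the fields of an already-opened synthetic event in field order.
 * The trace_event_file pointer 'file' and the example values below are
 * assumptions for the sketch, not definitions from this file.  Note that
 * synth_event_trace_end() is called whether or not every add succeeded:
 *
 *	struct synth_event_trace_state state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val(500, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(2, &state);
 *
 *	ret = synth_event_trace_end(&state);
 */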

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value. This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
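
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * set fields by name rather than by position.  The trace_event_file
 * pointer 'file' and the field names "lat" and "pid" are assumptions
 * for the sketch; synth_event_trace_end() is still required at the end:
 *
 *	struct synth_event_trace_state state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_val("lat", 500, &state);
 *	if (!ret)
 *		ret = synth_event_add_val("pid", 2, &state);
 *
 *	ret = synth_event_trace_end(&state);
 */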

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which basically means that
 * it commits the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

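/*
 * Illustrative command forms accepted below when written to the
 * dynamic_events file (the event name and fields are made-up examples,
 * not defined anywhere in this file):
 *
 *	s:wakeup_lat u64 lat; pid_t pid
 *	s:synthetic/wakeup_lat u64 lat; pid_t pid
 *
 * The optional group prefix, if given, must match SYNTH_SYSTEM.
 */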
static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}


static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start = dyn_event_seq_start,
	.next = dyn_event_seq_next,
	.stop = dyn_event_seq_stop,
	.show = synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open = synth_events_open,
	.write = synth_events_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};


/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * synthetic events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;

	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
				    NULL, NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);
