1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * trace_events_trigger - trace event triggers
4 *
5 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 */
7
8 #include <linux/security.h>
9 #include <linux/module.h>
10 #include <linux/ctype.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/rculist.h>
14
15 #include "trace.h"
16
17 static LIST_HEAD(trigger_commands);
18 static DEFINE_MUTEX(trigger_cmd_mutex);
19
20 void trigger_data_free(struct event_trigger_data *data)
21 {
22 if (data->cmd_ops->set_filter)
23 data->cmd_ops->set_filter(NULL, data, NULL);
24
25 /* make sure current triggers exit before free */
26 tracepoint_synchronize_unregister();
27
28 kfree(data);
29 }
30
31 /**
32 * event_triggers_call - Call triggers associated with a trace event
33 * @file: The trace_event_file associated with the event
34 * @buffer: The ring buffer that the event is being written to
35 * @rec: The trace entry for the event, NULL for unconditional invocation
36 * @event: The event meta data in the ring buffer
37 *
38 * For each trigger associated with an event, invoke the trigger
39 * function registered with the associated trigger command. If rec is
40 * non-NULL, it means that the trigger requires further processing and
41 * shouldn't be unconditionally invoked. If rec is non-NULL and the
42 * trigger has a filter associated with it, rec will be checked against
43 * the filter, and if the record matches, the trigger will be invoked.
44 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
45 * in any case until the current event is written, the trigger
46 * function isn't invoked but the bit associated with the deferred
47 * trigger is set in the return value.
48 *
49 * Returns an enum event_trigger_type value containing a set bit for
50 * any trigger that should be deferred, ETT_NONE if nothing to defer.
51 *
52 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
53 *
54 * Return: an enum event_trigger_type value containing a set bit for
55 * any trigger that should be deferred, ETT_NONE if nothing to defer.
56 */
57 enum event_trigger_type
58 event_triggers_call(struct trace_event_file *file,
59 struct trace_buffer *buffer, void *rec,
60 struct ring_buffer_event *event)
61 {
62 struct event_trigger_data *data;
63 enum event_trigger_type tt = ETT_NONE;
64 struct event_filter *filter;
65
66 if (list_empty(&file->triggers))
67 return tt;
68
69 list_for_each_entry_rcu(data, &file->triggers, list) {
70 if (data->paused)
71 continue;
72 if (!rec) {
73 data->ops->trigger(data, buffer, rec, event);
74 continue;
75 }
76 filter = rcu_dereference_sched(data->filter);
77 if (filter && !filter_match_preds(filter, rec))
78 continue;
79 if (event_command_post_trigger(data->cmd_ops)) {
80 tt |= data->cmd_ops->trigger_type;
81 continue;
82 }
83 data->ops->trigger(data, buffer, rec, event);
84 }
85 return tt;
86 }
87 EXPORT_SYMBOL_GPL(event_triggers_call);
88
89 bool __trace_trigger_soft_disabled(struct trace_event_file *file)
90 {
91 unsigned long eflags = file->flags;
92
93 if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
94 event_triggers_call(file, NULL, NULL, NULL);
95 if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
96 return true;
97 if (eflags & EVENT_FILE_FL_PID_FILTER)
98 return trace_event_ignore_this_pid(file);
99 return false;
100 }
101 EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
102
103 /**
104 * event_triggers_post_call - Call 'post_triggers' for a trace event
105 * @file: The trace_event_file associated with the event
106 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
107 *
108 * For each trigger associated with an event, invoke the trigger
109 * function registered with the associated trigger command, if the
110 * corresponding bit is set in the tt enum passed into this function.
111 * See @event_triggers_call for details on how those bits are set.
112 *
113 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
114 */
115 void
116 event_triggers_post_call(struct trace_event_file *file,
117 enum event_trigger_type tt)
118 {
119 struct event_trigger_data *data;
120
121 list_for_each_entry_rcu(data, &file->triggers, list) {
122 if (data->paused)
123 continue;
124 if (data->cmd_ops->trigger_type & tt)
125 data->ops->trigger(data, NULL, NULL, NULL);
126 }
127 }
128 EXPORT_SYMBOL_GPL(event_triggers_post_call);
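/*
 * A minimal sketch (not compiled here) of how the two entry points above
 * are meant to be paired by an event commit path: run the conditional
 * triggers before the record is committed, write the record, then run
 * whatever was deferred. The variable names below are illustrative only
 * and mirror the real commit path loosely.
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(file, buffer, entry, event);
 *
 *	(commit the ring buffer event here)
 *
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */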
129
130 #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
131
132 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
133 {
134 struct trace_event_file *event_file = event_file_data(m->private);
135
136 if (t == SHOW_AVAILABLE_TRIGGERS) {
137 (*pos)++;
138 return NULL;
139 }
140 return seq_list_next(t, &event_file->triggers, pos);
141 }
142
143 static bool check_user_trigger(struct trace_event_file *file)
144 {
145 struct event_trigger_data *data;
146
147 list_for_each_entry_rcu(data, &file->triggers, list,
148 lockdep_is_held(&event_mutex)) {
149 if (data->flags & EVENT_TRIGGER_FL_PROBE)
150 continue;
151 return true;
152 }
153 return false;
154 }
155
156 static void *trigger_start(struct seq_file *m, loff_t *pos)
157 {
158 struct trace_event_file *event_file;
159
160 /* ->stop() is called even if ->start() fails */
161 mutex_lock(&event_mutex);
162 event_file = event_file_data(m->private);
163 if (unlikely(!event_file))
164 return ERR_PTR(-ENODEV);
165
166 if (list_empty(&event_file->triggers) || !check_user_trigger(event_file))
167 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
168
169 return seq_list_start(&event_file->triggers, *pos);
170 }
171
172 static void trigger_stop(struct seq_file *m, void *t)
173 {
174 mutex_unlock(&event_mutex);
175 }
176
177 static int trigger_show(struct seq_file *m, void *v)
178 {
179 struct event_trigger_data *data;
180 struct event_command *p;
181
182 if (v == SHOW_AVAILABLE_TRIGGERS) {
183 seq_puts(m, "# Available triggers:\n");
184 seq_putc(m, '#');
185 mutex_lock(&trigger_cmd_mutex);
186 list_for_each_entry_reverse(p, &trigger_commands, list)
187 seq_printf(m, " %s", p->name);
188 seq_putc(m, '\n');
189 mutex_unlock(&trigger_cmd_mutex);
190 return 0;
191 }
192
193 data = list_entry(v, struct event_trigger_data, list);
194 data->ops->print(m, data);
195
196 return 0;
197 }
198
199 static const struct seq_operations event_triggers_seq_ops = {
200 .start = trigger_start,
201 .next = trigger_next,
202 .stop = trigger_stop,
203 .show = trigger_show,
204 };
205
206 static int event_trigger_regex_open(struct inode *inode, struct file *file)
207 {
208 int ret;
209
210 ret = security_locked_down(LOCKDOWN_TRACEFS);
211 if (ret)
212 return ret;
213
214 mutex_lock(&event_mutex);
215
216 if (unlikely(!event_file_data(file))) {
217 mutex_unlock(&event_mutex);
218 return -ENODEV;
219 }
220
221 if ((file->f_mode & FMODE_WRITE) &&
222 (file->f_flags & O_TRUNC)) {
223 struct trace_event_file *event_file;
224 struct event_command *p;
225
226 event_file = event_file_data(file);
227
228 list_for_each_entry(p, &trigger_commands, list) {
229 if (p->unreg_all)
230 p->unreg_all(event_file);
231 }
232 }
233
234 if (file->f_mode & FMODE_READ) {
235 ret = seq_open(file, &event_triggers_seq_ops);
236 if (!ret) {
237 struct seq_file *m = file->private_data;
238 m->private = file;
239 }
240 }
241
242 mutex_unlock(&event_mutex);
243
244 return ret;
245 }
246
247 int trigger_process_regex(struct trace_event_file *file, char *buff)
248 {
249 char *command, *next;
250 struct event_command *p;
251 int ret = -EINVAL;
252
253 next = buff = skip_spaces(buff);
254 command = strsep(&next, ": \t");
255 if (next) {
256 next = skip_spaces(next);
257 if (!*next)
258 next = NULL;
259 }
260 command = (command[0] != '!') ? command : command + 1;
261
262 mutex_lock(&trigger_cmd_mutex);
263 list_for_each_entry(p, &trigger_commands, list) {
264 if (strcmp(p->name, command) == 0) {
265 ret = p->parse(p, file, buff, command, next);
266 goto out_unlock;
267 }
268 }
269 out_unlock:
270 mutex_unlock(&trigger_cmd_mutex);
271
272 return ret;
273 }
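/*
 * For reference, the strings handled above are what user space writes to
 * an event's 'trigger' file, for example (paths assume tracefs is mounted
 * at /sys/kernel/tracing):
 *
 *	echo 'stacktrace:5 if prev_pid == 0' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 *	echo '!stacktrace' > \
 *		/sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * The leading '!' selects the removal path handled by each command's
 * unreg() implementation.
 */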
274
275 static ssize_t event_trigger_regex_write(struct file *file,
276 const char __user *ubuf,
277 size_t cnt, loff_t *ppos)
278 {
279 struct trace_event_file *event_file;
280 ssize_t ret;
281 char *buf;
282
283 if (!cnt)
284 return 0;
285
286 if (cnt >= PAGE_SIZE)
287 return -EINVAL;
288
289 buf = memdup_user_nul(ubuf, cnt);
290 if (IS_ERR(buf))
291 return PTR_ERR(buf);
292
293 strim(buf);
294
295 mutex_lock(&event_mutex);
296 event_file = event_file_data(file);
297 if (unlikely(!event_file)) {
298 mutex_unlock(&event_mutex);
299 kfree(buf);
300 return -ENODEV;
301 }
302 ret = trigger_process_regex(event_file, buf);
303 mutex_unlock(&event_mutex);
304
305 kfree(buf);
306 if (ret < 0)
307 goto out;
308
309 *ppos += cnt;
310 ret = cnt;
311 out:
312 return ret;
313 }
314
315 static int event_trigger_regex_release(struct inode *inode, struct file *file)
316 {
317 mutex_lock(&event_mutex);
318
319 if (file->f_mode & FMODE_READ)
320 seq_release(inode, file);
321
322 mutex_unlock(&event_mutex);
323
324 return 0;
325 }
326
327 static ssize_t
328 event_trigger_write(struct file *filp, const char __user *ubuf,
329 size_t cnt, loff_t *ppos)
330 {
331 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
332 }
333
334 static int
335 event_trigger_open(struct inode *inode, struct file *filp)
336 {
337 /* Checks for tracefs lockdown */
338 return event_trigger_regex_open(inode, filp);
339 }
340
341 static int
342 event_trigger_release(struct inode *inode, struct file *file)
343 {
344 return event_trigger_regex_release(inode, file);
345 }
346
347 const struct file_operations event_trigger_fops = {
348 .open = event_trigger_open,
349 .read = seq_read,
350 .write = event_trigger_write,
351 .llseek = tracing_lseek,
352 .release = event_trigger_release,
353 };
354
355 /*
356 * Currently we only register event commands from __init, so mark this
357 * __init too.
358 */
359 __init int register_event_command(struct event_command *cmd)
360 {
361 struct event_command *p;
362 int ret = 0;
363
364 mutex_lock(&trigger_cmd_mutex);
365 list_for_each_entry(p, &trigger_commands, list) {
366 if (strcmp(cmd->name, p->name) == 0) {
367 ret = -EBUSY;
368 goto out_unlock;
369 }
370 }
371 list_add(&cmd->list, &trigger_commands);
372 out_unlock:
373 mutex_unlock(&trigger_cmd_mutex);
374
375 return ret;
376 }
377
378 /*
379 * Currently we only unregister event commands from __init, so mark
380 * this __init too.
381 */
382 __init int unregister_event_command(struct event_command *cmd)
383 {
384 struct event_command *p, *n;
385 int ret = -ENODEV;
386
387 mutex_lock(&trigger_cmd_mutex);
388 list_for_each_entry_safe(p, n, &trigger_commands, list) {
389 if (strcmp(cmd->name, p->name) == 0) {
390 ret = 0;
391 list_del_init(&p->list);
392 goto out_unlock;
393 }
394 }
395 out_unlock:
396 mutex_unlock(&trigger_cmd_mutex);
397
398 return ret;
399 }
400
401 /**
402 * event_trigger_print - Generic event_trigger_ops @print implementation
403 * @name: The name of the event trigger
404 * @m: The seq_file being printed to
405 * @data: Trigger-specific data
406 * @filter_str: filter_str to print, if present
407 *
408 * Common implementation for event triggers to print themselves.
409 *
410 * Usually wrapped by a function that simply sets the @name of the
411 * trigger command and then invokes this.
412 *
413 * Return: 0 on success, errno otherwise
414 */
415 static int
416 event_trigger_print(const char *name, struct seq_file *m,
417 void *data, char *filter_str)
418 {
419 long count = (long)data;
420
421 seq_puts(m, name);
422
423 if (count == -1)
424 seq_puts(m, ":unlimited");
425 else
426 seq_printf(m, ":count=%ld", count);
427
428 if (filter_str)
429 seq_printf(m, " if %s\n", filter_str);
430 else
431 seq_putc(m, '\n');
432
433 return 0;
434 }
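/*
 * For example, a traceoff trigger registered as 'traceoff:3 if pid == 1'
 * would be shown by this helper as:
 *
 *	traceoff:count=3 if pid == 1
 *
 * and one registered without a count as:
 *
 *	traceoff:unlimited
 */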
435
436 /**
437 * event_trigger_init - Generic event_trigger_ops @init implementation
438 * @data: Trigger-specific data
439 *
440 * Common implementation of event trigger initialization.
441 *
442 * Usually used directly as the @init method in event trigger
443 * implementations.
444 *
445 * Return: 0 on success, errno otherwise
446 */
447 int event_trigger_init(struct event_trigger_data *data)
448 {
449 data->ref++;
450 return 0;
451 }
452
453 /**
454 * event_trigger_free - Generic event_trigger_ops @free implementation
455 * @data: Trigger-specific data
456 *
457 * Common implementation of event trigger de-initialization.
458 *
459 * Usually used directly as the @free method in event trigger
460 * implementations.
461 */
462 static void
463 event_trigger_free(struct event_trigger_data *data)
464 {
465 if (WARN_ON_ONCE(data->ref <= 0))
466 return;
467
468 data->ref--;
469 if (!data->ref)
470 trigger_data_free(data);
471 }
472
473 int trace_event_trigger_enable_disable(struct trace_event_file *file,
474 int trigger_enable)
475 {
476 int ret = 0;
477
478 if (trigger_enable) {
479 if (atomic_inc_return(&file->tm_ref) > 1)
480 return ret;
481 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
482 ret = trace_event_enable_disable(file, 1, 1);
483 } else {
484 if (atomic_dec_return(&file->tm_ref) > 0)
485 return ret;
486 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
487 ret = trace_event_enable_disable(file, 0, 1);
488 }
489
490 return ret;
491 }
492
493 /**
494 * clear_event_triggers - Clear all triggers associated with a trace array
495 * @tr: The trace array to clear
496 *
497 * For each trigger, the triggering event has its tm_ref decremented
498 * via trace_event_trigger_enable_disable(), and any associated event
499 * (in the case of enable/disable_event triggers) will have its sm_ref
500 * decremented via free()->trace_event_enable_disable(). That
501 * combination effectively reverses the soft-mode/trigger state added
502 * by trigger registration.
503 *
504 * Must be called with event_mutex held.
505 */
506 void
507 clear_event_triggers(struct trace_array *tr)
508 {
509 struct trace_event_file *file;
510
511 list_for_each_entry(file, &tr->events, list) {
512 struct event_trigger_data *data, *n;
513 list_for_each_entry_safe(data, n, &file->triggers, list) {
514 trace_event_trigger_enable_disable(file, 0);
515 list_del_rcu(&data->list);
516 if (data->ops->free)
517 data->ops->free(data);
518 }
519 }
520 }
521
522 /**
523 * update_cond_flag - Set or reset the TRIGGER_COND bit
524 * @file: The trace_event_file associated with the event
525 *
526 * If an event has triggers and any of those triggers has a filter or
527 * a post_trigger, trigger invocation needs to be deferred until after
528 * the current event has logged its data, and the event should have
529 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
530 * cleared.
531 */
532 void update_cond_flag(struct trace_event_file *file)
533 {
534 struct event_trigger_data *data;
535 bool set_cond = false;
536
537 lockdep_assert_held(&event_mutex);
538
539 list_for_each_entry(data, &file->triggers, list) {
540 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
541 event_command_needs_rec(data->cmd_ops)) {
542 set_cond = true;
543 break;
544 }
545 }
546
547 if (set_cond)
548 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
549 else
550 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
551 }
552
553 /**
554 * register_trigger - Generic event_command @reg implementation
555 * @glob: The raw string used to register the trigger
556 * @data: Trigger-specific data to associate with the trigger
557 * @file: The trace_event_file associated with the event
558 *
559 * Common implementation for event trigger registration.
560 *
561 * Usually used directly as the @reg method in event command
562 * implementations.
563 *
564 * Return: 0 on success, errno otherwise
565 */
566 static int register_trigger(char *glob,
567 struct event_trigger_data *data,
568 struct trace_event_file *file)
569 {
570 struct event_trigger_data *test;
571 int ret = 0;
572
573 lockdep_assert_held(&event_mutex);
574
575 list_for_each_entry(test, &file->triggers, list) {
576 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
577 ret = -EEXIST;
578 goto out;
579 }
580 }
581
582 if (data->ops->init) {
583 ret = data->ops->init(data);
584 if (ret < 0)
585 goto out;
586 }
587
588 list_add_rcu(&data->list, &file->triggers);
589
590 update_cond_flag(file);
591 ret = trace_event_trigger_enable_disable(file, 1);
592 if (ret < 0) {
593 list_del_rcu(&data->list);
594 update_cond_flag(file);
595 }
596 out:
597 return ret;
598 }
599
600 /**
601 * unregister_trigger - Generic event_command @unreg implementation
602 * @glob: The raw string used to register the trigger
603 * @test: Trigger-specific data used to find the trigger to remove
604 * @file: The trace_event_file associated with the event
605 *
606 * Common implementation for event trigger unregistration.
607 *
608 * Usually used directly as the @unreg method in event command
609 * implementations.
610 */
611 static void unregister_trigger(char *glob,
612 struct event_trigger_data *test,
613 struct trace_event_file *file)
614 {
615 struct event_trigger_data *data = NULL, *iter;
616
617 lockdep_assert_held(&event_mutex);
618
619 list_for_each_entry(iter, &file->triggers, list) {
620 if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
621 data = iter;
622 list_del_rcu(&data->list);
623 trace_event_trigger_enable_disable(file, 0);
624 update_cond_flag(file);
625 break;
626 }
627 }
628
629 if (data && data->ops->free)
630 data->ops->free(data);
631 }
632
633 /*
634 * Event trigger parsing helper functions.
635 *
636 * These functions help make it easier to write an event trigger
637 * parsing function i.e. the struct event_command.parse() callback
638 * function responsible for parsing and registering a trigger command
639 * written to the 'trigger' file.
640 *
641 * A trigger command (or just 'trigger' for short) takes the form:
642 * [trigger] [if filter]
643 *
644 * The struct event_command.parse() callback (and other struct
645 * event_command functions) refer to several components of a trigger
646 * command. Those same components are referenced by the event trigger
647 * parsing helper functions defined below. These components are:
648 *
649 * cmd - the trigger command name
650 * glob - the trigger command name optionally prefaced with '!'
651 * param_and_filter - text following cmd and ':'
652 * param - text following cmd and ':' and stripped of filter
653 * filter - the optional filter text following (and including) 'if'
654 *
655 * To illustrate the use of these components, here are some concrete
656 * examples. For the following triggers:
657 *
658 * echo 'traceon:5 if pid == 0' > trigger
659 * - 'traceon' is both cmd and glob
660 * - '5 if pid == 0' is the param_and_filter
661 * - '5' is the param
662 * - 'if pid == 0' is the filter
663 *
664 * echo 'enable_event:sys:event:n' > trigger
665 * - 'enable_event' is both cmd and glob
666 * - 'sys:event:n' is the param_and_filter
667 * - 'sys:event:n' is the param
668 * - there is no filter
669 *
670 * echo 'hist:keys=pid if prio > 50' > trigger
671 * - 'hist' is both cmd and glob
672 * - 'keys=pid if prio > 50' is the param_and_filter
673 * - 'keys=pid' is the param
674 * - 'if prio > 50' is the filter
675 *
676 * echo '!enable_event:sys:event:n' > trigger
677 * - 'enable_event' the cmd
678 * - '!enable_event' is the glob
679 * - 'sys:event:n' is the param_and_filter
680 * - 'sys:event:n' is the param
681 * - there is no filter
682 *
683 * echo 'traceoff' > trigger
684 * - 'traceoff' is both cmd and glob
685 * - there is no param_and_filter
686 * - there is no param
687 * - there is no filter
688 *
689 * There are a few different categories of event trigger covered by
690 * these helpers:
691 *
692 * - triggers that don't require a parameter e.g. traceon
693 * - triggers that do require a parameter e.g. enable_event and hist
694 * - triggers that, though they may not require a param, may support an
695 * optional 'n' param (n = number of times the trigger should fire)
696 * e.g.: traceon:5 or enable_event:sys:event:n
697 * - triggers that do not support an 'n' param e.g. hist
698 *
699 * These functions can be used or ignored as necessary - it all
700 * depends on the complexity of the trigger, and the granularity of
701 * the functions supported reflects the fact that some implementations
702 * may need to customize certain aspects of their implementations and
703 * won't need certain functions. For instance, the hist trigger
704 * implementation doesn't use event_trigger_separate_filter() because
705 * it has special requirements for handling the filter.
706 */
707
708 /**
709 * event_trigger_check_remove - check whether an event trigger specifies remove
710 * @glob: The trigger command string, with optional remove(!) operator
711 *
712 * The event trigger callback implementations pass in 'glob' as a
713 * parameter. This is the command name either with or without a
714 * remove(!) operator. This function simply parses the glob and
715 * determines whether the command corresponds to a trigger removal or
716 * a trigger addition.
717 *
718 * Return: true if this is a remove command, false otherwise
719 */
720 bool event_trigger_check_remove(const char *glob)
721 {
722 return (glob && glob[0] == '!') ? true : false;
723 }
724
725 /**
726 * event_trigger_empty_param - check whether the param is empty
727 * @param: The trigger param string
728 *
729 * The event trigger callback implementations pass in 'param' as a
730 * parameter. This corresponds to the string following the command
731 * name minus the command name. This function can be called by a
732 * callback implementation for any command that requires a param; a
733 * callback that doesn't require a param can ignore it.
734 *
735 * Return: true if this is an empty param, false otherwise
736 */
737 bool event_trigger_empty_param(const char *param)
738 {
739 return !param;
740 }
741
742 /**
743 * event_trigger_separate_filter - separate an event trigger from a filter
744 * @param_and_filter: String containing trigger and possibly filter
745 * @param: outparam, will be filled with a pointer to the trigger
746 * @filter: outparam, will be filled with a pointer to the filter
747 * @param_required: Specifies whether or not the param string is required
748 *
749 * Given a param string of the form '[trigger] [if filter]', this
750 * function separates the filter from the trigger and returns the
751 * trigger in @param and the filter in @filter. Either the @param
752 * or the @filter may be set to NULL by this function - if not set to
753 * NULL, they will contain strings corresponding to the trigger and
754 * filter.
755 *
756 * There are two cases that need to be handled with respect to the
757 * passed-in param: either the param is required, or it is not
758 * required. If @param_required is set, and there's no param, it will
759 * return -EINVAL. If @param_required is not set and there's a param
760 * that starts with a number, that corresponds to the case of a
761 * trigger with :n (n = number of times the trigger should fire) and
762 * the parsing continues normally; otherwise the function just returns
763 * and assumes param just contains a filter and there's nothing else
764 * to do.
765 *
766 * Return: 0 on success, errno otherwise
767 */
768 int event_trigger_separate_filter(char *param_and_filter, char **param,
769 char **filter, bool param_required)
770 {
771 int ret = 0;
772
773 *param = *filter = NULL;
774
775 if (!param_and_filter) {
776 if (param_required)
777 ret = -EINVAL;
778 goto out;
779 }
780
781 /*
782 * Here we check for an optional param. The only legal
783 * optional param is :n, and if that's the case, continue
784 * below. Otherwise we assume what's left is a filter and
785 * return it as the filter string for the caller to deal with.
786 */
787 if (!param_required && param_and_filter && !isdigit(param_and_filter[0])) {
788 *filter = param_and_filter;
789 goto out;
790 }
791
792 /*
793 * Separate the param from the filter (param [if filter]).
794 * Here we have either an optional :n param or a required
795 * param and an optional filter.
796 */
797 *param = strsep(&param_and_filter, " \t");
798
799 /*
800 * Here we have a filter, though it may be empty.
801 */
802 if (param_and_filter) {
803 *filter = skip_spaces(param_and_filter);
804 if (!**filter)
805 *filter = NULL;
806 }
807 out:
808 return ret;
809 }
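/*
 * A couple of concrete examples of the separation performed above
 * (param_required == false in both cases):
 *
 *	"5 if pid == 0"   ->  *param = "5",  *filter = "if pid == 0"
 *	"if pid == 0"     ->  *param = NULL, *filter = "if pid == 0"
 */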
810
811 /**
812 * event_trigger_alloc - allocate and init event_trigger_data for a trigger
813 * @cmd_ops: The event_command operations for the trigger
814 * @cmd: The cmd string
815 * @param: The param string
816 * @private_data: User data to associate with the event trigger
817 *
818 * Allocate an event_trigger_data instance and initialize it. The
819 * @cmd_ops are used along with the @cmd and @param to get the
820 * trigger_ops to assign to the event_trigger_data. @private_data can
821 * also be passed in and associated with the event_trigger_data.
822 *
823 * Use event_trigger_free() to free an event_trigger_data object.
824 *
825 * Return: The trigger_data object on success, NULL otherwise
826 */
827 struct event_trigger_data *event_trigger_alloc(struct event_command *cmd_ops,
828 char *cmd,
829 char *param,
830 void *private_data)
831 {
832 struct event_trigger_data *trigger_data;
833 struct event_trigger_ops *trigger_ops;
834
835 trigger_ops = cmd_ops->get_trigger_ops(cmd, param);
836
837 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
838 if (!trigger_data)
839 return NULL;
840
841 trigger_data->count = -1;
842 trigger_data->ops = trigger_ops;
843 trigger_data->cmd_ops = cmd_ops;
844 trigger_data->private_data = private_data;
845
846 INIT_LIST_HEAD(&trigger_data->list);
847 INIT_LIST_HEAD(&trigger_data->named_list);
848 RCU_INIT_POINTER(trigger_data->filter, NULL);
849
850 return trigger_data;
851 }
852
853 /**
854 * event_trigger_parse_num - parse and return the number param for a trigger
855 * @param: The param string
856 * @trigger_data: The trigger_data for the trigger
857 *
858 * Parse the :n (n = number of times the trigger should fire) param
859 * and set the count variable in the trigger_data to the parsed count.
860 *
861 * Return: 0 on success, errno otherwise
862 */
863 int event_trigger_parse_num(char *param,
864 struct event_trigger_data *trigger_data)
865 {
866 char *number;
867 int ret = 0;
868
869 if (param) {
870 number = strsep(&param, ":");
871
872 if (!strlen(number))
873 return -EINVAL;
874
875 /*
876 * Parse the optional count and store it in trigger_data->count
877 * (the default of -1 means unlimited).
878 */
879 ret = kstrtoul(number, 0, &trigger_data->count);
880 }
881
882 return ret;
883 }
884
885 /**
886 * event_trigger_set_filter - set an event trigger's filter
887 * @cmd_ops: The event_command operations for the trigger
888 * @file: The event file for the trigger's event
889 * @param: The string containing the filter
890 * @trigger_data: The trigger_data for the trigger
891 *
892 * Set the filter for the trigger. If the filter is NULL, just return
893 * without error.
894 *
895 * Return: 0 on success, errno otherwise
896 */
897 int event_trigger_set_filter(struct event_command *cmd_ops,
898 struct trace_event_file *file,
899 char *param,
900 struct event_trigger_data *trigger_data)
901 {
902 if (param && cmd_ops->set_filter)
903 return cmd_ops->set_filter(param, trigger_data, file);
904
905 return 0;
906 }
907
908 /**
909 * event_trigger_reset_filter - reset an event trigger's filter
910 * @cmd_ops: The event_command operations for the trigger
911 * @trigger_data: The trigger_data for the trigger
912 *
913 * Reset the filter for the trigger to no filter.
914 */
915 void event_trigger_reset_filter(struct event_command *cmd_ops,
916 struct event_trigger_data *trigger_data)
917 {
918 if (cmd_ops->set_filter)
919 cmd_ops->set_filter(NULL, trigger_data, NULL);
920 }
921
922 /**
923 * event_trigger_register - register an event trigger
924 * @cmd_ops: The event_command operations for the trigger
925 * @file: The event file for the trigger's event
926 * @glob: The trigger command string, with optional remove(!) operator
927 * @trigger_data: The trigger_data for the trigger
928 *
929 * Register an event trigger. The @cmd_ops are used to call the
930 * cmd_ops->reg() function which actually does the registration.
931 *
932 * Return: 0 on success, errno otherwise
933 */
934 int event_trigger_register(struct event_command *cmd_ops,
935 struct trace_event_file *file,
936 char *glob,
937 struct event_trigger_data *trigger_data)
938 {
939 return cmd_ops->reg(glob, trigger_data, file);
940 }
941
942 /**
943 * event_trigger_unregister - unregister an event trigger
944 * @cmd_ops: The event_command operations for the trigger
945 * @file: The event file for the trigger's event
946 * @glob: The trigger command string, with optional remove(!) operator
947 * @trigger_data: The trigger_data for the trigger
948 *
949 * Unregister an event trigger. The @cmd_ops are used to call the
950 * cmd_ops->unreg() function which actually does the unregistration.
951 */
952 void event_trigger_unregister(struct event_command *cmd_ops,
953 struct trace_event_file *file,
954 char *glob,
955 struct event_trigger_data *trigger_data)
956 {
957 cmd_ops->unreg(glob, trigger_data, file);
958 }
959
960 /*
961 * End event trigger parsing helper functions.
962 */
963
964 /**
965 * event_trigger_parse - Generic event_command @parse implementation
966 * @cmd_ops: The command ops, used for trigger registration
967 * @file: The trace_event_file associated with the event
968 * @glob: The raw string used to register the trigger
969 * @cmd: The cmd portion of the string used to register the trigger
970 * @param_and_filter: The param and filter portion of the string used to register the trigger
971 *
972 * Common implementation for event command parsing and trigger
973 * instantiation.
974 *
975 * Usually used directly as the @parse method in event command
976 * implementations.
977 *
978 * Return: 0 on success, errno otherwise
979 */
980 static int
981 event_trigger_parse(struct event_command *cmd_ops,
982 struct trace_event_file *file,
983 char *glob, char *cmd, char *param_and_filter)
984 {
985 struct event_trigger_data *trigger_data;
986 char *param, *filter;
987 bool remove;
988 int ret;
989
990 remove = event_trigger_check_remove(glob);
991
992 ret = event_trigger_separate_filter(param_and_filter, &param, &filter, false);
993 if (ret)
994 return ret;
995
996 ret = -ENOMEM;
997 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
998 if (!trigger_data)
999 goto out;
1000
1001 if (remove) {
1002 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1003 kfree(trigger_data);
1004 ret = 0;
1005 goto out;
1006 }
1007
1008 ret = event_trigger_parse_num(param, trigger_data);
1009 if (ret)
1010 goto out_free;
1011
1012 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1013 if (ret < 0)
1014 goto out_free;
1015
1016 /* Up the trigger_data count to make sure reg doesn't free it on failure */
1017 event_trigger_init(trigger_data);
1018
1019 ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1020 if (ret)
1021 goto out_free;
1022
1023 /* Down the counter of trigger_data or free it if not used anymore */
1024 event_trigger_free(trigger_data);
1025 out:
1026 return ret;
1027
1028 out_free:
1029 event_trigger_reset_filter(cmd_ops, trigger_data);
1030 kfree(trigger_data);
1031 goto out;
1032 }
1033
1034 /**
1035 * set_trigger_filter - Generic event_command @set_filter implementation
1036 * @filter_str: The filter string for the trigger, NULL to remove filter
1037 * @trigger_data: Trigger-specific data
1038 * @file: The trace_event_file associated with the event
1039 *
1040 * Common implementation for event command filter parsing and filter
1041 * instantiation.
1042 *
1043 * Usually used directly as the @set_filter method in event command
1044 * implementations.
1045 *
1046 * Also used to remove a filter (if filter_str = NULL).
1047 *
1048 * Return: 0 on success, errno otherwise
1049 */
1050 int set_trigger_filter(char *filter_str,
1051 struct event_trigger_data *trigger_data,
1052 struct trace_event_file *file)
1053 {
1054 struct event_trigger_data *data = trigger_data;
1055 struct event_filter *filter = NULL, *tmp;
1056 int ret = -EINVAL;
1057 char *s;
1058
1059 if (!filter_str) /* clear the current filter */
1060 goto assign;
1061
1062 s = strsep(&filter_str, " \t");
1063
1064 if (!strlen(s) || strcmp(s, "if") != 0)
1065 goto out;
1066
1067 if (!filter_str)
1068 goto out;
1069
1070 /* The filter is for the 'trigger' event, not the triggered event */
1071 ret = create_event_filter(file->tr, file->event_call,
1072 filter_str, true, &filter);
1073
1074 /* set_str was only enabled for error reporting; drop the saved string */
1075 if (filter) {
1076 kfree(filter->filter_string);
1077 filter->filter_string = NULL;
1078 }
1079
1080 /*
1081 * If create_event_filter() fails, the filter still needs to be freed,
1082 * which the calling code will do via data->filter.
1083 */
1084 assign:
1085 tmp = rcu_access_pointer(data->filter);
1086
1087 rcu_assign_pointer(data->filter, filter);
1088
1089 if (tmp) {
1090 /*
1091 * Make sure the call is done with the filter.
1092 * It is possible that a filter could fail at boot up,
1093 * and then this path will be called. Avoid the synchronization
1094 * in that case.
1095 */
1096 if (system_state != SYSTEM_BOOTING)
1097 tracepoint_synchronize_unregister();
1098 free_event_filter(tmp);
1099 }
1100
1101 kfree(data->filter_str);
1102 data->filter_str = NULL;
1103
1104 if (filter_str) {
1105 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
1106 if (!data->filter_str) {
1107 free_event_filter(rcu_access_pointer(data->filter));
1108 data->filter = NULL;
1109 ret = -ENOMEM;
1110 }
1111 }
1112 out:
1113 return ret;
1114 }
1115
1116 static LIST_HEAD(named_triggers);
1117
1118 /**
1119 * find_named_trigger - Find the common named trigger associated with @name
1120 * @name: The name of the set of named triggers to find the common data for
1121 *
1122 * Named triggers are sets of triggers that share a common set of
1123 * trigger data. The first named trigger registered with a given name
1124 * owns the common trigger data that the others subsequently
1125 * registered with the same name will reference. This function
1126 * returns the common trigger data associated with that first
1127 * registered instance.
1128 *
1129 * Return: the common trigger data for the given named trigger on
1130 * success, NULL otherwise.
1131 */
1132 struct event_trigger_data *find_named_trigger(const char *name)
1133 {
1134 struct event_trigger_data *data;
1135
1136 if (!name)
1137 return NULL;
1138
1139 list_for_each_entry(data, &named_triggers, named_list) {
1140 if (data->named_data)
1141 continue;
1142 if (strcmp(data->name, name) == 0)
1143 return data;
1144 }
1145
1146 return NULL;
1147 }
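/*
 * As an illustrative (hypothetical) example of named triggers: two hist
 * triggers created with the same ':name=' share one set of trigger data,
 * e.g. from the tracefs root:
 *
 *	echo 'hist:name=foo:keys=common_pid' > events/sched/sched_waking/trigger
 *	echo 'hist:name=foo:keys=common_pid' > events/sched/sched_wakeup/trigger
 *
 * Here the second registration finds the first via find_named_trigger()
 * and references its common data instead of allocating its own.
 */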
1148
1149 /**
1150 * is_named_trigger - determine if a given trigger is a named trigger
1151 * @test: The trigger data to test
1152 *
1153 * Return: true if 'test' is a named trigger, false otherwise.
1154 */
1155 bool is_named_trigger(struct event_trigger_data *test)
1156 {
1157 struct event_trigger_data *data;
1158
1159 list_for_each_entry(data, &named_triggers, named_list) {
1160 if (test == data)
1161 return true;
1162 }
1163
1164 return false;
1165 }
1166
1167 /**
1168 * save_named_trigger - save the trigger in the named trigger list
1169 * @name: The name of the named trigger set
1170 * @data: The trigger data to save
1171 *
1172 * Return: 0 if successful, negative error otherwise.
1173 */
1174 int save_named_trigger(const char *name, struct event_trigger_data *data)
1175 {
1176 data->name = kstrdup(name, GFP_KERNEL);
1177 if (!data->name)
1178 return -ENOMEM;
1179
1180 list_add(&data->named_list, &named_triggers);
1181
1182 return 0;
1183 }
1184
1185 /**
1186 * del_named_trigger - delete a trigger from the named trigger list
1187 * @data: The trigger data to delete
1188 */
1189 void del_named_trigger(struct event_trigger_data *data)
1190 {
1191 kfree(data->name);
1192 data->name = NULL;
1193
1194 list_del(&data->named_list);
1195 }
1196
1197 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
1198 {
1199 struct event_trigger_data *test;
1200
1201 list_for_each_entry(test, &named_triggers, named_list) {
1202 if (strcmp(test->name, data->name) == 0) {
1203 if (pause) {
1204 test->paused_tmp = test->paused;
1205 test->paused = true;
1206 } else {
1207 test->paused = test->paused_tmp;
1208 }
1209 }
1210 }
1211 }
1212
1213 /**
1214 * pause_named_trigger - Pause all named triggers with the same name
1215 * @data: The trigger data of a named trigger to pause
1216 *
1217 * Pauses a named trigger along with all other triggers having the
1218 * same name. Because named triggers share a common set of data,
1219 * pausing only one is meaningless, so pausing one named trigger needs
1220 * to pause all triggers with the same name.
1221 */
1222 void pause_named_trigger(struct event_trigger_data *data)
1223 {
1224 __pause_named_trigger(data, true);
1225 }
1226
1227 /**
1228 * unpause_named_trigger - Un-pause all named triggers with the same name
1229 * @data: The trigger data of a named trigger to unpause
1230 *
1231 * Un-pauses a named trigger along with all other triggers having the
1232 * same name. Because named triggers share a common set of data,
1233 * unpausing only one is meaningless, so unpausing one named trigger
1234 * needs to unpause all triggers with the same name.
1235 */
1236 void unpause_named_trigger(struct event_trigger_data *data)
1237 {
1238 __pause_named_trigger(data, false);
1239 }
1240
1241 /**
1242 * set_named_trigger_data - Associate common named trigger data
1243 * @data: The trigger data to associate
1244 * @named_data: The common named trigger to be associated
1245 *
1246 * Named triggers are sets of triggers that share a common set of
1247 * trigger data. The first named trigger registered with a given name
1248 * owns the common trigger data that the others subsequently
1249 * registered with the same name will reference. This function
1250 * associates the common trigger data from the first trigger with the
1251 * given trigger.
1252 */
1253 void set_named_trigger_data(struct event_trigger_data *data,
1254 struct event_trigger_data *named_data)
1255 {
1256 data->named_data = named_data;
1257 }
1258
1259 struct event_trigger_data *
1260 get_named_trigger_data(struct event_trigger_data *data)
1261 {
1262 return data->named_data;
1263 }
1264
1265 static void
1266 traceon_trigger(struct event_trigger_data *data,
1267 struct trace_buffer *buffer, void *rec,
1268 struct ring_buffer_event *event)
1269 {
1270 struct trace_event_file *file = data->private_data;
1271
1272 if (file) {
1273 if (tracer_tracing_is_on(file->tr))
1274 return;
1275
1276 tracer_tracing_on(file->tr);
1277 return;
1278 }
1279
1280 if (tracing_is_on())
1281 return;
1282
1283 tracing_on();
1284 }
1285
1286 static void
1287 traceon_count_trigger(struct event_trigger_data *data,
1288 struct trace_buffer *buffer, void *rec,
1289 struct ring_buffer_event *event)
1290 {
1291 struct trace_event_file *file = data->private_data;
1292
1293 if (file) {
1294 if (tracer_tracing_is_on(file->tr))
1295 return;
1296 } else {
1297 if (tracing_is_on())
1298 return;
1299 }
1300
1301 if (!data->count)
1302 return;
1303
1304 if (data->count != -1)
1305 (data->count)--;
1306
1307 if (file)
1308 tracer_tracing_on(file->tr);
1309 else
1310 tracing_on();
1311 }
1312
1313 static void
1314 traceoff_trigger(struct event_trigger_data *data,
1315 struct trace_buffer *buffer, void *rec,
1316 struct ring_buffer_event *event)
1317 {
1318 struct trace_event_file *file = data->private_data;
1319
1320 if (file) {
1321 if (!tracer_tracing_is_on(file->tr))
1322 return;
1323
1324 tracer_tracing_off(file->tr);
1325 return;
1326 }
1327
1328 if (!tracing_is_on())
1329 return;
1330
1331 tracing_off();
1332 }
1333
1334 static void
1335 traceoff_count_trigger(struct event_trigger_data *data,
1336 struct trace_buffer *buffer, void *rec,
1337 struct ring_buffer_event *event)
1338 {
1339 struct trace_event_file *file = data->private_data;
1340
1341 if (file) {
1342 if (!tracer_tracing_is_on(file->tr))
1343 return;
1344 } else {
1345 if (!tracing_is_on())
1346 return;
1347 }
1348
1349 if (!data->count)
1350 return;
1351
1352 if (data->count != -1)
1353 (data->count)--;
1354
1355 if (file)
1356 tracer_tracing_off(file->tr);
1357 else
1358 tracing_off();
1359 }
1360
1361 static int
1362 traceon_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1363 {
1364 return event_trigger_print("traceon", m, (void *)data->count,
1365 data->filter_str);
1366 }
1367
1368 static int
1369 traceoff_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1370 {
1371 return event_trigger_print("traceoff", m, (void *)data->count,
1372 data->filter_str);
1373 }
1374
1375 static struct event_trigger_ops traceon_trigger_ops = {
1376 .trigger = traceon_trigger,
1377 .print = traceon_trigger_print,
1378 .init = event_trigger_init,
1379 .free = event_trigger_free,
1380 };
1381
1382 static struct event_trigger_ops traceon_count_trigger_ops = {
1383 .trigger = traceon_count_trigger,
1384 .print = traceon_trigger_print,
1385 .init = event_trigger_init,
1386 .free = event_trigger_free,
1387 };
1388
1389 static struct event_trigger_ops traceoff_trigger_ops = {
1390 .trigger = traceoff_trigger,
1391 .print = traceoff_trigger_print,
1392 .init = event_trigger_init,
1393 .free = event_trigger_free,
1394 };
1395
1396 static struct event_trigger_ops traceoff_count_trigger_ops = {
1397 .trigger = traceoff_count_trigger,
1398 .print = traceoff_trigger_print,
1399 .init = event_trigger_init,
1400 .free = event_trigger_free,
1401 };
1402
1403 static struct event_trigger_ops *
1404 onoff_get_trigger_ops(char *cmd, char *param)
1405 {
1406 struct event_trigger_ops *ops;
1407
1408 /* we register both traceon and traceoff to this callback */
1409 if (strcmp(cmd, "traceon") == 0)
1410 ops = param ? &traceon_count_trigger_ops :
1411 &traceon_trigger_ops;
1412 else
1413 ops = param ? &traceoff_count_trigger_ops :
1414 &traceoff_trigger_ops;
1415
1416 return ops;
1417 }
1418
1419 static struct event_command trigger_traceon_cmd = {
1420 .name = "traceon",
1421 .trigger_type = ETT_TRACE_ONOFF,
1422 .parse = event_trigger_parse,
1423 .reg = register_trigger,
1424 .unreg = unregister_trigger,
1425 .get_trigger_ops = onoff_get_trigger_ops,
1426 .set_filter = set_trigger_filter,
1427 };
1428
1429 static struct event_command trigger_traceoff_cmd = {
1430 .name = "traceoff",
1431 .trigger_type = ETT_TRACE_ONOFF,
1432 .flags = EVENT_CMD_FL_POST_TRIGGER,
1433 .parse = event_trigger_parse,
1434 .reg = register_trigger,
1435 .unreg = unregister_trigger,
1436 .get_trigger_ops = onoff_get_trigger_ops,
1437 .set_filter = set_trigger_filter,
1438 };
1439
1440 #ifdef CONFIG_TRACER_SNAPSHOT
1441 static void
1442 snapshot_trigger(struct event_trigger_data *data,
1443 struct trace_buffer *buffer, void *rec,
1444 struct ring_buffer_event *event)
1445 {
1446 struct trace_event_file *file = data->private_data;
1447
1448 if (file)
1449 tracing_snapshot_instance(file->tr);
1450 else
1451 tracing_snapshot();
1452 }
1453
1454 static void
1455 snapshot_count_trigger(struct event_trigger_data *data,
1456 struct trace_buffer *buffer, void *rec,
1457 struct ring_buffer_event *event)
1458 {
1459 if (!data->count)
1460 return;
1461
1462 if (data->count != -1)
1463 (data->count)--;
1464
1465 snapshot_trigger(data, buffer, rec, event);
1466 }
1467
1468 static int
1469 register_snapshot_trigger(char *glob,
1470 struct event_trigger_data *data,
1471 struct trace_event_file *file)
1472 {
1473 int ret = tracing_alloc_snapshot_instance(file->tr);
1474
1475 if (ret < 0)
1476 return ret;
1477
1478 return register_trigger(glob, data, file);
1479 }
1480
1481 static int
1482 snapshot_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1483 {
1484 return event_trigger_print("snapshot", m, (void *)data->count,
1485 data->filter_str);
1486 }
1487
1488 static struct event_trigger_ops snapshot_trigger_ops = {
1489 .trigger = snapshot_trigger,
1490 .print = snapshot_trigger_print,
1491 .init = event_trigger_init,
1492 .free = event_trigger_free,
1493 };
1494
1495 static struct event_trigger_ops snapshot_count_trigger_ops = {
1496 .trigger = snapshot_count_trigger,
1497 .print = snapshot_trigger_print,
1498 .init = event_trigger_init,
1499 .free = event_trigger_free,
1500 };
1501
1502 static struct event_trigger_ops *
1503 snapshot_get_trigger_ops(char *cmd, char *param)
1504 {
1505 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1506 }
1507
1508 static struct event_command trigger_snapshot_cmd = {
1509 .name = "snapshot",
1510 .trigger_type = ETT_SNAPSHOT,
1511 .parse = event_trigger_parse,
1512 .reg = register_snapshot_trigger,
1513 .unreg = unregister_trigger,
1514 .get_trigger_ops = snapshot_get_trigger_ops,
1515 .set_filter = set_trigger_filter,
1516 };
1517
1518 static __init int register_trigger_snapshot_cmd(void)
1519 {
1520 int ret;
1521
1522 ret = register_event_command(&trigger_snapshot_cmd);
1523 WARN_ON(ret < 0);
1524
1525 return ret;
1526 }
1527 #else
1528 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1529 #endif /* CONFIG_TRACER_SNAPSHOT */
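/*
 * Typical (illustrative) use of the snapshot trigger registered above when
 * CONFIG_TRACER_SNAPSHOT is enabled, from the tracefs root:
 *
 *	echo 'snapshot:1' > events/sched/sched_switch/trigger
 *	cat snapshot
 *
 * The first matching event swaps the trace buffer into the 'snapshot'
 * file; the ':1' count keeps later hits from overwriting it.
 */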
1530
1531 #ifdef CONFIG_STACKTRACE
1532 #ifdef CONFIG_UNWINDER_ORC
1533 /* Skip 2:
1534 * event_triggers_post_call()
1535 * trace_event_raw_event_xxx()
1536 */
1537 # define STACK_SKIP 2
1538 #else
1539 /*
1540 * Skip 4:
1541 * stacktrace_trigger()
1542 * event_triggers_post_call()
1543 * trace_event_buffer_commit()
1544 * trace_event_raw_event_xxx()
1545 */
1546 #define STACK_SKIP 4
1547 #endif
1548
1549 static void
1550 stacktrace_trigger(struct event_trigger_data *data,
1551 struct trace_buffer *buffer, void *rec,
1552 struct ring_buffer_event *event)
1553 {
1554 struct trace_event_file *file = data->private_data;
1555
1556 if (file)
1557 __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
1558 else
1559 trace_dump_stack(STACK_SKIP);
1560 }
1561
1562 static void
1563 stacktrace_count_trigger(struct event_trigger_data *data,
1564 struct trace_buffer *buffer, void *rec,
1565 struct ring_buffer_event *event)
1566 {
1567 if (!data->count)
1568 return;
1569
1570 if (data->count != -1)
1571 (data->count)--;
1572
1573 stacktrace_trigger(data, buffer, rec, event);
1574 }
1575
1576 static int
1577 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_data *data)
1578 {
1579 return event_trigger_print("stacktrace", m, (void *)data->count,
1580 data->filter_str);
1581 }
1582
1583 static struct event_trigger_ops stacktrace_trigger_ops = {
1584 .trigger = stacktrace_trigger,
1585 .print = stacktrace_trigger_print,
1586 .init = event_trigger_init,
1587 .free = event_trigger_free,
1588 };
1589
1590 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1591 .trigger = stacktrace_count_trigger,
1592 .print = stacktrace_trigger_print,
1593 .init = event_trigger_init,
1594 .free = event_trigger_free,
1595 };
1596
1597 static struct event_trigger_ops *
1598 stacktrace_get_trigger_ops(char *cmd, char *param)
1599 {
1600 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1601 }
1602
1603 static struct event_command trigger_stacktrace_cmd = {
1604 .name = "stacktrace",
1605 .trigger_type = ETT_STACKTRACE,
1606 .flags = EVENT_CMD_FL_POST_TRIGGER,
1607 .parse = event_trigger_parse,
1608 .reg = register_trigger,
1609 .unreg = unregister_trigger,
1610 .get_trigger_ops = stacktrace_get_trigger_ops,
1611 .set_filter = set_trigger_filter,
1612 };
1613
1614 static __init int register_trigger_stacktrace_cmd(void)
1615 {
1616 int ret;
1617
1618 ret = register_event_command(&trigger_stacktrace_cmd);
1619 WARN_ON(ret < 0);
1620
1621 return ret;
1622 }
1623 #else
1624 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1625 #endif /* CONFIG_STACKTRACE */
1626
1627 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1628 {
1629 unregister_event_command(&trigger_traceon_cmd);
1630 unregister_event_command(&trigger_traceoff_cmd);
1631 }
1632
1633 static void
1634 event_enable_trigger(struct event_trigger_data *data,
1635 struct trace_buffer *buffer, void *rec,
1636 struct ring_buffer_event *event)
1637 {
1638 struct enable_trigger_data *enable_data = data->private_data;
1639
1640 if (enable_data->enable)
1641 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1642 else
1643 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1644 }
1645
1646 static void
1647 event_enable_count_trigger(struct event_trigger_data *data,
1648 struct trace_buffer *buffer, void *rec,
1649 struct ring_buffer_event *event)
1650 {
1651 struct enable_trigger_data *enable_data = data->private_data;
1652
1653 if (!data->count)
1654 return;
1655
1656 /* Skip if the event is in a state we want to switch to */
1657 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1658 return;
1659
1660 if (data->count != -1)
1661 (data->count)--;
1662
1663 event_enable_trigger(data, buffer, rec, event);
1664 }
1665
1666 int event_enable_trigger_print(struct seq_file *m,
1667 struct event_trigger_data *data)
1668 {
1669 struct enable_trigger_data *enable_data = data->private_data;
1670
1671 seq_printf(m, "%s:%s:%s",
1672 enable_data->hist ?
1673 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1674 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1675 enable_data->file->event_call->class->system,
1676 trace_event_name(enable_data->file->event_call));
1677
1678 if (data->count == -1)
1679 seq_puts(m, ":unlimited");
1680 else
1681 seq_printf(m, ":count=%ld", data->count);
1682
1683 if (data->filter_str)
1684 seq_printf(m, " if %s\n", data->filter_str);
1685 else
1686 seq_putc(m, '\n');
1687
1688 return 0;
1689 }
1690
1691 void event_enable_trigger_free(struct event_trigger_data *data)
1692 {
1693 struct enable_trigger_data *enable_data = data->private_data;
1694
1695 if (WARN_ON_ONCE(data->ref <= 0))
1696 return;
1697
1698 data->ref--;
1699 if (!data->ref) {
1700 /* Remove the SOFT_MODE flag */
1701 trace_event_enable_disable(enable_data->file, 0, 1);
1702 trace_event_put_ref(enable_data->file->event_call);
1703 trigger_data_free(data);
1704 kfree(enable_data);
1705 }
1706 }
1707
1708 static struct event_trigger_ops event_enable_trigger_ops = {
1709 .trigger = event_enable_trigger,
1710 .print = event_enable_trigger_print,
1711 .init = event_trigger_init,
1712 .free = event_enable_trigger_free,
1713 };
1714
1715 static struct event_trigger_ops event_enable_count_trigger_ops = {
1716 .trigger = event_enable_count_trigger,
1717 .print = event_enable_trigger_print,
1718 .init = event_trigger_init,
1719 .free = event_enable_trigger_free,
1720 };
1721
1722 static struct event_trigger_ops event_disable_trigger_ops = {
1723 .trigger = event_enable_trigger,
1724 .print = event_enable_trigger_print,
1725 .init = event_trigger_init,
1726 .free = event_enable_trigger_free,
1727 };
1728
1729 static struct event_trigger_ops event_disable_count_trigger_ops = {
1730 .trigger = event_enable_count_trigger,
1731 .print = event_enable_trigger_print,
1732 .init = event_trigger_init,
1733 .free = event_enable_trigger_free,
1734 };
1735
1736 int event_enable_trigger_parse(struct event_command *cmd_ops,
1737 struct trace_event_file *file,
1738 char *glob, char *cmd, char *param_and_filter)
1739 {
1740 struct trace_event_file *event_enable_file;
1741 struct enable_trigger_data *enable_data;
1742 struct event_trigger_data *trigger_data;
1743 struct trace_array *tr = file->tr;
1744 char *param, *filter;
1745 bool enable, remove;
1746 const char *system;
1747 const char *event;
1748 bool hist = false;
1749 int ret;
1750
1751 remove = event_trigger_check_remove(glob);
1752
1753 if (event_trigger_empty_param(param_and_filter))
1754 return -EINVAL;
1755
1756 ret = event_trigger_separate_filter(param_and_filter, ¶m, &filter, true);
1757 if (ret)
1758 return ret;
1759
1760 system = strsep(¶m, ":");
1761 if (!param)
1762 return -EINVAL;
1763
1764 event = strsep(¶m, ":");
1765
1766 ret = -EINVAL;
1767 event_enable_file = find_event_file(tr, system, event);
1768 if (!event_enable_file)
1769 goto out;
1770
1771 #ifdef CONFIG_HIST_TRIGGERS
1772 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1773 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1774
1775 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1776 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1777 #else
1778 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1779 #endif
1780 ret = -ENOMEM;
1781
1782 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1783 if (!enable_data)
1784 goto out;
1785
1786 enable_data->hist = hist;
1787 enable_data->enable = enable;
1788 enable_data->file = event_enable_file;
1789
1790 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, enable_data);
1791 if (!trigger_data) {
1792 kfree(enable_data);
1793 goto out;
1794 }
1795
1796 if (remove) {
1797 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
1798 kfree(trigger_data);
1799 kfree(enable_data);
1800 ret = 0;
1801 goto out;
1802 }
1803
1804 /* Up the trigger_data count to make sure nothing frees it on failure */
1805 event_trigger_init(trigger_data);
1806
1807 ret = event_trigger_parse_num(param, trigger_data);
1808 if (ret)
1809 goto out_free;
1810
1811 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
1812 if (ret < 0)
1813 goto out_free;
1814
1815 /* Don't let event modules unload while probe registered */
1816 ret = trace_event_try_get_ref(event_enable_file->event_call);
1817 if (!ret) {
1818 ret = -EBUSY;
1819 goto out_free;
1820 }
1821
1822 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1823 if (ret < 0)
1824 goto out_put;
1825
1826 ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
1827 if (ret)
1828 goto out_disable;
1829
1830 event_trigger_free(trigger_data);
1831 out:
1832 return ret;
1833 out_disable:
1834 trace_event_enable_disable(event_enable_file, 0, 1);
1835 out_put:
1836 trace_event_put_ref(event_enable_file->event_call);
1837 out_free:
1838 event_trigger_reset_filter(cmd_ops, trigger_data);
1839 event_trigger_free(trigger_data);
1840 kfree(enable_data);
1841
1842 goto out;
1843 }
1844
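/*
 * Register an enable/disable_event trigger on @file. Unlike the generic
 * register callback, duplicates are detected by comparing both the
 * trigger type and the target event file, so the same command may be
 * attached once per target event. Called with event_mutex held.
 */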
int event_enable_register_trigger(char *glob,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);

	update_cond_flag(file);
	ret = trace_event_trigger_enable_disable(file, 1);
	if (ret < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
	}
out:
	return ret;
}

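/*
 * Find and remove the trigger on @file that matches @test's trigger
 * type and target event file, then drop it via its ->free() callback.
 * Nothing happens if no matching trigger is registered.
 */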
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct event_trigger_data *data = NULL, *iter;
	struct enable_trigger_data *enable_data;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(iter, &file->triggers, list) {
		enable_data = iter->private_data;
		if (enable_data &&
		    (iter->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			data = iter;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (data && data->ops->free)
		data->ops->free(data);
}

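/*
 * Select trigger ops for an enable/disable_event command: the
 * count-limited variants are chosen when an extra parameter (a count)
 * is present, otherwise the unconditional variants are used.
 */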
static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.parse = event_enable_trigger_parse,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.parse = event_enable_trigger_parse,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

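/*
 * Boot-time registration of the enable_event/disable_event trigger
 * commands. If the second registration fails, the first is rolled back
 * so the pair is either fully available or not at all.
 */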
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

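/*
 * Same pattern for the traceon/traceoff trigger commands: register
 * both, and unregister the pair again if the second registration fails.
 */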
static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

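/*
 * Register all built-in trigger commands (traceon/traceoff, snapshot,
 * stacktrace, enable/disable_event and the hist-related commands).
 * Failures are only warned about by the individual helpers; this
 * always returns 0.
 */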
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}