1 /*
2  * trace_events_filter - generic event filtering
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26 
27 #include "trace.h"
28 #include "trace_output.h"
29 
30 #define DEFAULT_SYS_FILTER_MESSAGE					\
31 	"### global filter ###\n"					\
32 	"# Use this to set filters for multiple events.\n"		\
33 	"# Only events with the given fields will be affected.\n"	\
34 	"# If no events are modified, an error message will be displayed here"
35 
36 enum filter_op_ids
37 {
38 	OP_OR,
39 	OP_AND,
40 	OP_GLOB,
41 	OP_NE,
42 	OP_EQ,
43 	OP_LT,
44 	OP_LE,
45 	OP_GT,
46 	OP_GE,
47 	OP_NONE,
48 	OP_OPEN_PAREN,
49 };
50 
51 struct filter_op {
52 	int id;
53 	char *string;
54 	int precedence;
55 };
56 
57 static struct filter_op filter_ops[] = {
58 	{ OP_OR,	"||",		1 },
59 	{ OP_AND,	"&&",		2 },
60 	{ OP_GLOB,	"~",		4 },
61 	{ OP_NE,	"!=",		4 },
62 	{ OP_EQ,	"==",		4 },
63 	{ OP_LT,	"<",		5 },
64 	{ OP_LE,	"<=",		5 },
65 	{ OP_GT,	">",		5 },
66 	{ OP_GE,	">=",		5 },
67 	{ OP_NONE,	"OP_NONE",	0 },
68 	{ OP_OPEN_PAREN, "(",		0 },
69 };
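
/*
 * Illustrative note: with the precedences above, "a == 1 || b == 2 && c == 3"
 * parses as "a == 1 || (b == 2 && c == 3)", since OP_AND (2) binds tighter
 * than OP_OR (1) and the comparison ops (4-5) bind tighter than both.
 */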
70 
71 enum {
72 	FILT_ERR_NONE,
73 	FILT_ERR_INVALID_OP,
74 	FILT_ERR_UNBALANCED_PAREN,
75 	FILT_ERR_TOO_MANY_OPERANDS,
76 	FILT_ERR_OPERAND_TOO_LONG,
77 	FILT_ERR_FIELD_NOT_FOUND,
78 	FILT_ERR_ILLEGAL_FIELD_OP,
79 	FILT_ERR_ILLEGAL_INTVAL,
80 	FILT_ERR_BAD_SUBSYS_FILTER,
81 	FILT_ERR_TOO_MANY_PREDS,
82 	FILT_ERR_MISSING_FIELD,
83 	FILT_ERR_INVALID_FILTER,
84 	FILT_ERR_IP_FIELD_ONLY,
85 };
86 
87 static char *err_text[] = {
88 	"No error",
89 	"Invalid operator",
90 	"Unbalanced parens",
91 	"Too many operands",
92 	"Operand too long",
93 	"Field not found",
94 	"Illegal operation for field type",
95 	"Illegal integer value",
96 	"Couldn't find or set field in one of a subsystem's events",
97 	"Too many terms in predicate expression",
98 	"Missing field name and/or value",
99 	"Meaningless filter expression",
100 	"Only 'ip' field is supported for function trace",
101 };
102 
103 struct opstack_op {
104 	int op;
105 	struct list_head list;
106 };
107 
108 struct postfix_elt {
109 	int op;
110 	char *operand;
111 	struct list_head list;
112 };
113 
114 struct filter_parse_state {
115 	struct filter_op *ops;
116 	struct list_head opstack;
117 	struct list_head postfix;
118 	int lasterr;
119 	int lasterr_pos;
120 
121 	struct {
122 		char *string;
123 		unsigned int cnt;
124 		unsigned int tail;
125 	} infix;
126 
127 	struct {
128 		char string[MAX_FILTER_STR_VAL];
129 		int pos;
130 		unsigned int tail;
131 	} operand;
132 };
133 
134 struct pred_stack {
135 	struct filter_pred	**preds;
136 	int			index;
137 };
138 
139 #define DEFINE_COMPARISON_PRED(type)					\
140 static int filter_pred_##type(struct filter_pred *pred, void *event)	\
141 {									\
142 	type *addr = (type *)(event + pred->offset);			\
143 	type val = (type)pred->val;					\
144 	int match = 0;							\
145 									\
146 	switch (pred->op) {						\
147 	case OP_LT:							\
148 		match = (*addr < val);					\
149 		break;							\
150 	case OP_LE:							\
151 		match = (*addr <= val);					\
152 		break;							\
153 	case OP_GT:							\
154 		match = (*addr > val);					\
155 		break;							\
156 	case OP_GE:							\
157 		match = (*addr >= val);					\
158 		break;							\
159 	default:							\
160 		break;							\
161 	}								\
162 									\
163 	return match;							\
164 }
165 
166 #define DEFINE_EQUALITY_PRED(size)					\
167 static int filter_pred_##size(struct filter_pred *pred, void *event)	\
168 {									\
169 	u##size *addr = (u##size *)(event + pred->offset);		\
170 	u##size val = (u##size)pred->val;				\
171 	int match;							\
172 									\
173 	match = (val == *addr) ^ pred->not;				\
174 									\
175 	return match;							\
176 }
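
/*
 * Example expansions, for illustration: DEFINE_COMPARISON_PRED(s64) emits
 * filter_pred_s64(), which interprets the field at pred->offset as an s64
 * and compares it against (s64)pred->val according to pred->op (<, <=, >, >=).
 * DEFINE_EQUALITY_PRED(64) emits filter_pred_64(), which tests equality and
 * XORs the result with pred->not to implement both '==' and '!='.
 */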
177 
178 DEFINE_COMPARISON_PRED(s64);
179 DEFINE_COMPARISON_PRED(u64);
180 DEFINE_COMPARISON_PRED(s32);
181 DEFINE_COMPARISON_PRED(u32);
182 DEFINE_COMPARISON_PRED(s16);
183 DEFINE_COMPARISON_PRED(u16);
184 DEFINE_COMPARISON_PRED(s8);
185 DEFINE_COMPARISON_PRED(u8);
186 
187 DEFINE_EQUALITY_PRED(64);
188 DEFINE_EQUALITY_PRED(32);
189 DEFINE_EQUALITY_PRED(16);
190 DEFINE_EQUALITY_PRED(8);
191 
192 /* Filter predicate for fixed sized arrays of characters */
193 static int filter_pred_string(struct filter_pred *pred, void *event)
194 {
195 	char *addr = (char *)(event + pred->offset);
196 	int cmp, match;
197 
198 	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
199 
200 	match = cmp ^ pred->not;
201 
202 	return match;
203 }
204 
205 /* Filter predicate for char * pointers */
206 static int filter_pred_pchar(struct filter_pred *pred, void *event)
207 {
208 	char **addr = (char **)(event + pred->offset);
209 	int cmp, match;
210 	int len = strlen(*addr) + 1;	/* including trailing '\0' */
211 
212 	cmp = pred->regex.match(*addr, &pred->regex, len);
213 
214 	match = cmp ^ pred->not;
215 
216 	return match;
217 }
218 
219 /*
220  * Filter predicate for dynamic sized arrays of characters.
221  * These are implemented through a list of strings at the end
222  * of the entry.
223  * Each of these strings also has a field in the entry which
224  * contains its offset from the beginning of the entry.
225  * So we first read this field, dereference it and add it
226  * to the address of the entry; the result is the address
227  * of the string.
228  */
229 static int filter_pred_strloc(struct filter_pred *pred, void *event)
230 {
231 	u32 str_item = *(u32 *)(event + pred->offset);
232 	int str_loc = str_item & 0xffff;
233 	int str_len = str_item >> 16;
234 	char *addr = (char *)(event + str_loc);
235 	int cmp, match;
236 
237 	cmp = pred->regex.match(addr, &pred->regex, str_len);
238 
239 	match = cmp ^ pred->not;
240 
241 	return match;
242 }
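
/*
 * For example, a __data_loc field whose u32 value is 0x00080020 describes a
 * string of length 8 stored at offset 0x20 from the start of the entry:
 * length in the upper 16 bits, offset in the lower 16 bits, exactly as
 * decoded above.
 */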
243 
244 static int filter_pred_none(struct filter_pred *pred, void *event)
245 {
246 	return 0;
247 }
248 
249 /*
250  * regex_match_foo - Basic regex callbacks
251  *
252  * @str: the string to be searched
253  * @r:   the regex structure containing the pattern string
254  * @len: the length of the string to be searched (including '\0')
255  *
256  * Note:
257  * - @str might not be NULL-terminated if it's of type DYN_STRING
258  *   or STATIC_STRING
259  */
260 
261 static int regex_match_full(char *str, struct regex *r, int len)
262 {
263 	if (strncmp(str, r->pattern, len) == 0)
264 		return 1;
265 	return 0;
266 }
267 
268 static int regex_match_front(char *str, struct regex *r, int len)
269 {
270 	if (strncmp(str, r->pattern, r->len) == 0)
271 		return 1;
272 	return 0;
273 }
274 
275 static int regex_match_middle(char *str, struct regex *r, int len)
276 {
277 	if (strnstr(str, r->pattern, len))
278 		return 1;
279 	return 0;
280 }
281 
282 static int regex_match_end(char *str, struct regex *r, int len)
283 {
284 	int strlen = len - 1;
285 
286 	if (strlen >= r->len &&
287 	    memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
288 		return 1;
289 	return 0;
290 }
291 
292 /**
293  * filter_parse_regex - parse a basic regex
294  * @buff:   the raw regex
295  * @len:    length of the regex
296  * @search: will point to the beginning of the string to compare
297  * @not:    tell whether the match will have to be inverted
298  *
299  * The caller passes in a buffer containing a regex; this function
300  * sets @search to point to the search part of the buffer and
301  * returns the type of search it is (see the enum above).
302  * Note that this modifies @buff.
303  *
304  * Returns enum type.
305  *  search returns the pointer to use for comparison.
306  *  not returns 1 if buff started with a '!'
307  *     0 otherwise.
308  */
309 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
310 {
311 	int type = MATCH_FULL;
312 	int i;
313 
314 	if (buff[0] == '!') {
315 		*not = 1;
316 		buff++;
317 		len--;
318 	} else
319 		*not = 0;
320 
321 	*search = buff;
322 
323 	for (i = 0; i < len; i++) {
324 		if (buff[i] == '*') {
325 			if (!i) {
326 				*search = buff + 1;
327 				type = MATCH_END_ONLY;
328 			} else {
329 				if (type == MATCH_END_ONLY)
330 					type = MATCH_MIDDLE_ONLY;
331 				else
332 					type = MATCH_FRONT_ONLY;
333 				buff[i] = 0;
334 				break;
335 			}
336 		}
337 	}
338 
339 	return type;
340 }
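
/*
 * Examples of the mapping performed above (a leading '!' only sets *not):
 *   "glob"   -> MATCH_FULL,        search = "glob"
 *   "glob*"  -> MATCH_FRONT_ONLY,  search = "glob"
 *   "*glob"  -> MATCH_END_ONLY,    search = "glob"
 *   "*glob*" -> MATCH_MIDDLE_ONLY, search = "glob"
 */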
341 
342 static void filter_build_regex(struct filter_pred *pred)
343 {
344 	struct regex *r = &pred->regex;
345 	char *search;
346 	enum regex_type type = MATCH_FULL;
347 	int not = 0;
348 
349 	if (pred->op == OP_GLOB) {
350 		type = filter_parse_regex(r->pattern, r->len, &search, &not);
351 		r->len = strlen(search);
352 		memmove(r->pattern, search, r->len+1);
353 	}
354 
355 	switch (type) {
356 	case MATCH_FULL:
357 		r->match = regex_match_full;
358 		break;
359 	case MATCH_FRONT_ONLY:
360 		r->match = regex_match_front;
361 		break;
362 	case MATCH_MIDDLE_ONLY:
363 		r->match = regex_match_middle;
364 		break;
365 	case MATCH_END_ONLY:
366 		r->match = regex_match_end;
367 		break;
368 	}
369 
370 	pred->not ^= not;
371 }
372 
373 enum move_type {
374 	MOVE_DOWN,
375 	MOVE_UP_FROM_LEFT,
376 	MOVE_UP_FROM_RIGHT
377 };
378 
379 static struct filter_pred *
380 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
381 		int index, enum move_type *move)
382 {
383 	if (pred->parent & FILTER_PRED_IS_RIGHT)
384 		*move = MOVE_UP_FROM_RIGHT;
385 	else
386 		*move = MOVE_UP_FROM_LEFT;
387 	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
388 
389 	return pred;
390 }
391 
392 enum walk_return {
393 	WALK_PRED_ABORT,
394 	WALK_PRED_PARENT,
395 	WALK_PRED_DEFAULT,
396 };
397 
398 typedef int (*filter_pred_walkcb_t) (enum move_type move,
399 				     struct filter_pred *pred,
400 				     int *err, void *data);
401 
402 static int walk_pred_tree(struct filter_pred *preds,
403 			  struct filter_pred *root,
404 			  filter_pred_walkcb_t cb, void *data)
405 {
406 	struct filter_pred *pred = root;
407 	enum move_type move = MOVE_DOWN;
408 	int done = 0;
409 
410 	if (!preds)
411 		return -EINVAL;
412 
413 	do {
414 		int err = 0, ret;
415 
416 		ret = cb(move, pred, &err, data);
417 		if (ret == WALK_PRED_ABORT)
418 			return err;
419 		if (ret == WALK_PRED_PARENT)
420 			goto get_parent;
421 
422 		switch (move) {
423 		case MOVE_DOWN:
424 			if (pred->left != FILTER_PRED_INVALID) {
425 				pred = &preds[pred->left];
426 				continue;
427 			}
428 			goto get_parent;
429 		case MOVE_UP_FROM_LEFT:
430 			pred = &preds[pred->right];
431 			move = MOVE_DOWN;
432 			continue;
433 		case MOVE_UP_FROM_RIGHT:
434  get_parent:
435 			if (pred == root)
436 				break;
437 			pred = get_pred_parent(pred, preds,
438 					       pred->parent,
439 					       &move);
440 			continue;
441 		}
442 		done = 1;
443 	} while (!done);
444 
445 	/* We are fine. */
446 	return 0;
447 }
448 
449 /*
450  * A series of ANDs or ORs were found together. Instead of
451  * climbing up and down the tree branches, an array of the
452  * ops was made in the order of the checks. We can just move
453  * across the array and short circuit if needed.
454  */
455 static int process_ops(struct filter_pred *preds,
456 		       struct filter_pred *op, void *rec)
457 {
458 	struct filter_pred *pred;
459 	int match = 0;
460 	int type;
461 	int i;
462 
463 	/*
464 	 * Micro-optimization: We set type to true if op
465 	 * is an OR and false otherwise (AND). Then we
466 	 * just need to test if the match is equal to
467 	 * the type, and if it is, we can short circuit the
468 	 * rest of the checks:
469 	 *
470 	 * if ((match && op->op == OP_OR) ||
471 	 *     (!match && op->op == OP_AND))
472 	 *	  return match;
473 	 */
474 	type = op->op == OP_OR;
475 
476 	for (i = 0; i < op->val; i++) {
477 		pred = &preds[op->ops[i]];
478 		if (!WARN_ON_ONCE(!pred->fn))
479 			match = pred->fn(pred, rec);
480 		if (!!match == type)
481 			return match;
482 	}
483 	return match;
484 }
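
/*
 * For instance, with a folded "a == 1 || b == 2 || c == 3" the three leaf
 * predicates sit in op->ops[] in check order; the loop above returns as soon
 * as one of them matches (type == 1 for OP_OR), instead of re-walking the
 * tree for every event.
 */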
485 
486 struct filter_match_preds_data {
487 	struct filter_pred *preds;
488 	int match;
489 	void *rec;
490 };
491 
492 static int filter_match_preds_cb(enum move_type move, struct filter_pred *pred,
493 				 int *err, void *data)
494 {
495 	struct filter_match_preds_data *d = data;
496 
497 	*err = 0;
498 	switch (move) {
499 	case MOVE_DOWN:
500 		/* only AND and OR have children */
501 		if (pred->left != FILTER_PRED_INVALID) {
502 			/* If ops is set, then it was folded. */
503 			if (!pred->ops)
504 				return WALK_PRED_DEFAULT;
505 			/* We can treat folded ops as a leaf node */
506 			d->match = process_ops(d->preds, pred, d->rec);
507 		} else {
508 			if (!WARN_ON_ONCE(!pred->fn))
509 				d->match = pred->fn(pred, d->rec);
510 		}
511 
512 		return WALK_PRED_PARENT;
513 	case MOVE_UP_FROM_LEFT:
514 		/*
515 		 * Check for short circuits.
516 		 *
517 		 * Optimization: !!match == (pred->op == OP_OR)
518 		 *   is the same as:
519 		 * if ((match && pred->op == OP_OR) ||
520 		 *     (!match && pred->op == OP_AND))
521 		 */
522 		if (!!d->match == (pred->op == OP_OR))
523 			return WALK_PRED_PARENT;
524 		break;
525 	case MOVE_UP_FROM_RIGHT:
526 		break;
527 	}
528 
529 	return WALK_PRED_DEFAULT;
530 }
531 
532 /* return 1 if event matches, 0 otherwise (discard) */
533 int filter_match_preds(struct event_filter *filter, void *rec)
534 {
535 	struct filter_pred *preds;
536 	struct filter_pred *root;
537 	struct filter_match_preds_data data = {
538 		/* match is currently meaningless */
539 		.match = -1,
540 		.rec   = rec,
541 	};
542 	int n_preds, ret;
543 
544 	/* no filter is considered a match */
545 	if (!filter)
546 		return 1;
547 
548 	n_preds = filter->n_preds;
549 	if (!n_preds)
550 		return 1;
551 
552 	/*
553 	 * n_preds, root and filter->preds are protected with preemption disabled.
554 	 */
555 	root = rcu_dereference_sched(filter->root);
556 	if (!root)
557 		return 1;
558 
559 	data.preds = preds = rcu_dereference_sched(filter->preds);
560 	ret = walk_pred_tree(preds, root, filter_match_preds_cb, &data);
561 	WARN_ON(ret);
562 	return data.match;
563 }
564 EXPORT_SYMBOL_GPL(filter_match_preds);
565 
566 static void parse_error(struct filter_parse_state *ps, int err, int pos)
567 {
568 	ps->lasterr = err;
569 	ps->lasterr_pos = pos;
570 }
571 
572 static void remove_filter_string(struct event_filter *filter)
573 {
574 	if (!filter)
575 		return;
576 
577 	kfree(filter->filter_string);
578 	filter->filter_string = NULL;
579 }
580 
581 static int replace_filter_string(struct event_filter *filter,
582 				 char *filter_string)
583 {
584 	kfree(filter->filter_string);
585 	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
586 	if (!filter->filter_string)
587 		return -ENOMEM;
588 
589 	return 0;
590 }
591 
592 static int append_filter_string(struct event_filter *filter,
593 				char *string)
594 {
595 	int newlen;
596 	char *new_filter_string;
597 
598 	BUG_ON(!filter->filter_string);
599 	newlen = strlen(filter->filter_string) + strlen(string) + 1;
600 	new_filter_string = kmalloc(newlen, GFP_KERNEL);
601 	if (!new_filter_string)
602 		return -ENOMEM;
603 
604 	strcpy(new_filter_string, filter->filter_string);
605 	strcat(new_filter_string, string);
606 	kfree(filter->filter_string);
607 	filter->filter_string = new_filter_string;
608 
609 	return 0;
610 }
611 
612 static void append_filter_err(struct filter_parse_state *ps,
613 			      struct event_filter *filter)
614 {
615 	int pos = ps->lasterr_pos;
616 	char *buf, *pbuf;
617 
618 	buf = (char *)__get_free_page(GFP_TEMPORARY);
619 	if (!buf)
620 		return;
621 
622 	append_filter_string(filter, "\n");
623 	memset(buf, ' ', PAGE_SIZE);
624 	if (pos > PAGE_SIZE - 128)
625 		pos = 0;
626 	buf[pos] = '^';
627 	pbuf = &buf[pos] + 1;
628 
629 	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
630 	append_filter_string(filter, buf);
631 	free_page((unsigned long) buf);
632 }
633 
634 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
635 {
636 	struct event_filter *filter;
637 
638 	mutex_lock(&event_mutex);
639 	filter = call->filter;
640 	if (filter && filter->filter_string)
641 		trace_seq_printf(s, "%s\n", filter->filter_string);
642 	else
643 		trace_seq_printf(s, "none\n");
644 	mutex_unlock(&event_mutex);
645 }
646 
647 void print_subsystem_event_filter(struct event_subsystem *system,
648 				  struct trace_seq *s)
649 {
650 	struct event_filter *filter;
651 
652 	mutex_lock(&event_mutex);
653 	filter = system->filter;
654 	if (filter && filter->filter_string)
655 		trace_seq_printf(s, "%s\n", filter->filter_string);
656 	else
657 		trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
658 	mutex_unlock(&event_mutex);
659 }
660 
661 static struct ftrace_event_field *
662 __find_event_field(struct list_head *head, char *name)
663 {
664 	struct ftrace_event_field *field;
665 
666 	list_for_each_entry(field, head, link) {
667 		if (!strcmp(field->name, name))
668 			return field;
669 	}
670 
671 	return NULL;
672 }
673 
674 static struct ftrace_event_field *
675 find_event_field(struct ftrace_event_call *call, char *name)
676 {
677 	struct ftrace_event_field *field;
678 	struct list_head *head;
679 
680 	field = __find_event_field(&ftrace_common_fields, name);
681 	if (field)
682 		return field;
683 
684 	head = trace_get_fields(call);
685 	return __find_event_field(head, name);
686 }
687 
688 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
689 {
690 	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
691 	if (!stack->preds)
692 		return -ENOMEM;
693 	stack->index = n_preds;
694 	return 0;
695 }
696 
697 static void __free_pred_stack(struct pred_stack *stack)
698 {
699 	kfree(stack->preds);
700 	stack->index = 0;
701 }
702 
703 static int __push_pred_stack(struct pred_stack *stack,
704 			     struct filter_pred *pred)
705 {
706 	int index = stack->index;
707 
708 	if (WARN_ON(index == 0))
709 		return -ENOSPC;
710 
711 	stack->preds[--index] = pred;
712 	stack->index = index;
713 	return 0;
714 }
715 
716 static struct filter_pred *
717 __pop_pred_stack(struct pred_stack *stack)
718 {
719 	struct filter_pred *pred;
720 	int index = stack->index;
721 
722 	pred = stack->preds[index++];
723 	if (!pred)
724 		return NULL;
725 
726 	stack->index = index;
727 	return pred;
728 }
729 
730 static int filter_set_pred(struct event_filter *filter,
731 			   int idx,
732 			   struct pred_stack *stack,
733 			   struct filter_pred *src)
734 {
735 	struct filter_pred *dest = &filter->preds[idx];
736 	struct filter_pred *left;
737 	struct filter_pred *right;
738 
739 	*dest = *src;
740 	dest->index = idx;
741 
742 	if (dest->op == OP_OR || dest->op == OP_AND) {
743 		right = __pop_pred_stack(stack);
744 		left = __pop_pred_stack(stack);
745 		if (!left || !right)
746 			return -EINVAL;
747 		/*
748 		 * If both children can be folded
749 		 * and they are the same op as this op or a leaf,
750 		 * then this op can be folded.
751 		 */
752 		if (left->index & FILTER_PRED_FOLD &&
753 		    (left->op == dest->op ||
754 		     left->left == FILTER_PRED_INVALID) &&
755 		    right->index & FILTER_PRED_FOLD &&
756 		    (right->op == dest->op ||
757 		     right->left == FILTER_PRED_INVALID))
758 			dest->index |= FILTER_PRED_FOLD;
759 
760 		dest->left = left->index & ~FILTER_PRED_FOLD;
761 		dest->right = right->index & ~FILTER_PRED_FOLD;
762 		left->parent = dest->index & ~FILTER_PRED_FOLD;
763 		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
764 	} else {
765 		/*
766 		 * Make dest->left invalid to be used as a quick
767 		 * way to know this is a leaf node.
768 		 */
769 		dest->left = FILTER_PRED_INVALID;
770 
771 		/* All leafs allow folding the parent ops. */
772 		dest->index |= FILTER_PRED_FOLD;
773 	}
774 
775 	return __push_pred_stack(stack, dest);
776 }
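
/*
 * Illustration of the stack use above (hypothetical fields): the postfix
 * expression "common_pid 0 != prio 10 > &&" creates the two leaf predicates
 * first (each is pushed), then the OP_AND predicate pops them as its right
 * and left children and pushes itself, leaving the tree root on the stack.
 */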
777 
778 static void __free_preds(struct event_filter *filter)
779 {
780 	int i;
781 
782 	if (filter->preds) {
783 		for (i = 0; i < filter->n_preds; i++)
784 			kfree(filter->preds[i].ops);
785 		kfree(filter->preds);
786 		filter->preds = NULL;
787 	}
788 	filter->a_preds = 0;
789 	filter->n_preds = 0;
790 }
791 
792 static void filter_disable(struct ftrace_event_call *call)
793 {
794 	call->flags &= ~TRACE_EVENT_FL_FILTERED;
795 }
796 
797 static void __free_filter(struct event_filter *filter)
798 {
799 	if (!filter)
800 		return;
801 
802 	__free_preds(filter);
803 	kfree(filter->filter_string);
804 	kfree(filter);
805 }
806 
807 /*
808  * Called when destroying the ftrace_event_call.
809  * The call is being freed, so we do not need to worry about
810  * the call being currently used. This is for module code removing
811  * the tracepoints from within it.
812  */
813 void destroy_preds(struct ftrace_event_call *call)
814 {
815 	__free_filter(call->filter);
816 	call->filter = NULL;
817 }
818 
819 static struct event_filter *__alloc_filter(void)
820 {
821 	struct event_filter *filter;
822 
823 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
824 	return filter;
825 }
826 
827 static int __alloc_preds(struct event_filter *filter, int n_preds)
828 {
829 	struct filter_pred *pred;
830 	int i;
831 
832 	if (filter->preds)
833 		__free_preds(filter);
834 
835 	filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
836 
837 	if (!filter->preds)
838 		return -ENOMEM;
839 
840 	filter->a_preds = n_preds;
841 	filter->n_preds = 0;
842 
843 	for (i = 0; i < n_preds; i++) {
844 		pred = &filter->preds[i];
845 		pred->fn = filter_pred_none;
846 	}
847 
848 	return 0;
849 }
850 
851 static void filter_free_subsystem_preds(struct event_subsystem *system)
852 {
853 	struct ftrace_event_call *call;
854 
855 	list_for_each_entry(call, &ftrace_events, list) {
856 		if (strcmp(call->class->system, system->name) != 0)
857 			continue;
858 
859 		filter_disable(call);
860 		remove_filter_string(call->filter);
861 	}
862 }
863 
864 static void filter_free_subsystem_filters(struct event_subsystem *system)
865 {
866 	struct ftrace_event_call *call;
867 
868 	list_for_each_entry(call, &ftrace_events, list) {
869 		if (strcmp(call->class->system, system->name) != 0)
870 			continue;
871 		__free_filter(call->filter);
872 		call->filter = NULL;
873 	}
874 }
875 
876 static int filter_add_pred(struct filter_parse_state *ps,
877 			   struct event_filter *filter,
878 			   struct filter_pred *pred,
879 			   struct pred_stack *stack)
880 {
881 	int err;
882 
883 	if (WARN_ON(filter->n_preds == filter->a_preds)) {
884 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
885 		return -ENOSPC;
886 	}
887 
888 	err = filter_set_pred(filter, filter->n_preds, stack, pred);
889 	if (err)
890 		return err;
891 
892 	filter->n_preds++;
893 
894 	return 0;
895 }
896 
897 int filter_assign_type(const char *type)
898 {
899 	if (strstr(type, "__data_loc") && strstr(type, "char"))
900 		return FILTER_DYN_STRING;
901 
902 	if (strchr(type, '[') && strstr(type, "char"))
903 		return FILTER_STATIC_STRING;
904 
905 	return FILTER_OTHER;
906 }
907 
908 static bool is_function_field(struct ftrace_event_field *field)
909 {
910 	return field->filter_type == FILTER_TRACE_FN;
911 }
912 
913 static bool is_string_field(struct ftrace_event_field *field)
914 {
915 	return field->filter_type == FILTER_DYN_STRING ||
916 	       field->filter_type == FILTER_STATIC_STRING ||
917 	       field->filter_type == FILTER_PTR_STRING;
918 }
919 
920 static int is_legal_op(struct ftrace_event_field *field, int op)
921 {
922 	if (is_string_field(field) &&
923 	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
924 		return 0;
925 	if (!is_string_field(field) && op == OP_GLOB)
926 		return 0;
927 
928 	return 1;
929 }
930 
931 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
932 					     int field_is_signed)
933 {
934 	filter_pred_fn_t fn = NULL;
935 
936 	switch (field_size) {
937 	case 8:
938 		if (op == OP_EQ || op == OP_NE)
939 			fn = filter_pred_64;
940 		else if (field_is_signed)
941 			fn = filter_pred_s64;
942 		else
943 			fn = filter_pred_u64;
944 		break;
945 	case 4:
946 		if (op == OP_EQ || op == OP_NE)
947 			fn = filter_pred_32;
948 		else if (field_is_signed)
949 			fn = filter_pred_s32;
950 		else
951 			fn = filter_pred_u32;
952 		break;
953 	case 2:
954 		if (op == OP_EQ || op == OP_NE)
955 			fn = filter_pred_16;
956 		else if (field_is_signed)
957 			fn = filter_pred_s16;
958 		else
959 			fn = filter_pred_u16;
960 		break;
961 	case 1:
962 		if (op == OP_EQ || op == OP_NE)
963 			fn = filter_pred_8;
964 		else if (field_is_signed)
965 			fn = filter_pred_s8;
966 		else
967 			fn = filter_pred_u8;
968 		break;
969 	}
970 
971 	return fn;
972 }
973 
974 static int init_pred(struct filter_parse_state *ps,
975 		     struct ftrace_event_field *field,
976 		     struct filter_pred *pred)
977 
978 {
979 	filter_pred_fn_t fn = filter_pred_none;
980 	unsigned long long val;
981 	int ret;
982 
983 	pred->offset = field->offset;
984 
985 	if (!is_legal_op(field, pred->op)) {
986 		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
987 		return -EINVAL;
988 	}
989 
990 	if (is_string_field(field)) {
991 		filter_build_regex(pred);
992 
993 		if (field->filter_type == FILTER_STATIC_STRING) {
994 			fn = filter_pred_string;
995 			pred->regex.field_len = field->size;
996 		} else if (field->filter_type == FILTER_DYN_STRING)
997 			fn = filter_pred_strloc;
998 		else
999 			fn = filter_pred_pchar;
1000 	} else if (is_function_field(field)) {
1001 		if (strcmp(field->name, "ip")) {
1002 			parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
1003 			return -EINVAL;
1004 		}
1005 	} else {
1006 		if (field->is_signed)
1007 			ret = strict_strtoll(pred->regex.pattern, 0, &val);
1008 		else
1009 			ret = strict_strtoull(pred->regex.pattern, 0, &val);
1010 		if (ret) {
1011 			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
1012 			return -EINVAL;
1013 		}
1014 		pred->val = val;
1015 
1016 		fn = select_comparison_fn(pred->op, field->size,
1017 					  field->is_signed);
1018 		if (!fn) {
1019 			parse_error(ps, FILT_ERR_INVALID_OP, 0);
1020 			return -EINVAL;
1021 		}
1022 	}
1023 
1024 	if (pred->op == OP_NE)
1025 		pred->not = 1;
1026 
1027 	pred->fn = fn;
1028 	return 0;
1029 }
1030 
1031 static void parse_init(struct filter_parse_state *ps,
1032 		       struct filter_op *ops,
1033 		       char *infix_string)
1034 {
1035 	memset(ps, '\0', sizeof(*ps));
1036 
1037 	ps->infix.string = infix_string;
1038 	ps->infix.cnt = strlen(infix_string);
1039 	ps->ops = ops;
1040 
1041 	INIT_LIST_HEAD(&ps->opstack);
1042 	INIT_LIST_HEAD(&ps->postfix);
1043 }
1044 
1045 static char infix_next(struct filter_parse_state *ps)
1046 {
1047 	ps->infix.cnt--;
1048 
1049 	return ps->infix.string[ps->infix.tail++];
1050 }
1051 
1052 static char infix_peek(struct filter_parse_state *ps)
1053 {
1054 	if (ps->infix.tail == strlen(ps->infix.string))
1055 		return 0;
1056 
1057 	return ps->infix.string[ps->infix.tail];
1058 }
1059 
1060 static void infix_advance(struct filter_parse_state *ps)
1061 {
1062 	ps->infix.cnt--;
1063 	ps->infix.tail++;
1064 }
1065 
1066 static inline int is_precedence_lower(struct filter_parse_state *ps,
1067 				      int a, int b)
1068 {
1069 	return ps->ops[a].precedence < ps->ops[b].precedence;
1070 }
1071 
1072 static inline int is_op_char(struct filter_parse_state *ps, char c)
1073 {
1074 	int i;
1075 
1076 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1077 		if (ps->ops[i].string[0] == c)
1078 			return 1;
1079 	}
1080 
1081 	return 0;
1082 }
1083 
1084 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1085 {
1086 	char nextc = infix_peek(ps);
1087 	char opstr[3];
1088 	int i;
1089 
1090 	opstr[0] = firstc;
1091 	opstr[1] = nextc;
1092 	opstr[2] = '\0';
1093 
1094 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1095 		if (!strcmp(opstr, ps->ops[i].string)) {
1096 			infix_advance(ps);
1097 			return ps->ops[i].id;
1098 		}
1099 	}
1100 
1101 	opstr[1] = '\0';
1102 
1103 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1104 		if (!strcmp(opstr, ps->ops[i].string))
1105 			return ps->ops[i].id;
1106 	}
1107 
1108 	return OP_NONE;
1109 }
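
/*
 * Example: for the input "<=", firstc is '<' and infix_peek() returns '=';
 * the two-character lookup matches OP_LE and infix_advance() consumes the
 * '='. For a lone '<', the two-character lookup fails and the single
 * character falls through to the second loop, matching OP_LT.
 */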
1110 
1111 static inline void clear_operand_string(struct filter_parse_state *ps)
1112 {
1113 	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1114 	ps->operand.tail = 0;
1115 }
1116 
1117 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1118 {
1119 	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1120 		return -EINVAL;
1121 
1122 	ps->operand.string[ps->operand.tail++] = c;
1123 
1124 	return 0;
1125 }
1126 
1127 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1128 {
1129 	struct opstack_op *opstack_op;
1130 
1131 	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1132 	if (!opstack_op)
1133 		return -ENOMEM;
1134 
1135 	opstack_op->op = op;
1136 	list_add(&opstack_op->list, &ps->opstack);
1137 
1138 	return 0;
1139 }
1140 
1141 static int filter_opstack_empty(struct filter_parse_state *ps)
1142 {
1143 	return list_empty(&ps->opstack);
1144 }
1145 
1146 static int filter_opstack_top(struct filter_parse_state *ps)
1147 {
1148 	struct opstack_op *opstack_op;
1149 
1150 	if (filter_opstack_empty(ps))
1151 		return OP_NONE;
1152 
1153 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1154 
1155 	return opstack_op->op;
1156 }
1157 
1158 static int filter_opstack_pop(struct filter_parse_state *ps)
1159 {
1160 	struct opstack_op *opstack_op;
1161 	int op;
1162 
1163 	if (filter_opstack_empty(ps))
1164 		return OP_NONE;
1165 
1166 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1167 	op = opstack_op->op;
1168 	list_del(&opstack_op->list);
1169 
1170 	kfree(opstack_op);
1171 
1172 	return op;
1173 }
1174 
1175 static void filter_opstack_clear(struct filter_parse_state *ps)
1176 {
1177 	while (!filter_opstack_empty(ps))
1178 		filter_opstack_pop(ps);
1179 }
1180 
1181 static char *curr_operand(struct filter_parse_state *ps)
1182 {
1183 	return ps->operand.string;
1184 }
1185 
1186 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1187 {
1188 	struct postfix_elt *elt;
1189 
1190 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1191 	if (!elt)
1192 		return -ENOMEM;
1193 
1194 	elt->op = OP_NONE;
1195 	elt->operand = kstrdup(operand, GFP_KERNEL);
1196 	if (!elt->operand) {
1197 		kfree(elt);
1198 		return -ENOMEM;
1199 	}
1200 
1201 	list_add_tail(&elt->list, &ps->postfix);
1202 
1203 	return 0;
1204 }
1205 
1206 static int postfix_append_op(struct filter_parse_state *ps, int op)
1207 {
1208 	struct postfix_elt *elt;
1209 
1210 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1211 	if (!elt)
1212 		return -ENOMEM;
1213 
1214 	elt->op = op;
1215 	elt->operand = NULL;
1216 
1217 	list_add_tail(&elt->list, &ps->postfix);
1218 
1219 	return 0;
1220 }
1221 
1222 static void postfix_clear(struct filter_parse_state *ps)
1223 {
1224 	struct postfix_elt *elt;
1225 
1226 	while (!list_empty(&ps->postfix)) {
1227 		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1228 		list_del(&elt->list);
1229 		kfree(elt->operand);
1230 		kfree(elt);
1231 	}
1232 }
1233 
1234 static int filter_parse(struct filter_parse_state *ps)
1235 {
1236 	int in_string = 0;
1237 	int op, top_op;
1238 	char ch;
1239 
1240 	while ((ch = infix_next(ps))) {
1241 		if (ch == '"') {
1242 			in_string ^= 1;
1243 			continue;
1244 		}
1245 
1246 		if (in_string)
1247 			goto parse_operand;
1248 
1249 		if (isspace(ch))
1250 			continue;
1251 
1252 		if (is_op_char(ps, ch)) {
1253 			op = infix_get_op(ps, ch);
1254 			if (op == OP_NONE) {
1255 				parse_error(ps, FILT_ERR_INVALID_OP, 0);
1256 				return -EINVAL;
1257 			}
1258 
1259 			if (strlen(curr_operand(ps))) {
1260 				postfix_append_operand(ps, curr_operand(ps));
1261 				clear_operand_string(ps);
1262 			}
1263 
1264 			while (!filter_opstack_empty(ps)) {
1265 				top_op = filter_opstack_top(ps);
1266 				if (!is_precedence_lower(ps, top_op, op)) {
1267 					top_op = filter_opstack_pop(ps);
1268 					postfix_append_op(ps, top_op);
1269 					continue;
1270 				}
1271 				break;
1272 			}
1273 
1274 			filter_opstack_push(ps, op);
1275 			continue;
1276 		}
1277 
1278 		if (ch == '(') {
1279 			filter_opstack_push(ps, OP_OPEN_PAREN);
1280 			continue;
1281 		}
1282 
1283 		if (ch == ')') {
1284 			if (strlen(curr_operand(ps))) {
1285 				postfix_append_operand(ps, curr_operand(ps));
1286 				clear_operand_string(ps);
1287 			}
1288 
1289 			top_op = filter_opstack_pop(ps);
1290 			while (top_op != OP_NONE) {
1291 				if (top_op == OP_OPEN_PAREN)
1292 					break;
1293 				postfix_append_op(ps, top_op);
1294 				top_op = filter_opstack_pop(ps);
1295 			}
1296 			if (top_op == OP_NONE) {
1297 				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1298 				return -EINVAL;
1299 			}
1300 			continue;
1301 		}
1302 parse_operand:
1303 		if (append_operand_char(ps, ch)) {
1304 			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1305 			return -EINVAL;
1306 		}
1307 	}
1308 
1309 	if (strlen(curr_operand(ps)))
1310 		postfix_append_operand(ps, curr_operand(ps));
1311 
1312 	while (!filter_opstack_empty(ps)) {
1313 		top_op = filter_opstack_pop(ps);
1314 		if (top_op == OP_NONE)
1315 			break;
1316 		if (top_op == OP_OPEN_PAREN) {
1317 			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1318 			return -EINVAL;
1319 		}
1320 		postfix_append_op(ps, top_op);
1321 	}
1322 
1323 	return 0;
1324 }
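
/*
 * Worked example of the shunting-yard conversion above: the infix filter
 * "a == 1 && b == 2" becomes the postfix list "a 1 == b 2 == &&" (operands
 * are emitted as they complete, '==' is popped before the lower-precedence
 * '&&' is pushed, and the remaining ops are flushed at the end).
 */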
1325 
1326 static struct filter_pred *create_pred(struct filter_parse_state *ps,
1327 				       struct ftrace_event_call *call,
1328 				       int op, char *operand1, char *operand2)
1329 {
1330 	struct ftrace_event_field *field;
1331 	static struct filter_pred pred;
1332 
1333 	memset(&pred, 0, sizeof(pred));
1334 	pred.op = op;
1335 
1336 	if (op == OP_AND || op == OP_OR)
1337 		return &pred;
1338 
1339 	if (!operand1 || !operand2) {
1340 		parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1341 		return NULL;
1342 	}
1343 
1344 	field = find_event_field(call, operand1);
1345 	if (!field) {
1346 		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
1347 		return NULL;
1348 	}
1349 
1350 	strcpy(pred.regex.pattern, operand2);
1351 	pred.regex.len = strlen(pred.regex.pattern);
1352 	pred.field = field;
1353 	return init_pred(ps, field, &pred) ? NULL : &pred;
1354 }
1355 
1356 static int check_preds(struct filter_parse_state *ps)
1357 {
1358 	int n_normal_preds = 0, n_logical_preds = 0;
1359 	struct postfix_elt *elt;
1360 
1361 	list_for_each_entry(elt, &ps->postfix, list) {
1362 		if (elt->op == OP_NONE)
1363 			continue;
1364 
1365 		if (elt->op == OP_AND || elt->op == OP_OR) {
1366 			n_logical_preds++;
1367 			continue;
1368 		}
1369 		n_normal_preds++;
1370 	}
1371 
1372 	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1373 		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1374 		return -EINVAL;
1375 	}
1376 
1377 	return 0;
1378 }
1379 
1380 static int count_preds(struct filter_parse_state *ps)
1381 {
1382 	struct postfix_elt *elt;
1383 	int n_preds = 0;
1384 
1385 	list_for_each_entry(elt, &ps->postfix, list) {
1386 		if (elt->op == OP_NONE)
1387 			continue;
1388 		n_preds++;
1389 	}
1390 
1391 	return n_preds;
1392 }
1393 
1394 struct check_pred_data {
1395 	int count;
1396 	int max;
1397 };
1398 
1399 static int check_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1400 			      int *err, void *data)
1401 {
1402 	struct check_pred_data *d = data;
1403 
1404 	if (WARN_ON(d->count++ > d->max)) {
1405 		*err = -EINVAL;
1406 		return WALK_PRED_ABORT;
1407 	}
1408 	return WALK_PRED_DEFAULT;
1409 }
1410 
1411 /*
1412  * The tree is walked at filtering of an event. If the tree is not correctly
1413  * built, it may cause an infinite loop. Check here that the tree does
1414  * indeed terminate.
1415  */
1416 static int check_pred_tree(struct event_filter *filter,
1417 			   struct filter_pred *root)
1418 {
1419 	struct check_pred_data data = {
1420 		/*
1421 		 * A node can be hit at most three times:
1422 		 * once going down, once coming up from the left, and
1423 		 * once coming up from the right. This is more than enough
1424 		 * since leaves are only hit a single time.
1425 		 */
1426 		.max   = 3 * filter->n_preds,
1427 		.count = 0,
1428 	};
1429 
1430 	return walk_pred_tree(filter->preds, root,
1431 			      check_pred_tree_cb, &data);
1432 }
1433 
1434 static int count_leafs_cb(enum move_type move, struct filter_pred *pred,
1435 			  int *err, void *data)
1436 {
1437 	int *count = data;
1438 
1439 	if ((move == MOVE_DOWN) &&
1440 	    (pred->left == FILTER_PRED_INVALID))
1441 		(*count)++;
1442 
1443 	return WALK_PRED_DEFAULT;
1444 }
1445 
1446 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1447 {
1448 	int count = 0, ret;
1449 
1450 	ret = walk_pred_tree(preds, root, count_leafs_cb, &count);
1451 	WARN_ON(ret);
1452 	return count;
1453 }
1454 
1455 struct fold_pred_data {
1456 	struct filter_pred *root;
1457 	int count;
1458 	int children;
1459 };
1460 
1461 static int fold_pred_cb(enum move_type move, struct filter_pred *pred,
1462 			int *err, void *data)
1463 {
1464 	struct fold_pred_data *d = data;
1465 	struct filter_pred *root = d->root;
1466 
1467 	if (move != MOVE_DOWN)
1468 		return WALK_PRED_DEFAULT;
1469 	if (pred->left != FILTER_PRED_INVALID)
1470 		return WALK_PRED_DEFAULT;
1471 
1472 	if (WARN_ON(d->count == d->children)) {
1473 		*err = -EINVAL;
1474 		return WALK_PRED_ABORT;
1475 	}
1476 
1477 	pred->index &= ~FILTER_PRED_FOLD;
1478 	root->ops[d->count++] = pred->index;
1479 	return WALK_PRED_DEFAULT;
1480 }
1481 
1482 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1483 {
1484 	struct fold_pred_data data = {
1485 		.root  = root,
1486 		.count = 0,
1487 	};
1488 	int children;
1489 
1490 	/* No need to keep the fold flag */
1491 	root->index &= ~FILTER_PRED_FOLD;
1492 
1493 	/* If the root is a leaf then do nothing */
1494 	if (root->left == FILTER_PRED_INVALID)
1495 		return 0;
1496 
1497 	/* count the children */
1498 	children = count_leafs(preds, &preds[root->left]);
1499 	children += count_leafs(preds, &preds[root->right]);
1500 
1501 	root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
1502 	if (!root->ops)
1503 		return -ENOMEM;
1504 
1505 	root->val = children;
1506 	data.children = children;
1507 	return walk_pred_tree(preds, root, fold_pred_cb, &data);
1508 }
1509 
1510 static int fold_pred_tree_cb(enum move_type move, struct filter_pred *pred,
1511 			     int *err, void *data)
1512 {
1513 	struct filter_pred *preds = data;
1514 
1515 	if (move != MOVE_DOWN)
1516 		return WALK_PRED_DEFAULT;
1517 	if (!(pred->index & FILTER_PRED_FOLD))
1518 		return WALK_PRED_DEFAULT;
1519 
1520 	*err = fold_pred(preds, pred);
1521 	if (*err)
1522 		return WALK_PRED_ABORT;
1523 
1524 	/* everything below is folded, continue with parent */
1525 	return WALK_PRED_PARENT;
1526 }
1527 
1528 /*
1529  * To optimize the processing of the ops, if we have several "ors" or
1530  * "ands" together, we can put them in an array and process them all
1531  * together, speeding up the filter logic.
1532  */
1533 static int fold_pred_tree(struct event_filter *filter,
1534 			   struct filter_pred *root)
1535 {
1536 	return walk_pred_tree(filter->preds, root, fold_pred_tree_cb,
1537 			      filter->preds);
1538 }
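
/*
 * E.g. for "a == 1 && b == 2 && c == 3" the two AND nodes are marked
 * FILTER_PRED_FOLD while the tree is built, so the root AND ends up with
 * ops[] holding the three leaf indexes and val = 3, and filtering reduces
 * to the simple loop in process_ops().
 */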
1539 
1540 static int replace_preds(struct ftrace_event_call *call,
1541 			 struct event_filter *filter,
1542 			 struct filter_parse_state *ps,
1543 			 char *filter_string,
1544 			 bool dry_run)
1545 {
1546 	char *operand1 = NULL, *operand2 = NULL;
1547 	struct filter_pred *pred;
1548 	struct filter_pred *root;
1549 	struct postfix_elt *elt;
1550 	struct pred_stack stack = { }; /* init to NULL */
1551 	int err;
1552 	int n_preds = 0;
1553 
1554 	n_preds = count_preds(ps);
1555 	if (n_preds >= MAX_FILTER_PRED) {
1556 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1557 		return -ENOSPC;
1558 	}
1559 
1560 	err = check_preds(ps);
1561 	if (err)
1562 		return err;
1563 
1564 	if (!dry_run) {
1565 		err = __alloc_pred_stack(&stack, n_preds);
1566 		if (err)
1567 			return err;
1568 		err = __alloc_preds(filter, n_preds);
1569 		if (err)
1570 			goto fail;
1571 	}
1572 
1573 	n_preds = 0;
1574 	list_for_each_entry(elt, &ps->postfix, list) {
1575 		if (elt->op == OP_NONE) {
1576 			if (!operand1)
1577 				operand1 = elt->operand;
1578 			else if (!operand2)
1579 				operand2 = elt->operand;
1580 			else {
1581 				parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1582 				err = -EINVAL;
1583 				goto fail;
1584 			}
1585 			continue;
1586 		}
1587 
1588 		if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1589 			parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1590 			err = -ENOSPC;
1591 			goto fail;
1592 		}
1593 
1594 		pred = create_pred(ps, call, elt->op, operand1, operand2);
1595 		if (!pred) {
1596 			err = -EINVAL;
1597 			goto fail;
1598 		}
1599 
1600 		if (!dry_run) {
1601 			err = filter_add_pred(ps, filter, pred, &stack);
1602 			if (err)
1603 				goto fail;
1604 		}
1605 
1606 		operand1 = operand2 = NULL;
1607 	}
1608 
1609 	if (!dry_run) {
1610 		/* We should have one item left on the stack */
1611 		pred = __pop_pred_stack(&stack);
1612 		if (!pred)
1613 			return -EINVAL;
1614 		/* This item is where we start from in matching */
1615 		root = pred;
1616 		/* Make sure the stack is empty */
1617 		pred = __pop_pred_stack(&stack);
1618 		if (WARN_ON(pred)) {
1619 			err = -EINVAL;
1620 			filter->root = NULL;
1621 			goto fail;
1622 		}
1623 		err = check_pred_tree(filter, root);
1624 		if (err)
1625 			goto fail;
1626 
1627 		/* Optimize the tree */
1628 		err = fold_pred_tree(filter, root);
1629 		if (err)
1630 			goto fail;
1631 
1632 		/* We don't set root until we know it works */
1633 		barrier();
1634 		filter->root = root;
1635 	}
1636 
1637 	err = 0;
1638 fail:
1639 	__free_pred_stack(&stack);
1640 	return err;
1641 }
1642 
1643 struct filter_list {
1644 	struct list_head	list;
1645 	struct event_filter	*filter;
1646 };
1647 
1648 static int replace_system_preds(struct event_subsystem *system,
1649 				struct filter_parse_state *ps,
1650 				char *filter_string)
1651 {
1652 	struct ftrace_event_call *call;
1653 	struct filter_list *filter_item;
1654 	struct filter_list *tmp;
1655 	LIST_HEAD(filter_list);
1656 	bool fail = true;
1657 	int err;
1658 
1659 	list_for_each_entry(call, &ftrace_events, list) {
1660 
1661 		if (strcmp(call->class->system, system->name) != 0)
1662 			continue;
1663 
1664 		/*
1665 		 * Try to see if the filter can be applied
1666 		 *  (filter arg is ignored on dry_run)
1667 		 */
1668 		err = replace_preds(call, NULL, ps, filter_string, true);
1669 		if (err)
1670 			call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
1671 		else
1672 			call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
1673 	}
1674 
1675 	list_for_each_entry(call, &ftrace_events, list) {
1676 		struct event_filter *filter;
1677 
1678 		if (strcmp(call->class->system, system->name) != 0)
1679 			continue;
1680 
1681 		if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
1682 			continue;
1683 
1684 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1685 		if (!filter_item)
1686 			goto fail_mem;
1687 
1688 		list_add_tail(&filter_item->list, &filter_list);
1689 
1690 		filter_item->filter = __alloc_filter();
1691 		if (!filter_item->filter)
1692 			goto fail_mem;
1693 		filter = filter_item->filter;
1694 
1695 		/* Can only fail on no memory */
1696 		err = replace_filter_string(filter, filter_string);
1697 		if (err)
1698 			goto fail_mem;
1699 
1700 		err = replace_preds(call, filter, ps, filter_string, false);
1701 		if (err) {
1702 			filter_disable(call);
1703 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1704 			append_filter_err(ps, filter);
1705 		} else
1706 			call->flags |= TRACE_EVENT_FL_FILTERED;
1707 		/*
1708 		 * Regardless of whether this returned an error, we still
1709 		 * replace the filter for the call.
1710 		 */
1711 		filter = call->filter;
1712 		rcu_assign_pointer(call->filter, filter_item->filter);
1713 		filter_item->filter = filter;
1714 
1715 		fail = false;
1716 	}
1717 
1718 	if (fail)
1719 		goto fail;
1720 
1721 	/*
1722 	 * The calls can still be using the old filters.
1723 	 * Do a synchronize_sched() to ensure all calls are
1724 	 * done with them before we free them.
1725 	 */
1726 	synchronize_sched();
1727 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1728 		__free_filter(filter_item->filter);
1729 		list_del(&filter_item->list);
1730 		kfree(filter_item);
1731 	}
1732 	return 0;
1733  fail:
1734 	/* No call succeeded */
1735 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1736 		list_del(&filter_item->list);
1737 		kfree(filter_item);
1738 	}
1739 	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1740 	return -EINVAL;
1741  fail_mem:
1742 	/* If any call succeeded, we still need to sync */
1743 	if (!fail)
1744 		synchronize_sched();
1745 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1746 		__free_filter(filter_item->filter);
1747 		list_del(&filter_item->list);
1748 		kfree(filter_item);
1749 	}
1750 	return -ENOMEM;
1751 }
1752 
1753 static int create_filter_start(char *filter_str, bool set_str,
1754 			       struct filter_parse_state **psp,
1755 			       struct event_filter **filterp)
1756 {
1757 	struct event_filter *filter;
1758 	struct filter_parse_state *ps = NULL;
1759 	int err = 0;
1760 
1761 	WARN_ON_ONCE(*psp || *filterp);
1762 
1763 	/* allocate everything, and if any fails, free all and fail */
1764 	filter = __alloc_filter();
1765 	if (filter && set_str)
1766 		err = replace_filter_string(filter, filter_str);
1767 
1768 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1769 
1770 	if (!filter || !ps || err) {
1771 		kfree(ps);
1772 		__free_filter(filter);
1773 		return -ENOMEM;
1774 	}
1775 
1776 	/* we're committed to creating a new filter */
1777 	*filterp = filter;
1778 	*psp = ps;
1779 
1780 	parse_init(ps, filter_ops, filter_str);
1781 	err = filter_parse(ps);
1782 	if (err && set_str)
1783 		append_filter_err(ps, filter);
1784 	return err;
1785 }
1786 
1787 static void create_filter_finish(struct filter_parse_state *ps)
1788 {
1789 	if (ps) {
1790 		filter_opstack_clear(ps);
1791 		postfix_clear(ps);
1792 		kfree(ps);
1793 	}
1794 }
1795 
1796 /**
1797  * create_filter - create a filter for a ftrace_event_call
1798  * @call: ftrace_event_call to create a filter for
1799  * @filter_str: filter string
1800  * @set_str: remember @filter_str and enable detailed error in filter
1801  * @filterp: out param for created filter (always updated on return)
1802  *
1803  * Creates a filter for @call with @filter_str.  If @set_str is %true,
1804  * @filter_str is copied and recorded in the new filter.
1805  *
1806  * On success, returns 0 and *@filterp points to the new filter.  On
1807  * failure, returns -errno and *@filterp may point to %NULL or to a new
1808  * filter.  In the latter case, the returned filter contains error
1809  * information if @set_str is %true and the caller is responsible for
1810  * freeing it.
1811  */
1812 static int create_filter(struct ftrace_event_call *call,
1813 			 char *filter_str, bool set_str,
1814 			 struct event_filter **filterp)
1815 {
1816 	struct event_filter *filter = NULL;
1817 	struct filter_parse_state *ps = NULL;
1818 	int err;
1819 
1820 	err = create_filter_start(filter_str, set_str, &ps, &filter);
1821 	if (!err) {
1822 		err = replace_preds(call, filter, ps, filter_str, false);
1823 		if (err && set_str)
1824 			append_filter_err(ps, filter);
1825 	}
1826 	create_filter_finish(ps);
1827 
1828 	*filterp = filter;
1829 	return err;
1830 }
1831 
1832 /**
1833  * create_system_filter - create a filter for an event_subsystem
1834  * @system: event_subsystem to create a filter for
1835  * @filter_str: filter string
1836  * @filterp: out param for created filter (always updated on return)
1837  *
1838  * Identical to create_filter() except that it creates a subsystem filter
1839  * and always remembers @filter_str.
1840  */
1841 static int create_system_filter(struct event_subsystem *system,
1842 				char *filter_str, struct event_filter **filterp)
1843 {
1844 	struct event_filter *filter = NULL;
1845 	struct filter_parse_state *ps = NULL;
1846 	int err;
1847 
1848 	err = create_filter_start(filter_str, true, &ps, &filter);
1849 	if (!err) {
1850 		err = replace_system_preds(system, ps, filter_str);
1851 		if (!err) {
1852 			/* System filters just show a default message */
1853 			kfree(filter->filter_string);
1854 			filter->filter_string = NULL;
1855 		} else {
1856 			append_filter_err(ps, filter);
1857 		}
1858 	}
1859 	create_filter_finish(ps);
1860 
1861 	*filterp = filter;
1862 	return err;
1863 }
1864 
1865 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1866 {
1867 	struct event_filter *filter;
1868 	int err = 0;
1869 
1870 	mutex_lock(&event_mutex);
1871 
1872 	if (!strcmp(strstrip(filter_string), "0")) {
1873 		filter_disable(call);
1874 		filter = call->filter;
1875 		if (!filter)
1876 			goto out_unlock;
1877 		RCU_INIT_POINTER(call->filter, NULL);
1878 		/* Make sure the filter is not being used */
1879 		synchronize_sched();
1880 		__free_filter(filter);
1881 		goto out_unlock;
1882 	}
1883 
1884 	err = create_filter(call, filter_string, true, &filter);
1885 
1886 	/*
1887 	 * Always swap the call filter with the new filter
1888 	 * even if there was an error. If there was an error
1889 	 * in the filter, we disable the filter and show the error
1890 	 * string.
1891 	 */
1892 	if (filter) {
1893 		struct event_filter *tmp = call->filter;
1894 
1895 		if (!err)
1896 			call->flags |= TRACE_EVENT_FL_FILTERED;
1897 		else
1898 			filter_disable(call);
1899 
1900 		rcu_assign_pointer(call->filter, filter);
1901 
1902 		if (tmp) {
1903 			/* Make sure the call is done with the filter */
1904 			synchronize_sched();
1905 			__free_filter(tmp);
1906 		}
1907 	}
1908 out_unlock:
1909 	mutex_unlock(&event_mutex);
1910 
1911 	return err;
1912 }
1913 
1914 int apply_subsystem_event_filter(struct event_subsystem *system,
1915 				 char *filter_string)
1916 {
1917 	struct event_filter *filter;
1918 	int err = 0;
1919 
1920 	mutex_lock(&event_mutex);
1921 
1922 	/* Make sure the system still has events */
1923 	if (!system->nr_events) {
1924 		err = -ENODEV;
1925 		goto out_unlock;
1926 	}
1927 
1928 	if (!strcmp(strstrip(filter_string), "0")) {
1929 		filter_free_subsystem_preds(system);
1930 		remove_filter_string(system->filter);
1931 		filter = system->filter;
1932 		system->filter = NULL;
1933 		/* Ensure all filters are no longer used */
1934 		synchronize_sched();
1935 		filter_free_subsystem_filters(system);
1936 		__free_filter(filter);
1937 		goto out_unlock;
1938 	}
1939 
1940 	err = create_system_filter(system, filter_string, &filter);
1941 	if (filter) {
1942 		/*
1943 		 * No event actually uses the system filter, so
1944 		 * we can free it without synchronize_sched().
1945 		 */
1946 		__free_filter(system->filter);
1947 		system->filter = filter;
1948 	}
1949 out_unlock:
1950 	mutex_unlock(&event_mutex);
1951 
1952 	return err;
1953 }
1954 
1955 #ifdef CONFIG_PERF_EVENTS
1956 
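/*
 * Free the filter attached to a perf event, if any.
 */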
1957 void ftrace_profile_free_filter(struct perf_event *event)
1958 {
1959 	struct event_filter *filter = event->filter;
1960 
1961 	event->filter = NULL;
1962 	__free_filter(filter);
1963 }
1964 
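/*
 * State carried through the predicate-tree walk when an 'ip' filter is
 * translated into function-tracer entries.  first_filter/first_notrace
 * make the first pattern of each kind reset any previously set entries.
 */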
1965 struct function_filter_data {
1966 	struct ftrace_ops *ops;
1967 	int first_filter;
1968 	int first_notrace;
1969 };
1970 
1971 #ifdef CONFIG_FUNCTION_TRACER
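/*
 * Split a comma- and/or space-separated list of function patterns into
 * an argv-style array; the caller frees it with argv_free().
 */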
1972 static char **
1973 ftrace_function_filter_re(char *buf, int len, int *count)
1974 {
1975 	char *str, *sep, **re;
1976 
1977 	str = kstrndup(buf, len, GFP_KERNEL);
1978 	if (!str)
1979 		return NULL;
1980 
1981 	/*
1982 	 * The argv_split function takes white space
1983 	 * as a separator, so convert ',' into spaces.
1984 	 */
1985 	while ((sep = strchr(str, ',')))
1986 		*sep = ' ';
1987 
1988 	re = argv_split(GFP_KERNEL, str, count);
1989 	kfree(str);
1990 	return re;
1991 }
1992 
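/*
 * Pass one pattern to the function tracer, either as a filter entry
 * (functions to trace) or as a notrace entry (functions to skip).
 */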
1993 static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
1994 				      int reset, char *re, int len)
1995 {
1996 	int ret;
1997 
1998 	if (filter)
1999 		ret = ftrace_set_filter(ops, re, len, reset);
2000 	else
2001 		ret = ftrace_set_notrace(ops, re, len, reset);
2002 
2003 	return ret;
2004 }
2005 
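/*
 * Apply all patterns of one predicate to data->ops, as either filter
 * or notrace entries depending on @filter.
 */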
2006 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2007 					struct function_filter_data *data)
2008 {
2009 	int i, re_cnt, ret;
2010 	int *reset;
2011 	char **re;
2012 
2013 	reset = filter ? &data->first_filter : &data->first_notrace;
2014 
2015 	/*
2016 	 * The 'ip' field could have multiple filters set, separated
2017 	 * either by space or comma. We first split the filter string
2018 	 * and apply each piece separately.
2019 	 */
2020 	re = ftrace_function_filter_re(buf, len, &re_cnt);
2021 	if (!re)
2022 		return -EINVAL;
2023 
2024 	for (i = 0; i < re_cnt; i++) {
2025 		ret = ftrace_function_set_regexp(data->ops, filter, *reset,
2026 						 re[i], strlen(re[i]));
2027 		if (ret)
2028 			break;
2029 
2030 		if (*reset)
2031 			*reset = 0;
2032 	}
2033 
2034 	argv_free(re);
2035 	return ret;
2036 }
2037 
2038 static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
2039 {
2040 	struct ftrace_event_field *field = pred->field;
2041 
2042 	if (leaf) {
2043 		/*
2044 		 * Check the leaf predicate for function trace, verify:
2045 		 *  - only '==' and '!=' are used
2046 		 *  - the 'ip' field is used
2047 		 */
2048 		if ((pred->op != OP_EQ) && (pred->op != OP_NE))
2049 			return -EINVAL;
2050 
2051 		if (strcmp(field->name, "ip"))
2052 			return -EINVAL;
2053 	} else {
2054 		/*
2055 		 * Check the non-leaf predicate for function trace, verify:
2056 		 *  - only '||' is used
2057 		 */
2058 		if (pred->op != OP_OR)
2059 			return -EINVAL;
2060 	}
2061 
2062 	return 0;
2063 }
2064 
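/*
 * walk_pred_tree() callback: validate each node of an 'ip' filter and
 * program every leaf predicate's pattern into the function tracer
 * ('==' selects functions to trace, '!=' functions to skip).
 */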
2065 static int ftrace_function_set_filter_cb(enum move_type move,
2066 					 struct filter_pred *pred,
2067 					 int *err, void *data)
2068 {
2069 	/* Check that the node is valid for function trace. */
2070 	if ((move != MOVE_DOWN) ||
2071 	    (pred->left != FILTER_PRED_INVALID)) {
2072 		*err = ftrace_function_check_pred(pred, 0);
2073 	} else {
2074 		*err = ftrace_function_check_pred(pred, 1);
2075 		if (*err)
2076 			return WALK_PRED_ABORT;
2077 
2078 		*err = __ftrace_function_set_filter(pred->op == OP_EQ,
2079 						    pred->regex.pattern,
2080 						    pred->regex.len,
2081 						    data);
2082 	}
2083 
2084 	return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
2085 }
2086 
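/*
 * Translate a perf event filter on the 'ip' field into filter/notrace
 * entries for the event's ftrace_ops.
 */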
2087 static int ftrace_function_set_filter(struct perf_event *event,
2088 				      struct event_filter *filter)
2089 {
2090 	struct function_filter_data data = {
2091 		.first_filter  = 1,
2092 		.first_notrace = 1,
2093 		.ops           = &event->ftrace_ops,
2094 	};
2095 
2096 	return walk_pred_tree(filter->preds, filter->root,
2097 			      ftrace_function_set_filter_cb, &data);
2098 }
2099 #else
2100 static int ftrace_function_set_filter(struct perf_event *event,
2101 				      struct event_filter *filter)
2102 {
2103 	return -ENODEV;
2104 }
2105 #endif /* CONFIG_FUNCTION_TRACER */
2106 
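/*
 * Attach an event filter to a perf event.  Function events have the
 * filter translated into function-tracer entries and the compiled
 * filter freed again; other events keep it in event->filter.
 */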
2107 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
2108 			      char *filter_str)
2109 {
2110 	int err;
2111 	struct event_filter *filter;
2112 	struct ftrace_event_call *call;
2113 
2114 	mutex_lock(&event_mutex);
2115 
2116 	call = event->tp_event;
2117 
2118 	err = -EINVAL;
2119 	if (!call)
2120 		goto out_unlock;
2121 
2122 	err = -EEXIST;
2123 	if (event->filter)
2124 		goto out_unlock;
2125 
2126 	err = create_filter(call, filter_str, false, &filter);
2127 	if (err)
2128 		goto free_filter;
2129 
2130 	if (ftrace_event_is_function(call))
2131 		err = ftrace_function_set_filter(event, filter);
2132 	else
2133 		event->filter = filter;
2134 
2135 free_filter:
2136 	if (err || ftrace_event_is_function(call))
2137 		__free_filter(filter);
2138 
2139 out_unlock:
2140 	mutex_unlock(&event_mutex);
2141 
2142 	return err;
2143 }
2144 
2145 #endif /* CONFIG_PERF_EVENTS */
2146 
2147 #ifdef CONFIG_FTRACE_STARTUP_TEST
2148 
2149 #include <linux/types.h>
2150 #include <linux/tracepoint.h>
2151 
2152 #define CREATE_TRACE_POINTS
2153 #include "trace_events_filter_test.h"
2154 
2155 #define DATA_REC(m, va, vb, vc, vd, ve, vf, vg, vh, nvisit) \
2156 { \
2157 	.filter = FILTER, \
2158 	.rec    = { .a = va, .b = vb, .c = vc, .d = vd, \
2159 		    .e = ve, .f = vf, .g = vg, .h = vh }, \
2160 	.match  = m, \
2161 	.not_visited = nvisit, \
2162 }
2163 #define YES 1
2164 #define NO  0
2165 
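/*
 * Each test pairs a filter string with a sample record, the expected
 * match result, and the fields whose predicates must not be visited
 * while matching (i.e. the short-circuited part of the tree).
 */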
2166 static struct test_filter_data_t {
2167 	char *filter;
2168 	struct ftrace_raw_ftrace_test_filter rec;
2169 	int match;
2170 	char *not_visited;
2171 } test_filter_data[] = {
2172 #define FILTER "a == 1 && b == 1 && c == 1 && d == 1 && " \
2173 	       "e == 1 && f == 1 && g == 1 && h == 1"
2174 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, ""),
2175 	DATA_REC(NO,  0, 1, 1, 1, 1, 1, 1, 1, "bcdefgh"),
2176 	DATA_REC(NO,  1, 1, 1, 1, 1, 1, 1, 0, ""),
2177 #undef FILTER
2178 #define FILTER "a == 1 || b == 1 || c == 1 || d == 1 || " \
2179 	       "e == 1 || f == 1 || g == 1 || h == 1"
2180 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
2181 	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 0, 1, ""),
2182 	DATA_REC(YES, 1, 0, 0, 0, 0, 0, 0, 0, "bcdefgh"),
2183 #undef FILTER
2184 #define FILTER "(a == 1 || b == 1) && (c == 1 || d == 1) && " \
2185 	       "(e == 1 || f == 1) && (g == 1 || h == 1)"
2186 	DATA_REC(NO,  0, 0, 1, 1, 1, 1, 1, 1, "dfh"),
2187 	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2188 	DATA_REC(YES, 1, 0, 1, 0, 0, 1, 0, 1, "bd"),
2189 	DATA_REC(NO,  1, 0, 1, 0, 0, 1, 0, 0, "bd"),
2190 #undef FILTER
2191 #define FILTER "(a == 1 && b == 1) || (c == 1 && d == 1) || " \
2192 	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
2193 	DATA_REC(YES, 1, 0, 1, 1, 1, 1, 1, 1, "efgh"),
2194 	DATA_REC(YES, 0, 0, 0, 0, 0, 0, 1, 1, ""),
2195 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
2196 #undef FILTER
2197 #define FILTER "(a == 1 && b == 1) && (c == 1 && d == 1) && " \
2198 	       "(e == 1 && f == 1) || (g == 1 && h == 1)"
2199 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 0, "gh"),
2200 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 1, ""),
2201 	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, ""),
2202 #undef FILTER
2203 #define FILTER "((a == 1 || b == 1) || (c == 1 || d == 1) || " \
2204 	       "(e == 1 || f == 1)) && (g == 1 || h == 1)"
2205 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 0, 1, "bcdef"),
2206 	DATA_REC(NO,  0, 0, 0, 0, 0, 0, 0, 0, ""),
2207 	DATA_REC(YES, 1, 1, 1, 1, 1, 0, 1, 1, "h"),
2208 #undef FILTER
2209 #define FILTER "((((((((a == 1) && (b == 1)) || (c == 1)) && (d == 1)) || " \
2210 	       "(e == 1)) && (f == 1)) || (g == 1)) && (h == 1))"
2211 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "ceg"),
2212 	DATA_REC(NO,  0, 1, 0, 1, 0, 1, 0, 1, ""),
2213 	DATA_REC(NO,  1, 0, 1, 0, 1, 0, 1, 0, ""),
2214 #undef FILTER
2215 #define FILTER "((((((((a == 1) || (b == 1)) && (c == 1)) || (d == 1)) && " \
2216 	       "(e == 1)) || (f == 1)) && (g == 1)) || (h == 1))"
2217 	DATA_REC(YES, 1, 1, 1, 1, 1, 1, 1, 1, "bdfh"),
2218 	DATA_REC(YES, 0, 1, 0, 1, 0, 1, 0, 1, ""),
2219 	DATA_REC(YES, 1, 0, 1, 0, 1, 0, 1, 0, "bdfh"),
2220 };
2221 
2222 #undef DATA_REC
2223 #undef FILTER
2224 #undef YES
2225 #undef NO
2226 
2227 #define DATA_CNT ARRAY_SIZE(test_filter_data)
2228 
2229 static int test_pred_visited;
2230 
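/*
 * Replacement predicate function: flag that a predicate which should
 * have been short-circuited was actually evaluated.
 */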
2231 static int test_pred_visited_fn(struct filter_pred *pred, void *event)
2232 {
2233 	struct ftrace_event_field *field = pred->field;
2234 
2235 	test_pred_visited = 1;
2236 	printk(KERN_INFO "\npred visited %s\n", field->name);
2237 	return 1;
2238 }
2239 
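/*
 * walk_pred_tree() callback: hook test_pred_visited_fn into every leaf
 * predicate whose field is listed in the 'not_visited' string.
 */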
2240 static int test_walk_pred_cb(enum move_type move, struct filter_pred *pred,
2241 			     int *err, void *data)
2242 {
2243 	char *fields = data;
2244 
2245 	if ((move == MOVE_DOWN) &&
2246 	    (pred->left == FILTER_PRED_INVALID)) {
2247 		struct ftrace_event_field *field = pred->field;
2248 
2249 		if (!field) {
2250 			WARN(1, "all leaves should have a field defined");
2251 			return WALK_PRED_DEFAULT;
2252 		}
2253 		if (!strchr(fields, *field->name))
2254 			return WALK_PRED_DEFAULT;
2255 
2256 		WARN_ON(!pred->fn);
2257 		pred->fn = test_pred_visited_fn;
2258 	}
2259 	return WALK_PRED_DEFAULT;
2260 }
2261 
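/*
 * Startup self test: build each filter in test_filter_data, match it
 * against the sample record, and check both the result and that the
 * short-circuited predicates were never evaluated.
 */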
2262 static __init int ftrace_test_event_filter(void)
2263 {
2264 	int i;
2265 
2266 	printk(KERN_INFO "Testing ftrace filter: ");
2267 
2268 	for (i = 0; i < DATA_CNT; i++) {
2269 		struct event_filter *filter = NULL;
2270 		struct test_filter_data_t *d = &test_filter_data[i];
2271 		int err;
2272 
2273 		err = create_filter(&event_ftrace_test_filter, d->filter,
2274 				    false, &filter);
2275 		if (err) {
2276 			printk(KERN_INFO
2277 			       "Failed to get filter for '%s', err %d\n",
2278 			       d->filter, err);
2279 			__free_filter(filter);
2280 			break;
2281 		}
2282 
2283 		/*
2284 		 * Disabling preemption is not really needed for the self
2285 		 * tests, but the RCU dereference will complain without it.
2286 		 */
2287 		preempt_disable();
2288 		if (*d->not_visited)
2289 			walk_pred_tree(filter->preds, filter->root,
2290 				       test_walk_pred_cb,
2291 				       d->not_visited);
2292 
2293 		test_pred_visited = 0;
2294 		err = filter_match_preds(filter, &d->rec);
2295 		preempt_enable();
2296 
2297 		__free_filter(filter);
2298 
2299 		if (test_pred_visited) {
2300 			printk(KERN_INFO
2301 			       "Failed, unwanted pred visited for filter %s\n",
2302 			       d->filter);
2303 			break;
2304 		}
2305 
2306 		if (err != d->match) {
2307 			printk(KERN_INFO
2308 			       "Failed to match filter '%s', expected %d\n",
2309 			       d->filter, d->match);
2310 			break;
2311 		}
2312 	}
2313 
2314 	if (i == DATA_CNT)
2315 		printk(KERN_CONT "OK\n");
2316 
2317 	return 0;
2318 }
2319 
2320 late_initcall(ftrace_test_event_filter);
2321 
2322 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2323