1 /*
2  * trace_events_filter - generic event filtering
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/perf_event.h>
25 #include <linux/slab.h>
26 
27 #include "trace.h"
28 #include "trace_output.h"
29 
30 enum filter_op_ids
31 {
32 	OP_OR,
33 	OP_AND,
34 	OP_GLOB,
35 	OP_NE,
36 	OP_EQ,
37 	OP_LT,
38 	OP_LE,
39 	OP_GT,
40 	OP_GE,
41 	OP_NONE,
42 	OP_OPEN_PAREN,
43 };
44 
45 struct filter_op {
46 	int id;
47 	char *string;
48 	int precedence;
49 };
50 
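/*
 * Infix operators recognized by the filter parser, with their
 * precedence. A higher precedence binds tighter, so for example
 * "a == 1 && b == 2 || c == 3" groups as ((a == 1) && (b == 2)) || (c == 3).
 */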
51 static struct filter_op filter_ops[] = {
52 	{ OP_OR,	"||",		1 },
53 	{ OP_AND,	"&&",		2 },
54 	{ OP_GLOB,	"~",		4 },
55 	{ OP_NE,	"!=",		4 },
56 	{ OP_EQ,	"==",		4 },
57 	{ OP_LT,	"<",		5 },
58 	{ OP_LE,	"<=",		5 },
59 	{ OP_GT,	">",		5 },
60 	{ OP_GE,	">=",		5 },
61 	{ OP_NONE,	"OP_NONE",	0 },
62 	{ OP_OPEN_PAREN, "(",		0 },
63 };
64 
65 enum {
66 	FILT_ERR_NONE,
67 	FILT_ERR_INVALID_OP,
68 	FILT_ERR_UNBALANCED_PAREN,
69 	FILT_ERR_TOO_MANY_OPERANDS,
70 	FILT_ERR_OPERAND_TOO_LONG,
71 	FILT_ERR_FIELD_NOT_FOUND,
72 	FILT_ERR_ILLEGAL_FIELD_OP,
73 	FILT_ERR_ILLEGAL_INTVAL,
74 	FILT_ERR_BAD_SUBSYS_FILTER,
75 	FILT_ERR_TOO_MANY_PREDS,
76 	FILT_ERR_MISSING_FIELD,
77 	FILT_ERR_INVALID_FILTER,
78 };
79 
80 static char *err_text[] = {
81 	"No error",
82 	"Invalid operator",
83 	"Unbalanced parens",
84 	"Too many operands",
85 	"Operand too long",
86 	"Field not found",
87 	"Illegal operation for field type",
88 	"Illegal integer value",
89 	"Couldn't find or set field in one of a subsystem's events",
90 	"Too many terms in predicate expression",
91 	"Missing field name and/or value",
92 	"Meaningless filter expression",
93 };
94 
95 struct opstack_op {
96 	int op;
97 	struct list_head list;
98 };
99 
100 struct postfix_elt {
101 	int op;
102 	char *operand;
103 	struct list_head list;
104 };
105 
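/*
 * State used while parsing a filter string: @ops points at the
 * operator table, @opstack holds operators pushed during the infix
 * scan, @postfix collects the resulting postfix elements, @infix
 * tracks the cursor into the input string, @operand accumulates the
 * characters of the operand currently being read, and lasterr /
 * lasterr_pos record the last parse error.
 */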
106 struct filter_parse_state {
107 	struct filter_op *ops;
108 	struct list_head opstack;
109 	struct list_head postfix;
110 	int lasterr;
111 	int lasterr_pos;
112 
113 	struct {
114 		char *string;
115 		unsigned int cnt;
116 		unsigned int tail;
117 	} infix;
118 
119 	struct {
120 		char string[MAX_FILTER_STR_VAL];
121 		int pos;
122 		unsigned int tail;
123 	} operand;
124 };
125 
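/*
 * Stack of predicate pointers used while turning the postfix list
 * into a tree: leaf predicates are pushed as they are added and
 * popped back off as children when an AND/OR predicate is placed.
 */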
126 struct pred_stack {
127 	struct filter_pred	**preds;
128 	int			index;
129 };
130 
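/*
 * The two macros below generate the per-type predicate callbacks:
 * DEFINE_COMPARISON_PRED() handles the ordered operators (<, <=, >, >=)
 * for each signed and unsigned integer type, and DEFINE_EQUALITY_PRED()
 * compares the raw value for a given size, honoring pred->not for !=.
 */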
131 #define DEFINE_COMPARISON_PRED(type)					\
132 static int filter_pred_##type(struct filter_pred *pred, void *event)	\
133 {									\
134 	type *addr = (type *)(event + pred->offset);			\
135 	type val = (type)pred->val;					\
136 	int match = 0;							\
137 									\
138 	switch (pred->op) {						\
139 	case OP_LT:							\
140 		match = (*addr < val);					\
141 		break;							\
142 	case OP_LE:							\
143 		match = (*addr <= val);					\
144 		break;							\
145 	case OP_GT:							\
146 		match = (*addr > val);					\
147 		break;							\
148 	case OP_GE:							\
149 		match = (*addr >= val);					\
150 		break;							\
151 	default:							\
152 		break;							\
153 	}								\
154 									\
155 	return match;							\
156 }
157 
158 #define DEFINE_EQUALITY_PRED(size)					\
159 static int filter_pred_##size(struct filter_pred *pred, void *event)	\
160 {									\
161 	u##size *addr = (u##size *)(event + pred->offset);		\
162 	u##size val = (u##size)pred->val;				\
163 	int match;							\
164 									\
165 	match = (val == *addr) ^ pred->not;				\
166 									\
167 	return match;							\
168 }
169 
170 DEFINE_COMPARISON_PRED(s64);
171 DEFINE_COMPARISON_PRED(u64);
172 DEFINE_COMPARISON_PRED(s32);
173 DEFINE_COMPARISON_PRED(u32);
174 DEFINE_COMPARISON_PRED(s16);
175 DEFINE_COMPARISON_PRED(u16);
176 DEFINE_COMPARISON_PRED(s8);
177 DEFINE_COMPARISON_PRED(u8);
178 
179 DEFINE_EQUALITY_PRED(64);
180 DEFINE_EQUALITY_PRED(32);
181 DEFINE_EQUALITY_PRED(16);
182 DEFINE_EQUALITY_PRED(8);
183 
184 /* Filter predicate for fixed-size arrays of characters */
185 static int filter_pred_string(struct filter_pred *pred, void *event)
186 {
187 	char *addr = (char *)(event + pred->offset);
188 	int cmp, match;
189 
190 	cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
191 
192 	match = cmp ^ pred->not;
193 
194 	return match;
195 }
196 
197 /* Filter predicate for char * pointers */
198 static int filter_pred_pchar(struct filter_pred *pred, void *event)
199 {
200 	char **addr = (char **)(event + pred->offset);
201 	int cmp, match;
202 	int len = strlen(*addr) + 1;	/* including trailing '\0' */
203 
204 	cmp = pred->regex.match(*addr, &pred->regex, len);
205 
206 	match = cmp ^ pred->not;
207 
208 	return match;
209 }
210 
211 /*
212  * Filter predicate for dynamically sized arrays of characters.
213  * These are implemented as a list of strings at the end
214  * of the entry.
215  * Each of these strings has a field in the entry which
216  * contains its offset from the beginning of the entry.
217  * We therefore first read this offset field and add its
218  * value to the address of the entry, which yields the
219  * address of the string.
220  */
221 static int filter_pred_strloc(struct filter_pred *pred, void *event)
222 {
223 	u32 str_item = *(u32 *)(event + pred->offset);
224 	int str_loc = str_item & 0xffff;
225 	int str_len = str_item >> 16;
226 	char *addr = (char *)(event + str_loc);
227 	int cmp, match;
228 
229 	cmp = pred->regex.match(addr, &pred->regex, str_len);
230 
231 	match = cmp ^ pred->not;
232 
233 	return match;
234 }
235 
236 static int filter_pred_none(struct filter_pred *pred, void *event)
237 {
238 	return 0;
239 }
240 
241 /*
242  * regex_match_foo - Basic regex callbacks
243  *
244  * @str: the string to be searched
245  * @r:   the regex structure containing the pattern string
246  * @len: the length of the string to be searched (including '\0')
247  *
248  * Note:
249  * - @str might not be NULL-terminated if it's of type DYN_STRING
250  *   or STATIC_STRING
251  */
252 
253 static int regex_match_full(char *str, struct regex *r, int len)
254 {
255 	if (strncmp(str, r->pattern, len) == 0)
256 		return 1;
257 	return 0;
258 }
259 
260 static int regex_match_front(char *str, struct regex *r, int len)
261 {
262 	if (strncmp(str, r->pattern, r->len) == 0)
263 		return 1;
264 	return 0;
265 }
266 
267 static int regex_match_middle(char *str, struct regex *r, int len)
268 {
269 	if (strnstr(str, r->pattern, len))
270 		return 1;
271 	return 0;
272 }
273 
274 static int regex_match_end(char *str, struct regex *r, int len)
275 {
276 	int strlen = len - 1;
277 
278 	if (strlen >= r->len &&
279 	    memcmp(str + strlen - r->len, r->pattern, r->len) == 0)
280 		return 1;
281 	return 0;
282 }
283 
284 /**
285  * filter_parse_regex - parse a basic regex
286  * @buff:   the raw regex
287  * @len:    length of the regex
288  * @search: will point to the beginning of the string to compare
289  * @not:    tell whether the match will have to be inverted
290  *
291  * The caller passes in a buffer containing a regex. This function
292  * sets @search to point to the search part of the buffer and
293  * returns the type of search it is (see enum above).
294  * Note that this modifies @buff.
295  *
296  * Returns enum type.
297  *  search returns the pointer to use for comparison.
298  *  not returns 1 if buff started with a '!'
299  *     0 otherwise.
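 *
 * For example, "abc" yields MATCH_FULL, "abc*" yields MATCH_FRONT_ONLY,
 * "*abc" yields MATCH_END_ONLY and "*abc*" yields MATCH_MIDDLE_ONLY,
 * with *search pointing at "abc" in each case.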
300  */
301 enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
302 {
303 	int type = MATCH_FULL;
304 	int i;
305 
306 	if (buff[0] == '!') {
307 		*not = 1;
308 		buff++;
309 		len--;
310 	} else
311 		*not = 0;
312 
313 	*search = buff;
314 
315 	for (i = 0; i < len; i++) {
316 		if (buff[i] == '*') {
317 			if (!i) {
318 				*search = buff + 1;
319 				type = MATCH_END_ONLY;
320 			} else {
321 				if (type == MATCH_END_ONLY)
322 					type = MATCH_MIDDLE_ONLY;
323 				else
324 					type = MATCH_FRONT_ONLY;
325 				buff[i] = 0;
326 				break;
327 			}
328 		}
329 	}
330 
331 	return type;
332 }
333 
334 static void filter_build_regex(struct filter_pred *pred)
335 {
336 	struct regex *r = &pred->regex;
337 	char *search;
338 	enum regex_type type = MATCH_FULL;
339 	int not = 0;
340 
341 	if (pred->op == OP_GLOB) {
342 		type = filter_parse_regex(r->pattern, r->len, &search, &not);
343 		r->len = strlen(search);
344 		memmove(r->pattern, search, r->len+1);
345 	}
346 
347 	switch (type) {
348 	case MATCH_FULL:
349 		r->match = regex_match_full;
350 		break;
351 	case MATCH_FRONT_ONLY:
352 		r->match = regex_match_front;
353 		break;
354 	case MATCH_MIDDLE_ONLY:
355 		r->match = regex_match_middle;
356 		break;
357 	case MATCH_END_ONLY:
358 		r->match = regex_match_end;
359 		break;
360 	}
361 
362 	pred->not ^= not;
363 }
364 
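/*
 * The predicate tree is walked iteratively rather than recursively.
 * Each step records the direction of travel: down into a child, or
 * back up to a parent from its left or right side. The parent index
 * stored in pred->parent carries the FILTER_PRED_IS_RIGHT flag, which
 * lets get_pred_parent() report which side we are returning from.
 */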
365 enum move_type {
366 	MOVE_DOWN,
367 	MOVE_UP_FROM_LEFT,
368 	MOVE_UP_FROM_RIGHT
369 };
370 
371 static struct filter_pred *
372 get_pred_parent(struct filter_pred *pred, struct filter_pred *preds,
373 		int index, enum move_type *move)
374 {
375 	if (pred->parent & FILTER_PRED_IS_RIGHT)
376 		*move = MOVE_UP_FROM_RIGHT;
377 	else
378 		*move = MOVE_UP_FROM_LEFT;
379 	pred = &preds[pred->parent & ~FILTER_PRED_IS_RIGHT];
380 
381 	return pred;
382 }
383 
384 /*
385  * A series of ANDs or ORs were found together. Instead of
386  * climbing up and down the tree branches, an array of the
387  * ops was made in order of checks. We can just move across
388  * the array and short circuit if needed.
389  */
390 static int process_ops(struct filter_pred *preds,
391 		       struct filter_pred *op, void *rec)
392 {
393 	struct filter_pred *pred;
394 	int match = 0;
395 	int type;
396 	int i;
397 
398 	/*
399 	 * Micro-optimization: We set type to true if op
400 	 * is an OR and false otherwise (AND). Then we
401 	 * just need to test if the match is equal to
402 	 * the type, and if it is, we can short circuit the
403 	 * rest of the checks:
404 	 *
405 	 * if ((match && op->op == OP_OR) ||
406 	 *     (!match && op->op == OP_AND))
407 	 *	  return match;
408 	 */
409 	type = op->op == OP_OR;
410 
411 	for (i = 0; i < op->val; i++) {
412 		pred = &preds[op->ops[i]];
413 		match = pred->fn(pred, rec);
414 		if (!!match == type)
415 			return match;
416 	}
417 	return match;
418 }
419 
420 /* return 1 if event matches, 0 otherwise (discard) */
421 int filter_match_preds(struct event_filter *filter, void *rec)
422 {
423 	int match = -1;
424 	enum move_type move = MOVE_DOWN;
425 	struct filter_pred *preds;
426 	struct filter_pred *pred;
427 	struct filter_pred *root;
428 	int n_preds;
429 	int done = 0;
430 
431 	/* no filter is considered a match */
432 	if (!filter)
433 		return 1;
434 
435 	n_preds = filter->n_preds;
436 
437 	if (!n_preds)
438 		return 1;
439 
440 	/*
441 	 * n_preds, root and filter->preds are protected with preemption disabled.
442 	 */
443 	preds = rcu_dereference_sched(filter->preds);
444 	root = rcu_dereference_sched(filter->root);
445 	if (!root)
446 		return 1;
447 
448 	pred = root;
449 
450 	/* match is currently meaningless */
451 	match = -1;
452 
453 	do {
454 		switch (move) {
455 		case MOVE_DOWN:
456 			/* only AND and OR have children */
457 			if (pred->left != FILTER_PRED_INVALID) {
458 				/* If ops is set, then it was folded. */
459 				if (!pred->ops) {
460 					/* keep going down the left side */
461 					pred = &preds[pred->left];
462 					continue;
463 				}
464 				/* We can treat folded ops as a leaf node */
465 				match = process_ops(preds, pred, rec);
466 			} else
467 				match = pred->fn(pred, rec);
468 			/* If this pred is the only pred */
469 			if (pred == root)
470 				break;
471 			pred = get_pred_parent(pred, preds,
472 					       pred->parent, &move);
473 			continue;
474 		case MOVE_UP_FROM_LEFT:
475 			/*
476 			 * Check for short circuits.
477 			 *
478 			 * Optimization: !!match == (pred->op == OP_OR)
479 			 *   is the same as:
480 			 * if ((match && pred->op == OP_OR) ||
481 			 *     (!match && pred->op == OP_AND))
482 			 */
483 			if (!!match == (pred->op == OP_OR)) {
484 				if (pred == root)
485 					break;
486 				pred = get_pred_parent(pred, preds,
487 						       pred->parent, &move);
488 				continue;
489 			}
490 			/* now go down the right side of the tree. */
491 			pred = &preds[pred->right];
492 			move = MOVE_DOWN;
493 			continue;
494 		case MOVE_UP_FROM_RIGHT:
495 			/* We finished this equation. */
496 			if (pred == root)
497 				break;
498 			pred = get_pred_parent(pred, preds,
499 					       pred->parent, &move);
500 			continue;
501 		}
502 		done = 1;
503 	} while (!done);
504 
505 	return match;
506 }
507 EXPORT_SYMBOL_GPL(filter_match_preds);
508 
509 static void parse_error(struct filter_parse_state *ps, int err, int pos)
510 {
511 	ps->lasterr = err;
512 	ps->lasterr_pos = pos;
513 }
514 
515 static void remove_filter_string(struct event_filter *filter)
516 {
517 	if (!filter)
518 		return;
519 
520 	kfree(filter->filter_string);
521 	filter->filter_string = NULL;
522 }
523 
524 static int replace_filter_string(struct event_filter *filter,
525 				 char *filter_string)
526 {
527 	kfree(filter->filter_string);
528 	filter->filter_string = kstrdup(filter_string, GFP_KERNEL);
529 	if (!filter->filter_string)
530 		return -ENOMEM;
531 
532 	return 0;
533 }
534 
535 static int append_filter_string(struct event_filter *filter,
536 				char *string)
537 {
538 	int newlen;
539 	char *new_filter_string;
540 
541 	BUG_ON(!filter->filter_string);
542 	newlen = strlen(filter->filter_string) + strlen(string) + 1;
543 	new_filter_string = kmalloc(newlen, GFP_KERNEL);
544 	if (!new_filter_string)
545 		return -ENOMEM;
546 
547 	strcpy(new_filter_string, filter->filter_string);
548 	strcat(new_filter_string, string);
549 	kfree(filter->filter_string);
550 	filter->filter_string = new_filter_string;
551 
552 	return 0;
553 }
554 
555 static void append_filter_err(struct filter_parse_state *ps,
556 			      struct event_filter *filter)
557 {
558 	int pos = ps->lasterr_pos;
559 	char *buf, *pbuf;
560 
561 	buf = (char *)__get_free_page(GFP_TEMPORARY);
562 	if (!buf)
563 		return;
564 
565 	append_filter_string(filter, "\n");
566 	memset(buf, ' ', PAGE_SIZE);
567 	if (pos > PAGE_SIZE - 128)
568 		pos = 0;
569 	buf[pos] = '^';
570 	pbuf = &buf[pos] + 1;
571 
572 	sprintf(pbuf, "\nparse_error: %s\n", err_text[ps->lasterr]);
573 	append_filter_string(filter, buf);
574 	free_page((unsigned long) buf);
575 }
576 
577 void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
578 {
579 	struct event_filter *filter;
580 
581 	mutex_lock(&event_mutex);
582 	filter = call->filter;
583 	if (filter && filter->filter_string)
584 		trace_seq_printf(s, "%s\n", filter->filter_string);
585 	else
586 		trace_seq_printf(s, "none\n");
587 	mutex_unlock(&event_mutex);
588 }
589 
590 void print_subsystem_event_filter(struct event_subsystem *system,
591 				  struct trace_seq *s)
592 {
593 	struct event_filter *filter;
594 
595 	mutex_lock(&event_mutex);
596 	filter = system->filter;
597 	if (filter && filter->filter_string)
598 		trace_seq_printf(s, "%s\n", filter->filter_string);
599 	else
600 		trace_seq_printf(s, "none\n");
601 	mutex_unlock(&event_mutex);
602 }
603 
604 static struct ftrace_event_field *
605 __find_event_field(struct list_head *head, char *name)
606 {
607 	struct ftrace_event_field *field;
608 
609 	list_for_each_entry(field, head, link) {
610 		if (!strcmp(field->name, name))
611 			return field;
612 	}
613 
614 	return NULL;
615 }
616 
617 static struct ftrace_event_field *
618 find_event_field(struct ftrace_event_call *call, char *name)
619 {
620 	struct ftrace_event_field *field;
621 	struct list_head *head;
622 
623 	field = __find_event_field(&ftrace_common_fields, name);
624 	if (field)
625 		return field;
626 
627 	head = trace_get_fields(call);
628 	return __find_event_field(head, name);
629 }
630 
631 static void filter_free_pred(struct filter_pred *pred)
632 {
633 	if (!pred)
634 		return;
635 
636 	kfree(pred->field_name);
637 	kfree(pred);
638 }
639 
640 static void filter_clear_pred(struct filter_pred *pred)
641 {
642 	kfree(pred->field_name);
643 	pred->field_name = NULL;
644 	pred->regex.len = 0;
645 }
646 
647 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
648 {
649 	stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
650 	if (!stack->preds)
651 		return -ENOMEM;
652 	stack->index = n_preds;
653 	return 0;
654 }
655 
656 static void __free_pred_stack(struct pred_stack *stack)
657 {
658 	kfree(stack->preds);
659 	stack->index = 0;
660 }
661 
662 static int __push_pred_stack(struct pred_stack *stack,
663 			     struct filter_pred *pred)
664 {
665 	int index = stack->index;
666 
667 	if (WARN_ON(index == 0))
668 		return -ENOSPC;
669 
670 	stack->preds[--index] = pred;
671 	stack->index = index;
672 	return 0;
673 }
674 
675 static struct filter_pred *
676 __pop_pred_stack(struct pred_stack *stack)
677 {
678 	struct filter_pred *pred;
679 	int index = stack->index;
680 
681 	pred = stack->preds[index++];
682 	if (!pred)
683 		return NULL;
684 
685 	stack->index = index;
686 	return pred;
687 }
688 
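/*
 * Place the predicate taken from the postfix list into the flat
 * filter->preds[] array at @idx and wire up the tree: an AND/OR pops
 * its two children off the pred stack and records their indexes, while
 * a leaf gets FILTER_PRED_INVALID as its left child. Nodes whose
 * subtrees can later be folded into an ops[] array are marked with
 * FILTER_PRED_FOLD, and the new predicate is pushed back on the stack
 * for its future parent.
 */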
689 static int filter_set_pred(struct event_filter *filter,
690 			   int idx,
691 			   struct pred_stack *stack,
692 			   struct filter_pred *src,
693 			   filter_pred_fn_t fn)
694 {
695 	struct filter_pred *dest = &filter->preds[idx];
696 	struct filter_pred *left;
697 	struct filter_pred *right;
698 
699 	*dest = *src;
700 	if (src->field_name) {
701 		dest->field_name = kstrdup(src->field_name, GFP_KERNEL);
702 		if (!dest->field_name)
703 			return -ENOMEM;
704 	}
705 	dest->fn = fn;
706 	dest->index = idx;
707 
708 	if (dest->op == OP_OR || dest->op == OP_AND) {
709 		right = __pop_pred_stack(stack);
710 		left = __pop_pred_stack(stack);
711 		if (!left || !right)
712 			return -EINVAL;
713 		/*
714 		 * If both children can be folded
715 		 * and they are the same op as this op or a leaf,
716 		 * then this op can be folded.
717 		 */
718 		if (left->index & FILTER_PRED_FOLD &&
719 		    (left->op == dest->op ||
720 		     left->left == FILTER_PRED_INVALID) &&
721 		    right->index & FILTER_PRED_FOLD &&
722 		    (right->op == dest->op ||
723 		     right->left == FILTER_PRED_INVALID))
724 			dest->index |= FILTER_PRED_FOLD;
725 
726 		dest->left = left->index & ~FILTER_PRED_FOLD;
727 		dest->right = right->index & ~FILTER_PRED_FOLD;
728 		left->parent = dest->index & ~FILTER_PRED_FOLD;
729 		right->parent = dest->index | FILTER_PRED_IS_RIGHT;
730 	} else {
731 		/*
732 		 * Make dest->left invalid to be used as a quick
733 		 * way to know this is a leaf node.
734 		 */
735 		dest->left = FILTER_PRED_INVALID;
736 
737 		/* All leaves allow folding the parent ops. */
738 		dest->index |= FILTER_PRED_FOLD;
739 	}
740 
741 	return __push_pred_stack(stack, dest);
742 }
743 
744 static void __free_preds(struct event_filter *filter)
745 {
746 	int i;
747 
748 	if (filter->preds) {
749 		for (i = 0; i < filter->a_preds; i++)
750 			kfree(filter->preds[i].field_name);
751 		kfree(filter->preds);
752 		filter->preds = NULL;
753 	}
754 	filter->a_preds = 0;
755 	filter->n_preds = 0;
756 }
757 
758 static void filter_disable(struct ftrace_event_call *call)
759 {
760 	call->flags &= ~TRACE_EVENT_FL_FILTERED;
761 }
762 
763 static void __free_filter(struct event_filter *filter)
764 {
765 	if (!filter)
766 		return;
767 
768 	__free_preds(filter);
769 	kfree(filter->filter_string);
770 	kfree(filter);
771 }
772 
773 /*
774  * Called when destroying the ftrace_event_call.
775  * The call is being freed, so we do not need to worry about
776  * the call being currently used. This is for module code removing
777  * the tracepoints from within it.
778  */
779 void destroy_preds(struct ftrace_event_call *call)
780 {
781 	__free_filter(call->filter);
782 	call->filter = NULL;
783 }
784 
785 static struct event_filter *__alloc_filter(void)
786 {
787 	struct event_filter *filter;
788 
789 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
790 	return filter;
791 }
792 
793 static int __alloc_preds(struct event_filter *filter, int n_preds)
794 {
795 	struct filter_pred *pred;
796 	int i;
797 
798 	if (filter->preds)
799 		__free_preds(filter);
800 
801 	filter->preds =
802 		kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
803 
804 	if (!filter->preds)
805 		return -ENOMEM;
806 
807 	filter->a_preds = n_preds;
808 	filter->n_preds = 0;
809 
810 	for (i = 0; i < n_preds; i++) {
811 		pred = &filter->preds[i];
812 		pred->fn = filter_pred_none;
813 	}
814 
815 	return 0;
816 }
817 
818 static void filter_free_subsystem_preds(struct event_subsystem *system)
819 {
820 	struct ftrace_event_call *call;
821 
822 	list_for_each_entry(call, &ftrace_events, list) {
823 		if (strcmp(call->class->system, system->name) != 0)
824 			continue;
825 
826 		filter_disable(call);
827 		remove_filter_string(call->filter);
828 	}
829 }
830 
831 static void filter_free_subsystem_filters(struct event_subsystem *system)
832 {
833 	struct ftrace_event_call *call;
834 
835 	list_for_each_entry(call, &ftrace_events, list) {
836 		if (strcmp(call->class->system, system->name) != 0)
837 			continue;
838 		__free_filter(call->filter);
839 		call->filter = NULL;
840 	}
841 }
842 
843 static int filter_add_pred_fn(struct filter_parse_state *ps,
844 			      struct ftrace_event_call *call,
845 			      struct event_filter *filter,
846 			      struct filter_pred *pred,
847 			      struct pred_stack *stack,
848 			      filter_pred_fn_t fn)
849 {
850 	int idx, err;
851 
852 	if (WARN_ON(filter->n_preds == filter->a_preds)) {
853 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
854 		return -ENOSPC;
855 	}
856 
857 	idx = filter->n_preds;
858 	filter_clear_pred(&filter->preds[idx]);
859 	err = filter_set_pred(filter, idx, stack, pred, fn);
860 	if (err)
861 		return err;
862 
863 	filter->n_preds++;
864 
865 	return 0;
866 }
867 
868 int filter_assign_type(const char *type)
869 {
870 	if (strstr(type, "__data_loc") && strstr(type, "char"))
871 		return FILTER_DYN_STRING;
872 
873 	if (strchr(type, '[') && strstr(type, "char"))
874 		return FILTER_STATIC_STRING;
875 
876 	return FILTER_OTHER;
877 }
878 
879 static bool is_string_field(struct ftrace_event_field *field)
880 {
881 	return field->filter_type == FILTER_DYN_STRING ||
882 	       field->filter_type == FILTER_STATIC_STRING ||
883 	       field->filter_type == FILTER_PTR_STRING;
884 }
885 
886 static int is_legal_op(struct ftrace_event_field *field, int op)
887 {
888 	if (is_string_field(field) &&
889 	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
890 		return 0;
891 	if (!is_string_field(field) && op == OP_GLOB)
892 		return 0;
893 
894 	return 1;
895 }
896 
897 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
898 					     int field_is_signed)
899 {
900 	filter_pred_fn_t fn = NULL;
901 
902 	switch (field_size) {
903 	case 8:
904 		if (op == OP_EQ || op == OP_NE)
905 			fn = filter_pred_64;
906 		else if (field_is_signed)
907 			fn = filter_pred_s64;
908 		else
909 			fn = filter_pred_u64;
910 		break;
911 	case 4:
912 		if (op == OP_EQ || op == OP_NE)
913 			fn = filter_pred_32;
914 		else if (field_is_signed)
915 			fn = filter_pred_s32;
916 		else
917 			fn = filter_pred_u32;
918 		break;
919 	case 2:
920 		if (op == OP_EQ || op == OP_NE)
921 			fn = filter_pred_16;
922 		else if (field_is_signed)
923 			fn = filter_pred_s16;
924 		else
925 			fn = filter_pred_u16;
926 		break;
927 	case 1:
928 		if (op == OP_EQ || op == OP_NE)
929 			fn = filter_pred_8;
930 		else if (field_is_signed)
931 			fn = filter_pred_s8;
932 		else
933 			fn = filter_pred_u8;
934 		break;
935 	}
936 
937 	return fn;
938 }
939 
940 static int filter_add_pred(struct filter_parse_state *ps,
941 			   struct ftrace_event_call *call,
942 			   struct event_filter *filter,
943 			   struct filter_pred *pred,
944 			   struct pred_stack *stack,
945 			   bool dry_run)
946 {
947 	struct ftrace_event_field *field;
948 	filter_pred_fn_t fn;
949 	unsigned long long val;
950 	int ret;
951 
952 	fn = pred->fn = filter_pred_none;
953 
954 	if (pred->op == OP_AND)
955 		goto add_pred_fn;
956 	else if (pred->op == OP_OR)
957 		goto add_pred_fn;
958 
959 	field = find_event_field(call, pred->field_name);
960 	if (!field) {
961 		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
962 		return -EINVAL;
963 	}
964 
965 	pred->offset = field->offset;
966 
967 	if (!is_legal_op(field, pred->op)) {
968 		parse_error(ps, FILT_ERR_ILLEGAL_FIELD_OP, 0);
969 		return -EINVAL;
970 	}
971 
972 	if (is_string_field(field)) {
973 		filter_build_regex(pred);
974 
975 		if (field->filter_type == FILTER_STATIC_STRING) {
976 			fn = filter_pred_string;
977 			pred->regex.field_len = field->size;
978 		} else if (field->filter_type == FILTER_DYN_STRING)
979 			fn = filter_pred_strloc;
980 		else
981 			fn = filter_pred_pchar;
982 	} else {
983 		if (field->is_signed)
984 			ret = strict_strtoll(pred->regex.pattern, 0, &val);
985 		else
986 			ret = strict_strtoull(pred->regex.pattern, 0, &val);
987 		if (ret) {
988 			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
989 			return -EINVAL;
990 		}
991 		pred->val = val;
992 
993 		fn = select_comparison_fn(pred->op, field->size,
994 					  field->is_signed);
995 		if (!fn) {
996 			parse_error(ps, FILT_ERR_INVALID_OP, 0);
997 			return -EINVAL;
998 		}
999 	}
1000 
1001 	if (pred->op == OP_NE)
1002 		pred->not = 1;
1003 
1004 add_pred_fn:
1005 	if (!dry_run)
1006 		return filter_add_pred_fn(ps, call, filter, pred, stack, fn);
1007 	return 0;
1008 }
1009 
1010 static void parse_init(struct filter_parse_state *ps,
1011 		       struct filter_op *ops,
1012 		       char *infix_string)
1013 {
1014 	memset(ps, '\0', sizeof(*ps));
1015 
1016 	ps->infix.string = infix_string;
1017 	ps->infix.cnt = strlen(infix_string);
1018 	ps->ops = ops;
1019 
1020 	INIT_LIST_HEAD(&ps->opstack);
1021 	INIT_LIST_HEAD(&ps->postfix);
1022 }
1023 
1024 static char infix_next(struct filter_parse_state *ps)
1025 {
1026 	ps->infix.cnt--;
1027 
1028 	return ps->infix.string[ps->infix.tail++];
1029 }
1030 
1031 static char infix_peek(struct filter_parse_state *ps)
1032 {
1033 	if (ps->infix.tail == strlen(ps->infix.string))
1034 		return 0;
1035 
1036 	return ps->infix.string[ps->infix.tail];
1037 }
1038 
1039 static void infix_advance(struct filter_parse_state *ps)
1040 {
1041 	ps->infix.cnt--;
1042 	ps->infix.tail++;
1043 }
1044 
1045 static inline int is_precedence_lower(struct filter_parse_state *ps,
1046 				      int a, int b)
1047 {
1048 	return ps->ops[a].precedence < ps->ops[b].precedence;
1049 }
1050 
1051 static inline int is_op_char(struct filter_parse_state *ps, char c)
1052 {
1053 	int i;
1054 
1055 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1056 		if (ps->ops[i].string[0] == c)
1057 			return 1;
1058 	}
1059 
1060 	return 0;
1061 }
1062 
1063 static int infix_get_op(struct filter_parse_state *ps, char firstc)
1064 {
1065 	char nextc = infix_peek(ps);
1066 	char opstr[3];
1067 	int i;
1068 
1069 	opstr[0] = firstc;
1070 	opstr[1] = nextc;
1071 	opstr[2] = '\0';
1072 
1073 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1074 		if (!strcmp(opstr, ps->ops[i].string)) {
1075 			infix_advance(ps);
1076 			return ps->ops[i].id;
1077 		}
1078 	}
1079 
1080 	opstr[1] = '\0';
1081 
1082 	for (i = 0; strcmp(ps->ops[i].string, "OP_NONE"); i++) {
1083 		if (!strcmp(opstr, ps->ops[i].string))
1084 			return ps->ops[i].id;
1085 	}
1086 
1087 	return OP_NONE;
1088 }
1089 
1090 static inline void clear_operand_string(struct filter_parse_state *ps)
1091 {
1092 	memset(ps->operand.string, '\0', MAX_FILTER_STR_VAL);
1093 	ps->operand.tail = 0;
1094 }
1095 
1096 static inline int append_operand_char(struct filter_parse_state *ps, char c)
1097 {
1098 	if (ps->operand.tail == MAX_FILTER_STR_VAL - 1)
1099 		return -EINVAL;
1100 
1101 	ps->operand.string[ps->operand.tail++] = c;
1102 
1103 	return 0;
1104 }
1105 
1106 static int filter_opstack_push(struct filter_parse_state *ps, int op)
1107 {
1108 	struct opstack_op *opstack_op;
1109 
1110 	opstack_op = kmalloc(sizeof(*opstack_op), GFP_KERNEL);
1111 	if (!opstack_op)
1112 		return -ENOMEM;
1113 
1114 	opstack_op->op = op;
1115 	list_add(&opstack_op->list, &ps->opstack);
1116 
1117 	return 0;
1118 }
1119 
1120 static int filter_opstack_empty(struct filter_parse_state *ps)
1121 {
1122 	return list_empty(&ps->opstack);
1123 }
1124 
1125 static int filter_opstack_top(struct filter_parse_state *ps)
1126 {
1127 	struct opstack_op *opstack_op;
1128 
1129 	if (filter_opstack_empty(ps))
1130 		return OP_NONE;
1131 
1132 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1133 
1134 	return opstack_op->op;
1135 }
1136 
1137 static int filter_opstack_pop(struct filter_parse_state *ps)
1138 {
1139 	struct opstack_op *opstack_op;
1140 	int op;
1141 
1142 	if (filter_opstack_empty(ps))
1143 		return OP_NONE;
1144 
1145 	opstack_op = list_first_entry(&ps->opstack, struct opstack_op, list);
1146 	op = opstack_op->op;
1147 	list_del(&opstack_op->list);
1148 
1149 	kfree(opstack_op);
1150 
1151 	return op;
1152 }
1153 
1154 static void filter_opstack_clear(struct filter_parse_state *ps)
1155 {
1156 	while (!filter_opstack_empty(ps))
1157 		filter_opstack_pop(ps);
1158 }
1159 
1160 static char *curr_operand(struct filter_parse_state *ps)
1161 {
1162 	return ps->operand.string;
1163 }
1164 
1165 static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
1166 {
1167 	struct postfix_elt *elt;
1168 
1169 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1170 	if (!elt)
1171 		return -ENOMEM;
1172 
1173 	elt->op = OP_NONE;
1174 	elt->operand = kstrdup(operand, GFP_KERNEL);
1175 	if (!elt->operand) {
1176 		kfree(elt);
1177 		return -ENOMEM;
1178 	}
1179 
1180 	list_add_tail(&elt->list, &ps->postfix);
1181 
1182 	return 0;
1183 }
1184 
1185 static int postfix_append_op(struct filter_parse_state *ps, int op)
1186 {
1187 	struct postfix_elt *elt;
1188 
1189 	elt = kmalloc(sizeof(*elt), GFP_KERNEL);
1190 	if (!elt)
1191 		return -ENOMEM;
1192 
1193 	elt->op = op;
1194 	elt->operand = NULL;
1195 
1196 	list_add_tail(&elt->list, &ps->postfix);
1197 
1198 	return 0;
1199 }
1200 
1201 static void postfix_clear(struct filter_parse_state *ps)
1202 {
1203 	struct postfix_elt *elt;
1204 
1205 	while (!list_empty(&ps->postfix)) {
1206 		elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
1207 		list_del(&elt->list);
1208 		kfree(elt->operand);
1209 		kfree(elt);
1210 	}
1211 }
1212 
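/*
 * Convert the infix filter string into a postfix list using an
 * explicit operator stack (shunting-yard). Operand characters are
 * accumulated and appended when an operator or ')' is seen; operators
 * of equal or higher precedence are popped to the postfix list before
 * a new operator is pushed. For example, "a == 1 && b == 2" becomes
 * the postfix sequence: a 1 == b 2 == &&
 */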
1213 static int filter_parse(struct filter_parse_state *ps)
1214 {
1215 	int in_string = 0;
1216 	int op, top_op;
1217 	char ch;
1218 
1219 	while ((ch = infix_next(ps))) {
1220 		if (ch == '"') {
1221 			in_string ^= 1;
1222 			continue;
1223 		}
1224 
1225 		if (in_string)
1226 			goto parse_operand;
1227 
1228 		if (isspace(ch))
1229 			continue;
1230 
1231 		if (is_op_char(ps, ch)) {
1232 			op = infix_get_op(ps, ch);
1233 			if (op == OP_NONE) {
1234 				parse_error(ps, FILT_ERR_INVALID_OP, 0);
1235 				return -EINVAL;
1236 			}
1237 
1238 			if (strlen(curr_operand(ps))) {
1239 				postfix_append_operand(ps, curr_operand(ps));
1240 				clear_operand_string(ps);
1241 			}
1242 
1243 			while (!filter_opstack_empty(ps)) {
1244 				top_op = filter_opstack_top(ps);
1245 				if (!is_precedence_lower(ps, top_op, op)) {
1246 					top_op = filter_opstack_pop(ps);
1247 					postfix_append_op(ps, top_op);
1248 					continue;
1249 				}
1250 				break;
1251 			}
1252 
1253 			filter_opstack_push(ps, op);
1254 			continue;
1255 		}
1256 
1257 		if (ch == '(') {
1258 			filter_opstack_push(ps, OP_OPEN_PAREN);
1259 			continue;
1260 		}
1261 
1262 		if (ch == ')') {
1263 			if (strlen(curr_operand(ps))) {
1264 				postfix_append_operand(ps, curr_operand(ps));
1265 				clear_operand_string(ps);
1266 			}
1267 
1268 			top_op = filter_opstack_pop(ps);
1269 			while (top_op != OP_NONE) {
1270 				if (top_op == OP_OPEN_PAREN)
1271 					break;
1272 				postfix_append_op(ps, top_op);
1273 				top_op = filter_opstack_pop(ps);
1274 			}
1275 			if (top_op == OP_NONE) {
1276 				parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1277 				return -EINVAL;
1278 			}
1279 			continue;
1280 		}
1281 parse_operand:
1282 		if (append_operand_char(ps, ch)) {
1283 			parse_error(ps, FILT_ERR_OPERAND_TOO_LONG, 0);
1284 			return -EINVAL;
1285 		}
1286 	}
1287 
1288 	if (strlen(curr_operand(ps)))
1289 		postfix_append_operand(ps, curr_operand(ps));
1290 
1291 	while (!filter_opstack_empty(ps)) {
1292 		top_op = filter_opstack_pop(ps);
1293 		if (top_op == OP_NONE)
1294 			break;
1295 		if (top_op == OP_OPEN_PAREN) {
1296 			parse_error(ps, FILT_ERR_UNBALANCED_PAREN, 0);
1297 			return -EINVAL;
1298 		}
1299 		postfix_append_op(ps, top_op);
1300 	}
1301 
1302 	return 0;
1303 }
1304 
1305 static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
1306 {
1307 	struct filter_pred *pred;
1308 
1309 	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
1310 	if (!pred)
1311 		return NULL;
1312 
1313 	pred->field_name = kstrdup(operand1, GFP_KERNEL);
1314 	if (!pred->field_name) {
1315 		kfree(pred);
1316 		return NULL;
1317 	}
1318 
1319 	strcpy(pred->regex.pattern, operand2);
1320 	pred->regex.len = strlen(pred->regex.pattern);
1321 
1322 	pred->op = op;
1323 
1324 	return pred;
1325 }
1326 
1327 static struct filter_pred *create_logical_pred(int op)
1328 {
1329 	struct filter_pred *pred;
1330 
1331 	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
1332 	if (!pred)
1333 		return NULL;
1334 
1335 	pred->op = op;
1336 
1337 	return pred;
1338 }
1339 
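/*
 * Sanity check the postfix expression: there must be at least one
 * comparison, and since every AND/OR consumes two sub-expressions
 * there must be fewer logical operators than comparisons. This rejects
 * meaningless strings such as "()" or a bare "&&".
 */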
1340 static int check_preds(struct filter_parse_state *ps)
1341 {
1342 	int n_normal_preds = 0, n_logical_preds = 0;
1343 	struct postfix_elt *elt;
1344 
1345 	list_for_each_entry(elt, &ps->postfix, list) {
1346 		if (elt->op == OP_NONE)
1347 			continue;
1348 
1349 		if (elt->op == OP_AND || elt->op == OP_OR) {
1350 			n_logical_preds++;
1351 			continue;
1352 		}
1353 		n_normal_preds++;
1354 	}
1355 
1356 	if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
1357 		parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
1358 		return -EINVAL;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static int count_preds(struct filter_parse_state *ps)
1365 {
1366 	struct postfix_elt *elt;
1367 	int n_preds = 0;
1368 
1369 	list_for_each_entry(elt, &ps->postfix, list) {
1370 		if (elt->op == OP_NONE)
1371 			continue;
1372 		n_preds++;
1373 	}
1374 
1375 	return n_preds;
1376 }
1377 
1378 /*
1379  * The tree is walked at filtering of an event. If the tree is not correctly
1380  * built, it may cause an infinite loop. Check here that the tree does
1381  * indeed terminate.
1382  */
1383 static int check_pred_tree(struct event_filter *filter,
1384 			   struct filter_pred *root)
1385 {
1386 	struct filter_pred *preds;
1387 	struct filter_pred *pred;
1388 	enum move_type move = MOVE_DOWN;
1389 	int count = 0;
1390 	int done = 0;
1391 	int max;
1392 
1393 	/*
1394 	 * A node can be hit at most three times:
1395 	 * once going down, once coming up from the left, and
1396 	 * once coming up from the right. This is more than enough
1397 	 * since leaves are only hit a single time.
1398 	 */
1399 	max = 3 * filter->n_preds;
1400 
1401 	preds = filter->preds;
1402 	if  (!preds)
1403 		return -EINVAL;
1404 	pred = root;
1405 
1406 	do {
1407 		if (WARN_ON(count++ > max))
1408 			return -EINVAL;
1409 
1410 		switch (move) {
1411 		case MOVE_DOWN:
1412 			if (pred->left != FILTER_PRED_INVALID) {
1413 				pred = &preds[pred->left];
1414 				continue;
1415 			}
1416 			/* A leaf at the root is just a leaf in the tree */
1417 			if (pred == root)
1418 				break;
1419 			pred = get_pred_parent(pred, preds,
1420 					       pred->parent, &move);
1421 			continue;
1422 		case MOVE_UP_FROM_LEFT:
1423 			pred = &preds[pred->right];
1424 			move = MOVE_DOWN;
1425 			continue;
1426 		case MOVE_UP_FROM_RIGHT:
1427 			if (pred == root)
1428 				break;
1429 			pred = get_pred_parent(pred, preds,
1430 					       pred->parent, &move);
1431 			continue;
1432 		}
1433 		done = 1;
1434 	} while (!done);
1435 
1436 	/* We are fine. */
1437 	return 0;
1438 }
1439 
1440 static int count_leafs(struct filter_pred *preds, struct filter_pred *root)
1441 {
1442 	struct filter_pred *pred;
1443 	enum move_type move = MOVE_DOWN;
1444 	int count = 0;
1445 	int done = 0;
1446 
1447 	pred = root;
1448 
1449 	do {
1450 		switch (move) {
1451 		case MOVE_DOWN:
1452 			if (pred->left != FILTER_PRED_INVALID) {
1453 				pred = &preds[pred->left];
1454 				continue;
1455 			}
1456 			/* A leaf at the root is just a leaf in the tree */
1457 			if (pred == root)
1458 				return 1;
1459 			count++;
1460 			pred = get_pred_parent(pred, preds,
1461 					       pred->parent, &move);
1462 			continue;
1463 		case MOVE_UP_FROM_LEFT:
1464 			pred = &preds[pred->right];
1465 			move = MOVE_DOWN;
1466 			continue;
1467 		case MOVE_UP_FROM_RIGHT:
1468 			if (pred == root)
1469 				break;
1470 			pred = get_pred_parent(pred, preds,
1471 					       pred->parent, &move);
1472 			continue;
1473 		}
1474 		done = 1;
1475 	} while (!done);
1476 
1477 	return count;
1478 }
1479 
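/*
 * Fold a single AND/OR node: walk its subtree and record the index of
 * every leaf in root->ops[], with root->val holding the count, so that
 * process_ops() can later scan the leaves linearly instead of walking
 * the tree.
 */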
1480 static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
1481 {
1482 	struct filter_pred *pred;
1483 	enum move_type move = MOVE_DOWN;
1484 	int count = 0;
1485 	int children;
1486 	int done = 0;
1487 
1488 	/* No need to keep the fold flag */
1489 	root->index &= ~FILTER_PRED_FOLD;
1490 
1491 	/* If the root is a leaf then do nothing */
1492 	if (root->left == FILTER_PRED_INVALID)
1493 		return 0;
1494 
1495 	/* count the children */
1496 	children = count_leafs(preds, &preds[root->left]);
1497 	children += count_leafs(preds, &preds[root->right]);
1498 
1499 	root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
1500 	if (!root->ops)
1501 		return -ENOMEM;
1502 
1503 	root->val = children;
1504 
1505 	pred = root;
1506 	do {
1507 		switch (move) {
1508 		case MOVE_DOWN:
1509 			if (pred->left != FILTER_PRED_INVALID) {
1510 				pred = &preds[pred->left];
1511 				continue;
1512 			}
1513 			if (WARN_ON(count == children))
1514 				return -EINVAL;
1515 			pred->index &= ~FILTER_PRED_FOLD;
1516 			root->ops[count++] = pred->index;
1517 			pred = get_pred_parent(pred, preds,
1518 					       pred->parent, &move);
1519 			continue;
1520 		case MOVE_UP_FROM_LEFT:
1521 			pred = &preds[pred->right];
1522 			move = MOVE_DOWN;
1523 			continue;
1524 		case MOVE_UP_FROM_RIGHT:
1525 			if (pred == root)
1526 				break;
1527 			pred = get_pred_parent(pred, preds,
1528 					       pred->parent, &move);
1529 			continue;
1530 		}
1531 		done = 1;
1532 	} while (!done);
1533 
1534 	return 0;
1535 }
1536 
1537 /*
1538  * To optimize the processing of the ops, if we have several "ors" or
1539  * "ands" together, we can put them in an array and process them all
1540  * together speeding up the filter logic.
1541  */
1542 static int fold_pred_tree(struct event_filter *filter,
1543 			   struct filter_pred *root)
1544 {
1545 	struct filter_pred *preds;
1546 	struct filter_pred *pred;
1547 	enum move_type move = MOVE_DOWN;
1548 	int done = 0;
1549 	int err;
1550 
1551 	preds = filter->preds;
1552 	if  (!preds)
1553 		return -EINVAL;
1554 	pred = root;
1555 
1556 	do {
1557 		switch (move) {
1558 		case MOVE_DOWN:
1559 			if (pred->index & FILTER_PRED_FOLD) {
1560 				err = fold_pred(preds, pred);
1561 				if (err)
1562 					return err;
1563 				/* Folded nodes are like leaves */
1564 			} else if (pred->left != FILTER_PRED_INVALID) {
1565 				pred = &preds[pred->left];
1566 				continue;
1567 			}
1568 
1569 			/* A leaf at the root is just a leaf in the tree */
1570 			if (pred == root)
1571 				break;
1572 			pred = get_pred_parent(pred, preds,
1573 					       pred->parent, &move);
1574 			continue;
1575 		case MOVE_UP_FROM_LEFT:
1576 			pred = &preds[pred->right];
1577 			move = MOVE_DOWN;
1578 			continue;
1579 		case MOVE_UP_FROM_RIGHT:
1580 			if (pred == root)
1581 				break;
1582 			pred = get_pred_parent(pred, preds,
1583 					       pred->parent, &move);
1584 			continue;
1585 		}
1586 		done = 1;
1587 	} while (!done);
1588 
1589 	return 0;
1590 }
1591 
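/*
 * Walk the postfix list and build the predicate tree. Each operator
 * element creates a filter_pred (comparisons take the two preceding
 * operands as field name and value), which filter_add_pred() places in
 * filter->preds[] and links up via the pred stack. With dry_run set,
 * the predicates are only validated and nothing is written to the
 * filter. At the end, the single predicate left on the stack becomes
 * the root; the tree is then checked for termination and folded.
 */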
1592 static int replace_preds(struct ftrace_event_call *call,
1593 			 struct event_filter *filter,
1594 			 struct filter_parse_state *ps,
1595 			 char *filter_string,
1596 			 bool dry_run)
1597 {
1598 	char *operand1 = NULL, *operand2 = NULL;
1599 	struct filter_pred *pred;
1600 	struct filter_pred *root;
1601 	struct postfix_elt *elt;
1602 	struct pred_stack stack = { }; /* init to NULL */
1603 	int err;
1604 	int n_preds = 0;
1605 
1606 	n_preds = count_preds(ps);
1607 	if (n_preds >= MAX_FILTER_PRED) {
1608 		parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1609 		return -ENOSPC;
1610 	}
1611 
1612 	err = check_preds(ps);
1613 	if (err)
1614 		return err;
1615 
1616 	if (!dry_run) {
1617 		err = __alloc_pred_stack(&stack, n_preds);
1618 		if (err)
1619 			return err;
1620 		err = __alloc_preds(filter, n_preds);
1621 		if (err)
1622 			goto fail;
1623 	}
1624 
1625 	n_preds = 0;
1626 	list_for_each_entry(elt, &ps->postfix, list) {
1627 		if (elt->op == OP_NONE) {
1628 			if (!operand1)
1629 				operand1 = elt->operand;
1630 			else if (!operand2)
1631 				operand2 = elt->operand;
1632 			else {
1633 				parse_error(ps, FILT_ERR_TOO_MANY_OPERANDS, 0);
1634 				err = -EINVAL;
1635 				goto fail;
1636 			}
1637 			continue;
1638 		}
1639 
1640 		if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1641 			parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1642 			err = -ENOSPC;
1643 			goto fail;
1644 		}
1645 
1646 		if (elt->op == OP_AND || elt->op == OP_OR) {
1647 			pred = create_logical_pred(elt->op);
1648 			goto add_pred;
1649 		}
1650 
1651 		if (!operand1 || !operand2) {
1652 			parse_error(ps, FILT_ERR_MISSING_FIELD, 0);
1653 			err = -EINVAL;
1654 			goto fail;
1655 		}
1656 
1657 		pred = create_pred(elt->op, operand1, operand2);
1658 add_pred:
1659 		if (!pred) {
1660 			err = -ENOMEM;
1661 			goto fail;
1662 		}
1663 		err = filter_add_pred(ps, call, filter, pred, &stack, dry_run);
1664 		filter_free_pred(pred);
1665 		if (err)
1666 			goto fail;
1667 
1668 		operand1 = operand2 = NULL;
1669 	}
1670 
1671 	if (!dry_run) {
1672 		/* We should have one item left on the stack */
1673 		pred = __pop_pred_stack(&stack);
1674 		if (!pred)
1675 			return -EINVAL;
1676 		/* This item is where we start from in matching */
1677 		root = pred;
1678 		/* Make sure the stack is empty */
1679 		pred = __pop_pred_stack(&stack);
1680 		if (WARN_ON(pred)) {
1681 			err = -EINVAL;
1682 			filter->root = NULL;
1683 			goto fail;
1684 		}
1685 		err = check_pred_tree(filter, root);
1686 		if (err)
1687 			goto fail;
1688 
1689 		/* Optimize the tree */
1690 		err = fold_pred_tree(filter, root);
1691 		if (err)
1692 			goto fail;
1693 
1694 		/* We don't set root until we know it works */
1695 		barrier();
1696 		filter->root = root;
1697 	}
1698 
1699 	err = 0;
1700 fail:
1701 	__free_pred_stack(&stack);
1702 	return err;
1703 }
1704 
1705 struct filter_list {
1706 	struct list_head	list;
1707 	struct event_filter	*filter;
1708 };
1709 
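/*
 * Apply a filter string to every event in a subsystem. A first pass
 * dry-runs the parse against each event and fails if any of them
 * cannot take the filter; a second pass then builds a new filter for
 * each event and swaps it in, keeping the old filters on a temporary
 * list so they can be freed after a synchronize_sched().
 */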
1710 static int replace_system_preds(struct event_subsystem *system,
1711 				struct filter_parse_state *ps,
1712 				char *filter_string)
1713 {
1714 	struct ftrace_event_call *call;
1715 	struct filter_list *filter_item;
1716 	struct filter_list *tmp;
1717 	LIST_HEAD(filter_list);
1718 	bool fail = true;
1719 	int err;
1720 
1721 	list_for_each_entry(call, &ftrace_events, list) {
1722 
1723 		if (strcmp(call->class->system, system->name) != 0)
1724 			continue;
1725 
1726 		/*
1727 		 * Try to see if the filter can be applied
1728 		 *  (filter arg is ignored on dry_run)
1729 		 */
1730 		err = replace_preds(call, NULL, ps, filter_string, true);
1731 		if (err)
1732 			goto fail;
1733 	}
1734 
1735 	list_for_each_entry(call, &ftrace_events, list) {
1736 		struct event_filter *filter;
1737 
1738 		if (strcmp(call->class->system, system->name) != 0)
1739 			continue;
1740 
1741 		filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
1742 		if (!filter_item)
1743 			goto fail_mem;
1744 
1745 		list_add_tail(&filter_item->list, &filter_list);
1746 
1747 		filter_item->filter = __alloc_filter();
1748 		if (!filter_item->filter)
1749 			goto fail_mem;
1750 		filter = filter_item->filter;
1751 
1752 		/* Can only fail on no memory */
1753 		err = replace_filter_string(filter, filter_string);
1754 		if (err)
1755 			goto fail_mem;
1756 
1757 		err = replace_preds(call, filter, ps, filter_string, false);
1758 		if (err) {
1759 			filter_disable(call);
1760 			parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1761 			append_filter_err(ps, filter);
1762 		} else
1763 			call->flags |= TRACE_EVENT_FL_FILTERED;
1764 		/*
1765 		 * Regardless of whether this returned an error, we still
1766 		 * replace the filter for the call.
1767 		 */
1768 		filter = call->filter;
1769 		call->filter = filter_item->filter;
1770 		filter_item->filter = filter;
1771 
1772 		fail = false;
1773 	}
1774 
1775 	if (fail)
1776 		goto fail;
1777 
1778 	/*
1779 	 * The calls can still be using the old filters.
1780 	 * Do a synchronize_sched() to ensure all calls are
1781 	 * done with them before we free them.
1782 	 */
1783 	synchronize_sched();
1784 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1785 		__free_filter(filter_item->filter);
1786 		list_del(&filter_item->list);
1787 		kfree(filter_item);
1788 	}
1789 	return 0;
1790  fail:
1791 	/* No call succeeded */
1792 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1793 		list_del(&filter_item->list);
1794 		kfree(filter_item);
1795 	}
1796 	parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1797 	return -EINVAL;
1798  fail_mem:
1799 	/* If any call succeeded, we still need to sync */
1800 	if (!fail)
1801 		synchronize_sched();
1802 	list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
1803 		__free_filter(filter_item->filter);
1804 		list_del(&filter_item->list);
1805 		kfree(filter_item);
1806 	}
1807 	return -ENOMEM;
1808 }
1809 
1810 int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1811 {
1812 	struct filter_parse_state *ps;
1813 	struct event_filter *filter;
1814 	struct event_filter *tmp;
1815 	int err = 0;
1816 
1817 	mutex_lock(&event_mutex);
1818 
1819 	if (!strcmp(strstrip(filter_string), "0")) {
1820 		filter_disable(call);
1821 		filter = call->filter;
1822 		if (!filter)
1823 			goto out_unlock;
1824 		call->filter = NULL;
1825 		/* Make sure the filter is not being used */
1826 		synchronize_sched();
1827 		__free_filter(filter);
1828 		goto out_unlock;
1829 	}
1830 
1831 	err = -ENOMEM;
1832 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1833 	if (!ps)
1834 		goto out_unlock;
1835 
1836 	filter = __alloc_filter();
1837 	if (!filter) {
1838 		kfree(ps);
1839 		goto out_unlock;
1840 	}
1841 
1842 	replace_filter_string(filter, filter_string);
1843 
1844 	parse_init(ps, filter_ops, filter_string);
1845 	err = filter_parse(ps);
1846 	if (err) {
1847 		append_filter_err(ps, filter);
1848 		goto out;
1849 	}
1850 
1851 	err = replace_preds(call, filter, ps, filter_string, false);
1852 	if (err) {
1853 		filter_disable(call);
1854 		append_filter_err(ps, filter);
1855 	} else
1856 		call->flags |= TRACE_EVENT_FL_FILTERED;
1857 out:
1858 	/*
1859 	 * Always swap the call filter with the new filter
1860 	 * even if there was an error. If there was an error
1861 	 * in the filter, we disable the filter and show the error
1862 	 * string.
1863 	 */
1864 	tmp = call->filter;
1865 	call->filter = filter;
1866 	if (tmp) {
1867 		/* Make sure the call is done with the filter */
1868 		synchronize_sched();
1869 		__free_filter(tmp);
1870 	}
1871 	filter_opstack_clear(ps);
1872 	postfix_clear(ps);
1873 	kfree(ps);
1874 out_unlock:
1875 	mutex_unlock(&event_mutex);
1876 
1877 	return err;
1878 }
1879 
1880 int apply_subsystem_event_filter(struct event_subsystem *system,
1881 				 char *filter_string)
1882 {
1883 	struct filter_parse_state *ps;
1884 	struct event_filter *filter;
1885 	int err = 0;
1886 
1887 	mutex_lock(&event_mutex);
1888 
1889 	if (!strcmp(strstrip(filter_string), "0")) {
1890 		filter_free_subsystem_preds(system);
1891 		remove_filter_string(system->filter);
1892 		filter = system->filter;
1893 		system->filter = NULL;
1894 		/* Ensure all filters are no longer used */
1895 		synchronize_sched();
1896 		filter_free_subsystem_filters(system);
1897 		__free_filter(filter);
1898 		goto out_unlock;
1899 	}
1900 
1901 	err = -ENOMEM;
1902 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1903 	if (!ps)
1904 		goto out_unlock;
1905 
1906 	filter = __alloc_filter();
1907 	if (!filter)
1908 		goto out;
1909 
1910 	replace_filter_string(filter, filter_string);
1911 	/*
1912 	 * No event actually uses the system filter, so
1913 	 * we can free it without synchronize_sched().
1914 	 */
1915 	__free_filter(system->filter);
1916 	system->filter = filter;
1917 
1918 	parse_init(ps, filter_ops, filter_string);
1919 	err = filter_parse(ps);
1920 	if (err) {
1921 		append_filter_err(ps, system->filter);
1922 		goto out;
1923 	}
1924 
1925 	err = replace_system_preds(system, ps, filter_string);
1926 	if (err)
1927 		append_filter_err(ps, system->filter);
1928 
1929 out:
1930 	filter_opstack_clear(ps);
1931 	postfix_clear(ps);
1932 	kfree(ps);
1933 out_unlock:
1934 	mutex_unlock(&event_mutex);
1935 
1936 	return err;
1937 }
1938 
1939 #ifdef CONFIG_PERF_EVENTS
1940 
1941 void ftrace_profile_free_filter(struct perf_event *event)
1942 {
1943 	struct event_filter *filter = event->filter;
1944 
1945 	event->filter = NULL;
1946 	__free_filter(filter);
1947 }
1948 
1949 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1950 			      char *filter_str)
1951 {
1952 	int err;
1953 	struct event_filter *filter;
1954 	struct filter_parse_state *ps;
1955 	struct ftrace_event_call *call = NULL;
1956 
1957 	mutex_lock(&event_mutex);
1958 
1959 	list_for_each_entry(call, &ftrace_events, list) {
1960 		if (call->event.type == event_id)
1961 			break;
1962 	}
1963 
1964 	err = -EINVAL;
1965 	if (&call->list == &ftrace_events)
1966 		goto out_unlock;
1967 
1968 	err = -EEXIST;
1969 	if (event->filter)
1970 		goto out_unlock;
1971 
1972 	filter = __alloc_filter();
1973 	if (!filter) {
1974 		err = -ENOMEM;	/* __alloc_filter() returns NULL, not an ERR_PTR */
1975 		goto out_unlock;
1976 	}
1977 
1978 	err = -ENOMEM;
1979 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1980 	if (!ps)
1981 		goto free_filter;
1982 
1983 	parse_init(ps, filter_ops, filter_str);
1984 	err = filter_parse(ps);
1985 	if (err)
1986 		goto free_ps;
1987 
1988 	err = replace_preds(call, filter, ps, filter_str, false);
1989 	if (!err)
1990 		event->filter = filter;
1991 
1992 free_ps:
1993 	filter_opstack_clear(ps);
1994 	postfix_clear(ps);
1995 	kfree(ps);
1996 
1997 free_filter:
1998 	if (err)
1999 		__free_filter(filter);
2000 
2001 out_unlock:
2002 	mutex_unlock(&event_mutex);
2003 
2004 	return err;
2005 }
2006 
2007 #endif /* CONFIG_PERF_EVENTS */
2008 
2009