1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
9
10 #include <linux/bpf-cgroup.h>
11 #include <linux/security.h>
12 #include <linux/ctype.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/uprobes.h>
16 #include <linux/namei.h>
17 #include <linux/string.h>
18 #include <linux/rculist.h>
19 #include <linux/filter.h>
20
21 #include "trace_dynevent.h"
22 #include "trace_probe.h"
23 #include "trace_probe_tmpl.h"
24
25 #define UPROBE_EVENT_SYSTEM "uprobes"
26
27 struct uprobe_trace_entry_head {
28 struct trace_entry ent;
29 unsigned long vaddr[];
30 };
31
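/*
 * An entry probe records only the probed instruction pointer in vaddr[0];
 * a return probe also records the address returned to, so it needs two
 * slots (vaddr[0] = function entry, vaddr[1] = return address).
 */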
32 #define SIZEOF_TRACE_ENTRY(is_return) \
33 (sizeof(struct uprobe_trace_entry_head) + \
34 sizeof(unsigned long) * (is_return ? 2 : 1))
35
36 #define DATAOF_TRACE_ENTRY(entry, is_return) \
37 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
38
39 static int trace_uprobe_create(const char *raw_command);
40 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
41 static int trace_uprobe_release(struct dyn_event *ev);
42 static bool trace_uprobe_is_busy(struct dyn_event *ev);
43 static bool trace_uprobe_match(const char *system, const char *event,
44 int argc, const char **argv, struct dyn_event *ev);
45
46 static struct dyn_event_operations trace_uprobe_ops = {
47 .create = trace_uprobe_create,
48 .show = trace_uprobe_show,
49 .is_busy = trace_uprobe_is_busy,
50 .free = trace_uprobe_release,
51 .match = trace_uprobe_match,
52 };
53
54 /*
55 * uprobe event core functions
56 */
57 struct trace_uprobe {
58 struct dyn_event devent;
59 struct uprobe_consumer consumer;
60 struct path path;
61 struct inode *inode;
62 char *filename;
63 unsigned long offset;
64 unsigned long ref_ctr_offset;
65 unsigned long nhit;
66 struct trace_probe tp;
67 };
68
static bool is_trace_uprobe(struct dyn_event *ev)
70 {
71 return ev->ops == &trace_uprobe_ops;
72 }
73
static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
75 {
76 return container_of(ev, struct trace_uprobe, devent);
77 }
78
79 /**
80 * for_each_trace_uprobe - iterate over the trace_uprobe list
81 * @pos: the struct trace_uprobe * for each entry
82 * @dpos: the struct dyn_event * to use as a loop cursor
83 */
84 #define for_each_trace_uprobe(pos, dpos) \
85 for_each_dyn_event(dpos) \
86 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
87
88 static int register_uprobe_event(struct trace_uprobe *tu);
89 static int unregister_uprobe_event(struct trace_uprobe *tu);
90
91 struct uprobe_dispatch_data {
92 struct trace_uprobe *tu;
93 unsigned long bp_addr;
94 };
95
96 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
97 static int uretprobe_dispatcher(struct uprobe_consumer *con,
98 unsigned long func, struct pt_regs *regs);
99
100 #ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
102 {
103 return addr - (n * sizeof(long));
104 }
105 #else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
107 {
108 return addr + (n * sizeof(long));
109 }
110 #endif
111
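/*
 * Fetch the n-th word (in sizeof(long) units) from the user stack, walking
 * in the direction the stack grows. Returns 0 if the word cannot be copied
 * from user space.
 */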
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
113 {
114 unsigned long ret;
115 unsigned long addr = user_stack_pointer(regs);
116
117 addr = adjust_stack_addr(addr, n);
118
119 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
120 return 0;
121
122 return ret;
123 }
124
125 /*
126 * Uprobes-specific fetch functions
127 */
128 static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
130 {
131 void __user *vaddr = (void __force __user *)src;
132
133 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
134 }
135
136 static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
138 {
139 return probe_mem_read(dest, src, size);
140 }
141
142 /*
143 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
144 * length and relative data location.
145 */
146 static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
148 {
149 long ret;
150 u32 loc = *(u32 *)dest;
151 int maxlen = get_loc_len(loc);
152 u8 *dst = get_loc_data(dest, base);
153 void __user *src = (void __force __user *) addr;
154
155 if (unlikely(!maxlen))
156 return -ENOMEM;
157
158 if (addr == FETCH_TOKEN_COMM)
159 ret = strlcpy(dst, current->comm, maxlen);
160 else
161 ret = strncpy_from_user(dst, src, maxlen);
162 if (ret >= 0) {
163 if (ret == maxlen)
164 dst[ret - 1] = '\0';
165 else
166 /*
167 * Include the terminating null byte. In this case it
168 * was copied by strncpy_from_user but not accounted
169 * for in ret.
170 */
171 ret++;
172 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
173 }
174
175 return ret;
176 }
177
178 static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
180 {
181 return fetch_store_string(addr, dest, base);
182 }
183
/* Return the length of the string, including the terminating null byte */
185 static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
187 {
188 int len;
189 void __user *vaddr = (void __force __user *) addr;
190
191 if (addr == FETCH_TOKEN_COMM)
192 len = strlen(current->comm) + 1;
193 else
194 len = strnlen_user(vaddr, MAX_STRING_SIZE);
195
196 return (len > MAX_STRING_SIZE) ? 0 : len;
197 }
198
199 static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
201 {
202 return fetch_store_strlen(addr);
203 }
204
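/*
 * Translate a file offset in the probed binary into a virtual address in
 * the current task, using the breakpoint address stashed in current->utask
 * by the dispatcher.
 */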
static unsigned long translate_user_vaddr(unsigned long file_offset)
206 {
207 unsigned long base_addr;
208 struct uprobe_dispatch_data *udd;
209
210 udd = (void *) current->utask->vaddr;
211
212 base_addr = udd->bp_addr - udd->tu->offset;
213 return base_addr + file_offset;
214 }
215
216 /* Note that we don't verify it, since the code does not come from user space */
217 static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
220 {
221 struct pt_regs *regs = rec;
222 unsigned long val;
223
224 /* 1st stage: get value from context */
225 switch (code->op) {
226 case FETCH_OP_REG:
227 val = regs_get_register(regs, code->param);
228 break;
229 case FETCH_OP_STACK:
230 val = get_user_stack_nth(regs, code->param);
231 break;
232 case FETCH_OP_STACKP:
233 val = user_stack_pointer(regs);
234 break;
235 case FETCH_OP_RETVAL:
236 val = regs_return_value(regs);
237 break;
238 case FETCH_OP_IMM:
239 val = code->immediate;
240 break;
241 case FETCH_OP_COMM:
242 val = FETCH_TOKEN_COMM;
243 break;
244 case FETCH_OP_DATA:
245 val = (unsigned long)code->data;
246 break;
247 case FETCH_OP_FOFFS:
248 val = translate_user_vaddr(code->immediate);
249 break;
250 default:
251 return -EILSEQ;
252 }
253 code++;
254
255 return process_fetch_insn_bottom(code, val, dest, base);
256 }
NOKPROBE_SYMBOL(process_fetch_insn)
258
259 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
260 {
261 rwlock_init(&filter->rwlock);
262 filter->nr_systemwide = 0;
263 INIT_LIST_HEAD(&filter->perf_events);
264 }
265
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
267 {
268 return !filter->nr_systemwide && list_empty(&filter->perf_events);
269 }
270
static inline bool is_ret_probe(struct trace_uprobe *tu)
272 {
273 return tu->consumer.ret_handler != NULL;
274 }
275
static bool trace_uprobe_is_busy(struct dyn_event *ev)
277 {
278 struct trace_uprobe *tu = to_trace_uprobe(ev);
279
280 return trace_probe_is_enabled(&tu->tp);
281 }
282
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
285 {
286 char buf[MAX_ARGSTR_LEN + 1];
287 int len;
288
289 if (!argc)
290 return true;
291
292 len = strlen(tu->filename);
293 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
294 return false;
295
296 if (tu->ref_ctr_offset == 0)
297 snprintf(buf, sizeof(buf), "0x%0*lx",
298 (int)(sizeof(void *) * 2), tu->offset);
299 else
300 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
301 (int)(sizeof(void *) * 2), tu->offset,
302 tu->ref_ctr_offset);
303 if (strcmp(buf, &argv[0][len + 1]))
304 return false;
305
306 argc--; argv++;
307
308 return trace_probe_match_command_args(&tu->tp, argc, argv);
309 }
310
static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
313 {
314 struct trace_uprobe *tu = to_trace_uprobe(ev);
315
316 return (event[0] == '\0' ||
317 strcmp(trace_probe_name(&tu->tp), event) == 0) &&
318 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
319 trace_uprobe_match_command_head(tu, argc, argv);
320 }
321
322 static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
324 {
325 struct trace_probe *tp;
326
327 tp = trace_probe_primary_from_call(call);
328 if (WARN_ON_ONCE(!tp))
329 return NULL;
330
331 return container_of(tp, struct trace_uprobe, tp);
332 }
333
334 /*
335 * Allocate new trace_uprobe and initialize it (including uprobes).
336 */
337 static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
339 {
340 struct trace_uprobe *tu;
341 int ret;
342
343 tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
344 if (!tu)
345 return ERR_PTR(-ENOMEM);
346
347 ret = trace_probe_init(&tu->tp, event, group, true);
348 if (ret < 0)
349 goto error;
350
351 dyn_event_init(&tu->devent, &trace_uprobe_ops);
352 tu->consumer.handler = uprobe_dispatcher;
353 if (is_ret)
354 tu->consumer.ret_handler = uretprobe_dispatcher;
355 init_trace_uprobe_filter(tu->tp.event->filter);
356 return tu;
357
358 error:
359 kfree(tu);
360
361 return ERR_PTR(ret);
362 }
363
static void free_trace_uprobe(struct trace_uprobe *tu)
365 {
366 if (!tu)
367 return;
368
369 path_put(&tu->path);
370 trace_probe_cleanup(&tu->tp);
371 kfree(tu->filename);
372 kfree(tu);
373 }
374
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
376 {
377 struct dyn_event *pos;
378 struct trace_uprobe *tu;
379
380 for_each_trace_uprobe(tu, pos)
381 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
382 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
383 return tu;
384
385 return NULL;
386 }
387
388 /* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
390 {
391 int ret;
392
393 if (trace_probe_has_sibling(&tu->tp))
394 goto unreg;
395
396 /* If there's a reference to the dynamic event */
397 if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
398 return -EBUSY;
399
400 ret = unregister_uprobe_event(tu);
401 if (ret)
402 return ret;
403
404 unreg:
405 dyn_event_remove(&tu->devent);
406 trace_probe_unlink(&tu->tp);
407 free_trace_uprobe(tu);
408 return 0;
409 }
410
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
413 {
414 struct trace_probe_event *tpe = orig->tp.event;
415 struct inode *comp_inode = d_real_inode(comp->path.dentry);
416 int i;
417
418 list_for_each_entry(orig, &tpe->probes, tp.list) {
419 if (comp_inode != d_real_inode(orig->path.dentry) ||
420 comp->offset != orig->offset)
421 continue;
422
423 /*
424 * trace_probe_compare_arg_type() ensured that nr_args and
425 * each argument name and type are same. Let's compare comm.
426 */
427 for (i = 0; i < orig->tp.nr_args; i++) {
428 if (strcmp(orig->tp.args[i].comm,
429 comp->tp.args[i].comm))
430 break;
431 }
432
433 if (i == orig->tp.nr_args)
434 return true;
435 }
436
437 return false;
438 }
439
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
441 {
442 int ret;
443
444 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
445 if (ret) {
446 /* Note that argument starts index = 2 */
447 trace_probe_log_set_index(ret + 1);
448 trace_probe_log_err(0, DIFF_ARG_TYPE);
449 return -EEXIST;
450 }
451 if (trace_uprobe_has_same_uprobe(to, tu)) {
452 trace_probe_log_set_index(0);
453 trace_probe_log_err(0, SAME_PROBE);
454 return -EEXIST;
455 }
456
457 /* Append to existing event */
458 ret = trace_probe_append(&tu->tp, &to->tp);
459 if (!ret)
460 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
461
462 return ret;
463 }
464
/*
 * A uprobe with multiple reference counters is not allowed: if the inode
 * and offset match, the reference counter offset *must* match as well.
 * There is one exception: if the user is replacing an old trace_uprobe
 * with a new one (same group/event), the same uprobe may be reused with a
 * new reference counter, as long as the new one does not conflict with
 * any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
475 {
476 struct dyn_event *pos;
477 struct trace_uprobe *tmp;
478 struct inode *new_inode = d_real_inode(new->path.dentry);
479
480 for_each_trace_uprobe(tmp, pos) {
481 if (new_inode == d_real_inode(tmp->path.dentry) &&
482 new->offset == tmp->offset &&
483 new->ref_ctr_offset != tmp->ref_ctr_offset) {
484 pr_warn("Reference counter offset mismatch.");
485 return -EINVAL;
486 }
487 }
488 return 0;
489 }
490
491 /* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
493 {
494 struct trace_uprobe *old_tu;
495 int ret;
496
497 mutex_lock(&event_mutex);
498
499 ret = validate_ref_ctr_offset(tu);
500 if (ret)
501 goto end;
502
503 /* register as an event */
504 old_tu = find_probe_event(trace_probe_name(&tu->tp),
505 trace_probe_group_name(&tu->tp));
506 if (old_tu) {
507 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
508 trace_probe_log_set_index(0);
509 trace_probe_log_err(0, DIFF_PROBE_TYPE);
510 ret = -EEXIST;
511 } else {
512 ret = append_trace_uprobe(tu, old_tu);
513 }
514 goto end;
515 }
516
517 ret = register_uprobe_event(tu);
518 if (ret) {
519 if (ret == -EEXIST) {
520 trace_probe_log_set_index(0);
521 trace_probe_log_err(0, EVENT_EXIST);
522 } else
523 pr_warn("Failed to register probe event(%d)\n", ret);
524 goto end;
525 }
526
527 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
528
529 end:
530 mutex_unlock(&event_mutex);
531
532 return ret;
533 }
534
535 /*
536 * Argument syntax:
537 * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
538 */
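/*
 * For example, writing the following to the uprobe_events file creates an
 * entry probe and a return probe on the same location (the path and offset
 * below are hypothetical, illustrating the syntax only):
 *
 *   p:my_entry /bin/bash:0x4245c0 %ip
 *   r:my_exit /bin/bash:0x4245c0 $retval
 */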
static int __trace_uprobe_create(int argc, const char **argv)
540 {
541 struct trace_uprobe *tu;
542 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
543 char *arg, *filename, *rctr, *rctr_end, *tmp;
544 char buf[MAX_EVENT_NAME_LEN];
545 char gbuf[MAX_EVENT_NAME_LEN];
546 enum probe_print_type ptype;
547 struct path path;
548 unsigned long offset, ref_ctr_offset;
549 bool is_return = false;
550 int i, ret;
551
552 ref_ctr_offset = 0;
553
554 switch (argv[0][0]) {
555 case 'r':
556 is_return = true;
557 break;
558 case 'p':
559 break;
560 default:
561 return -ECANCELED;
562 }
563
564 if (argc < 2)
565 return -ECANCELED;
566
567 if (argv[0][1] == ':')
568 event = &argv[0][2];
569
570 if (!strchr(argv[1], '/'))
571 return -ECANCELED;
572
573 filename = kstrdup(argv[1], GFP_KERNEL);
574 if (!filename)
575 return -ENOMEM;
576
577 /* Find the last occurrence, in case the path contains ':' too. */
578 arg = strrchr(filename, ':');
579 if (!arg || !isdigit(arg[1])) {
580 kfree(filename);
581 return -ECANCELED;
582 }
583
584 trace_probe_log_init("trace_uprobe", argc, argv);
585 trace_probe_log_set_index(1); /* filename is the 2nd argument */
586
587 *arg++ = '\0';
588 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
589 if (ret) {
590 trace_probe_log_err(0, FILE_NOT_FOUND);
591 kfree(filename);
592 trace_probe_log_clear();
593 return ret;
594 }
595 if (!d_is_reg(path.dentry)) {
596 trace_probe_log_err(0, NO_REGULAR_FILE);
597 ret = -EINVAL;
598 goto fail_address_parse;
599 }
600
601 /* Parse reference counter offset if specified. */
602 rctr = strchr(arg, '(');
603 if (rctr) {
604 rctr_end = strchr(rctr, ')');
605 if (!rctr_end) {
606 ret = -EINVAL;
607 rctr_end = rctr + strlen(rctr);
608 trace_probe_log_err(rctr_end - filename,
609 REFCNT_OPEN_BRACE);
610 goto fail_address_parse;
611 } else if (rctr_end[1] != '\0') {
612 ret = -EINVAL;
613 trace_probe_log_err(rctr_end + 1 - filename,
614 BAD_REFCNT_SUFFIX);
615 goto fail_address_parse;
616 }
617
618 *rctr++ = '\0';
619 *rctr_end = '\0';
620 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
621 if (ret) {
622 trace_probe_log_err(rctr - filename, BAD_REFCNT);
623 goto fail_address_parse;
624 }
625 }
626
627 /* Check if there is %return suffix */
628 tmp = strchr(arg, '%');
629 if (tmp) {
630 if (!strcmp(tmp, "%return")) {
631 *tmp = '\0';
632 is_return = true;
633 } else {
634 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
635 ret = -EINVAL;
636 goto fail_address_parse;
637 }
638 }
639
640 /* Parse uprobe offset. */
641 ret = kstrtoul(arg, 0, &offset);
642 if (ret) {
643 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
644 goto fail_address_parse;
645 }
646
647 /* setup a probe */
648 trace_probe_log_set_index(0);
649 if (event) {
650 ret = traceprobe_parse_event_name(&event, &group, gbuf,
651 event - argv[0]);
652 if (ret)
653 goto fail_address_parse;
654 }
655
656 if (!event) {
657 char *tail;
658 char *ptr;
659
660 tail = kstrdup(kbasename(filename), GFP_KERNEL);
661 if (!tail) {
662 ret = -ENOMEM;
663 goto fail_address_parse;
664 }
665
666 ptr = strpbrk(tail, ".-_");
667 if (ptr)
668 *ptr = '\0';
669
670 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
671 event = buf;
672 kfree(tail);
673 }
674
675 argc -= 2;
676 argv += 2;
677
678 tu = alloc_trace_uprobe(group, event, argc, is_return);
679 if (IS_ERR(tu)) {
680 ret = PTR_ERR(tu);
681 /* This must return -ENOMEM otherwise there is a bug */
682 WARN_ON_ONCE(ret != -ENOMEM);
683 goto fail_address_parse;
684 }
685 tu->offset = offset;
686 tu->ref_ctr_offset = ref_ctr_offset;
687 tu->path = path;
688 tu->filename = filename;
689
690 /* parse arguments */
691 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
692 trace_probe_log_set_index(i + 2);
693 ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i],
694 is_return ? TPARG_FL_RETURN : 0);
695 if (ret)
696 goto error;
697 }
698
699 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
700 ret = traceprobe_set_print_fmt(&tu->tp, ptype);
701 if (ret < 0)
702 goto error;
703
704 ret = register_trace_uprobe(tu);
705 if (!ret)
706 goto out;
707
708 error:
709 free_trace_uprobe(tu);
710 out:
711 trace_probe_log_clear();
712 return ret;
713
714 fail_address_parse:
715 trace_probe_log_clear();
716 path_put(&path);
717 kfree(filename);
718
719 return ret;
720 }
721
int trace_uprobe_create(const char *raw_command)
723 {
724 return trace_probe_create(raw_command, __trace_uprobe_create);
725 }
726
static int create_or_delete_trace_uprobe(const char *raw_command)
728 {
729 int ret;
730
731 if (raw_command[0] == '-')
732 return dyn_event_release(raw_command, &trace_uprobe_ops);
733
734 ret = trace_uprobe_create(raw_command);
735 return ret == -ECANCELED ? -EINVAL : ret;
736 }
737
static int trace_uprobe_release(struct dyn_event *ev)
739 {
740 struct trace_uprobe *tu = to_trace_uprobe(ev);
741
742 return unregister_trace_uprobe(tu);
743 }
744
745 /* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
747 {
748 struct trace_uprobe *tu = to_trace_uprobe(ev);
749 char c = is_ret_probe(tu) ? 'r' : 'p';
750 int i;
751
752 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
753 trace_probe_name(&tu->tp), tu->filename,
754 (int)(sizeof(void *) * 2), tu->offset);
755
756 if (tu->ref_ctr_offset)
757 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
758
759 for (i = 0; i < tu->tp.nr_args; i++)
760 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
761
762 seq_putc(m, '\n');
763 return 0;
764 }
765
static int probes_seq_show(struct seq_file *m, void *v)
767 {
768 struct dyn_event *ev = v;
769
770 if (!is_trace_uprobe(ev))
771 return 0;
772
773 return trace_uprobe_show(m, ev);
774 }
775
776 static const struct seq_operations probes_seq_op = {
777 .start = dyn_event_seq_start,
778 .next = dyn_event_seq_next,
779 .stop = dyn_event_seq_stop,
780 .show = probes_seq_show
781 };
782
static int probes_open(struct inode *inode, struct file *file)
784 {
785 int ret;
786
787 ret = security_locked_down(LOCKDOWN_TRACEFS);
788 if (ret)
789 return ret;
790
791 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
792 ret = dyn_events_release_all(&trace_uprobe_ops);
793 if (ret)
794 return ret;
795 }
796
797 return seq_open(file, &probes_seq_op);
798 }
799
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
802 {
803 return trace_parse_run_command(file, buffer, count, ppos,
804 create_or_delete_trace_uprobe);
805 }
806
807 static const struct file_operations uprobe_events_ops = {
808 .owner = THIS_MODULE,
809 .open = probes_open,
810 .read = seq_read,
811 .llseek = seq_lseek,
812 .release = seq_release,
813 .write = probes_write,
814 };
815
816 /* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
818 {
819 struct dyn_event *ev = v;
820 struct trace_uprobe *tu;
821
822 if (!is_trace_uprobe(ev))
823 return 0;
824
825 tu = to_trace_uprobe(ev);
826 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
827 trace_probe_name(&tu->tp), tu->nhit);
828 return 0;
829 }
830
831 static const struct seq_operations profile_seq_op = {
832 .start = dyn_event_seq_start,
833 .next = dyn_event_seq_next,
834 .stop = dyn_event_seq_stop,
835 .show = probes_profile_seq_show
836 };
837
static int profile_open(struct inode *inode, struct file *file)
839 {
840 int ret;
841
842 ret = security_locked_down(LOCKDOWN_TRACEFS);
843 if (ret)
844 return ret;
845
846 return seq_open(file, &profile_seq_op);
847 }
848
849 static const struct file_operations uprobe_profile_ops = {
850 .owner = THIS_MODULE,
851 .open = profile_open,
852 .read = seq_read,
853 .llseek = seq_lseek,
854 .release = seq_release,
855 };
856
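/*
 * Per-CPU scratch buffers (one page each) used to assemble the fetched
 * probe arguments before they are copied into the trace or perf buffers.
 * The mutex serializes users that have migrated onto the same CPU.
 */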
857 struct uprobe_cpu_buffer {
858 struct mutex mutex;
859 void *buf;
860 };
861 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
862 static int uprobe_buffer_refcnt;
863
static int uprobe_buffer_init(void)
865 {
866 int cpu, err_cpu;
867
868 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
869 if (uprobe_cpu_buffer == NULL)
870 return -ENOMEM;
871
872 for_each_possible_cpu(cpu) {
873 struct page *p = alloc_pages_node(cpu_to_node(cpu),
874 GFP_KERNEL, 0);
875 if (p == NULL) {
876 err_cpu = cpu;
877 goto err;
878 }
879 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
880 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
881 }
882
883 return 0;
884
885 err:
886 for_each_possible_cpu(cpu) {
887 if (cpu == err_cpu)
888 break;
889 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
890 }
891
892 free_percpu(uprobe_cpu_buffer);
893 return -ENOMEM;
894 }
895
static int uprobe_buffer_enable(void)
897 {
898 int ret = 0;
899
900 BUG_ON(!mutex_is_locked(&event_mutex));
901
902 if (uprobe_buffer_refcnt++ == 0) {
903 ret = uprobe_buffer_init();
904 if (ret < 0)
905 uprobe_buffer_refcnt--;
906 }
907
908 return ret;
909 }
910
static void uprobe_buffer_disable(void)
912 {
913 int cpu;
914
915 BUG_ON(!mutex_is_locked(&event_mutex));
916
917 if (--uprobe_buffer_refcnt == 0) {
918 for_each_possible_cpu(cpu)
919 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
920 cpu)->buf);
921
922 free_percpu(uprobe_cpu_buffer);
923 uprobe_cpu_buffer = NULL;
924 }
925 }
926
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
928 {
929 struct uprobe_cpu_buffer *ucb;
930 int cpu;
931
932 cpu = raw_smp_processor_id();
933 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
934
935 /*
936 * Use per-cpu buffers for fastest access, but we might migrate
937 * so the mutex makes sure we have sole access to it.
938 */
939 mutex_lock(&ucb->mutex);
940
941 return ucb;
942 }
943
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
945 {
946 mutex_unlock(&ucb->mutex);
947 }
948
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
953 {
954 struct uprobe_trace_entry_head *entry;
955 struct trace_event_buffer fbuffer;
956 void *data;
957 int size, esize;
958 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
959
960 WARN_ON(call != trace_file->event_call);
961
962 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
963 return;
964
965 if (trace_trigger_soft_disabled(trace_file))
966 return;
967
968 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
969 size = esize + tu->tp.size + dsize;
970 entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
971 if (!entry)
972 return;
973
974 if (is_ret_probe(tu)) {
975 entry->vaddr[0] = func;
976 entry->vaddr[1] = instruction_pointer(regs);
977 data = DATAOF_TRACE_ENTRY(entry, true);
978 } else {
979 entry->vaddr[0] = instruction_pointer(regs);
980 data = DATAOF_TRACE_ENTRY(entry, false);
981 }
982
983 memcpy(data, ucb->buf, tu->tp.size + dsize);
984
985 trace_event_buffer_commit(&fbuffer);
986 }
987
988 /* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
991 {
992 struct event_file_link *link;
993
994 if (is_ret_probe(tu))
995 return 0;
996
997 rcu_read_lock();
998 trace_probe_for_each_link_rcu(link, &tu->tp)
999 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
1000 rcu_read_unlock();
1001
1002 return 0;
1003 }
1004
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
1008 {
1009 struct event_file_link *link;
1010
1011 rcu_read_lock();
1012 trace_probe_for_each_link_rcu(link, &tu->tp)
1013 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1014 rcu_read_unlock();
1015 }
1016
1017 /* Event entry printers */
1018 static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1020 {
1021 struct uprobe_trace_entry_head *entry;
1022 struct trace_seq *s = &iter->seq;
1023 struct trace_uprobe *tu;
1024 u8 *data;
1025
1026 entry = (struct uprobe_trace_entry_head *)iter->ent;
1027 tu = trace_uprobe_primary_from_call(
1028 container_of(event, struct trace_event_call, event));
1029 if (unlikely(!tu))
1030 goto out;
1031
1032 if (is_ret_probe(tu)) {
1033 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1034 trace_probe_name(&tu->tp),
1035 entry->vaddr[1], entry->vaddr[0]);
1036 data = DATAOF_TRACE_ENTRY(entry, true);
1037 } else {
1038 trace_seq_printf(s, "%s: (0x%lx)",
1039 trace_probe_name(&tu->tp),
1040 entry->vaddr[0]);
1041 data = DATAOF_TRACE_ENTRY(entry, false);
1042 }
1043
1044 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1045 goto out;
1046
1047 trace_seq_putc(s, '\n');
1048
1049 out:
1050 return trace_handle_return(s);
1051 }
1052
1053 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1054 enum uprobe_filter_ctx ctx,
1055 struct mm_struct *mm);
1056
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1058 {
1059 int ret;
1060
1061 tu->consumer.filter = filter;
1062 tu->inode = d_real_inode(tu->path.dentry);
1063
1064 if (tu->ref_ctr_offset)
1065 ret = uprobe_register_refctr(tu->inode, tu->offset,
1066 tu->ref_ctr_offset, &tu->consumer);
1067 else
1068 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1069
1070 if (ret)
1071 tu->inode = NULL;
1072
1073 return ret;
1074 }
1075
static void __probe_event_disable(struct trace_probe *tp)
1077 {
1078 struct trace_uprobe *tu;
1079
1080 tu = container_of(tp, struct trace_uprobe, tp);
1081 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1082
1083 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1084 if (!tu->inode)
1085 continue;
1086
1087 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1088 tu->inode = NULL;
1089 }
1090 }
1091
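/*
 * Enable every probe attached to the event behind @call, either for ftrace
 * (@file != NULL) or for perf (@file == NULL). The per-cpu buffers are
 * allocated on the first enabled reference.
 */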
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
1094 {
1095 struct trace_probe *tp;
1096 struct trace_uprobe *tu;
1097 bool enabled;
1098 int ret;
1099
1100 tp = trace_probe_primary_from_call(call);
1101 if (WARN_ON_ONCE(!tp))
1102 return -ENODEV;
1103 enabled = trace_probe_is_enabled(tp);
1104
1105 /* This may also change "enabled" state */
1106 if (file) {
1107 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1108 return -EINTR;
1109
1110 ret = trace_probe_add_file(tp, file);
1111 if (ret < 0)
1112 return ret;
1113 } else {
1114 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1115 return -EINTR;
1116
1117 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1118 }
1119
1120 tu = container_of(tp, struct trace_uprobe, tp);
1121 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1122
1123 if (enabled)
1124 return 0;
1125
1126 ret = uprobe_buffer_enable();
1127 if (ret)
1128 goto err_flags;
1129
1130 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1131 ret = trace_uprobe_enable(tu, filter);
1132 if (ret) {
1133 __probe_event_disable(tp);
1134 goto err_buffer;
1135 }
1136 }
1137
1138 return 0;
1139
1140 err_buffer:
1141 uprobe_buffer_disable();
1142
1143 err_flags:
1144 if (file)
1145 trace_probe_remove_file(tp, file);
1146 else
1147 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1148
1149 return ret;
1150 }
1151
static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
1154 {
1155 struct trace_probe *tp;
1156
1157 tp = trace_probe_primary_from_call(call);
1158 if (WARN_ON_ONCE(!tp))
1159 return;
1160
1161 if (!trace_probe_is_enabled(tp))
1162 return;
1163
1164 if (file) {
1165 if (trace_probe_remove_file(tp, file) < 0)
1166 return;
1167
1168 if (trace_probe_is_enabled(tp))
1169 return;
1170 } else
1171 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1172
1173 __probe_event_disable(tp);
1174 uprobe_buffer_disable();
1175 }
1176
static int uprobe_event_define_fields(struct trace_event_call *event_call)
1178 {
1179 int ret, size;
1180 struct uprobe_trace_entry_head field;
1181 struct trace_uprobe *tu;
1182
1183 tu = trace_uprobe_primary_from_call(event_call);
1184 if (unlikely(!tu))
1185 return -ENODEV;
1186
1187 if (is_ret_probe(tu)) {
1188 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1189 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1190 size = SIZEOF_TRACE_ENTRY(true);
1191 } else {
1192 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1193 size = SIZEOF_TRACE_ENTRY(false);
1194 }
1195
1196 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1197 }
1198
1199 #ifdef CONFIG_PERF_EVENTS
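/*
 * Return true if a system-wide event exists or if some attached perf event
 * targets @mm. Caller must hold filter->rwlock.
 */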
1200 static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1202 {
1203 struct perf_event *event;
1204
1205 if (filter->nr_systemwide)
1206 return true;
1207
1208 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1209 if (event->hw.target->mm == mm)
1210 return true;
1211 }
1212
1213 return false;
1214 }
1215
1216 static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
1219 {
1220 return __uprobe_perf_filter(filter, event->hw.target->mm);
1221 }
1222
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
1225 {
1226 bool done;
1227
1228 write_lock(&filter->rwlock);
1229 if (event->hw.target) {
1230 list_del(&event->hw.tp_list);
1231 done = filter->nr_systemwide ||
1232 (event->hw.target->flags & PF_EXITING) ||
1233 trace_uprobe_filter_event(filter, event);
1234 } else {
1235 filter->nr_systemwide--;
1236 done = filter->nr_systemwide;
1237 }
1238 write_unlock(&filter->rwlock);
1239
1240 return done;
1241 }
1242
1243 /* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
1246 {
1247 bool done;
1248
1249 write_lock(&filter->rwlock);
1250 if (event->hw.target) {
1251 /*
1252 * event->parent != NULL means copy_process(), we can avoid
1253 * uprobe_apply(). current->mm must be probed and we can rely
1254 * on dup_mmap() which preserves the already installed bp's.
1255 *
1256 * attr.enable_on_exec means that exec/mmap will install the
1257 * breakpoints we need.
1258 */
1259 done = filter->nr_systemwide ||
1260 event->parent || event->attr.enable_on_exec ||
1261 trace_uprobe_filter_event(filter, event);
1262 list_add(&event->hw.tp_list, &filter->perf_events);
1263 } else {
1264 done = filter->nr_systemwide;
1265 filter->nr_systemwide++;
1266 }
1267 write_unlock(&filter->rwlock);
1268
1269 return done;
1270 }
1271
static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
1274 {
1275 struct trace_probe *tp;
1276 struct trace_uprobe *tu;
1277 int ret = 0;
1278
1279 tp = trace_probe_primary_from_call(call);
1280 if (WARN_ON_ONCE(!tp))
1281 return -ENODEV;
1282
1283 tu = container_of(tp, struct trace_uprobe, tp);
1284 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1285 return 0;
1286
1287 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1288 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1289 if (ret)
1290 break;
1291 }
1292
1293 return ret;
1294 }
1295
static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
1298 {
1299 struct trace_probe *tp;
1300 struct trace_uprobe *tu;
1301 int err = 0;
1302
1303 tp = trace_probe_primary_from_call(call);
1304 if (WARN_ON_ONCE(!tp))
1305 return -ENODEV;
1306
1307 tu = container_of(tp, struct trace_uprobe, tp);
1308 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1309 return 0;
1310
1311 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1312 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1313 if (err) {
1314 uprobe_perf_close(call, event);
1315 break;
1316 }
1317 }
1318
1319 return err;
1320 }
1321
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
			       enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1324 {
1325 struct trace_uprobe_filter *filter;
1326 struct trace_uprobe *tu;
1327 int ret;
1328
1329 tu = container_of(uc, struct trace_uprobe, consumer);
1330 filter = tu->tp.event->filter;
1331
1332 read_lock(&filter->rwlock);
1333 ret = __uprobe_perf_filter(filter, mm);
1334 read_unlock(&filter->rwlock);
1335
1336 return ret;
1337 }
1338
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
1342 {
1343 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1344 struct uprobe_trace_entry_head *entry;
1345 struct hlist_head *head;
1346 void *data;
1347 int size, esize;
1348 int rctx;
1349
1350 #ifdef CONFIG_BPF_EVENTS
1351 if (bpf_prog_array_valid(call)) {
1352 u32 ret;
1353
1354 ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
1355 if (!ret)
1356 return;
1357 }
1358 #endif /* CONFIG_BPF_EVENTS */
1359
1360 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1361
1362 size = esize + tu->tp.size + dsize;
1363 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1364 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1365 return;
1366
1367 preempt_disable();
1368 head = this_cpu_ptr(call->perf_events);
1369 if (hlist_empty(head))
1370 goto out;
1371
1372 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1373 if (!entry)
1374 goto out;
1375
1376 if (is_ret_probe(tu)) {
1377 entry->vaddr[0] = func;
1378 entry->vaddr[1] = instruction_pointer(regs);
1379 data = DATAOF_TRACE_ENTRY(entry, true);
1380 } else {
1381 entry->vaddr[0] = instruction_pointer(regs);
1382 data = DATAOF_TRACE_ENTRY(entry, false);
1383 }
1384
1385 memcpy(data, ucb->buf, tu->tp.size + dsize);
1386
1387 if (size - esize > tu->tp.size + dsize) {
1388 int len = tu->tp.size + dsize;
1389
1390 memset(data + len, 0, size - esize - len);
1391 }
1392
1393 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1394 head, NULL);
1395 out:
1396 preempt_enable();
1397 }
1398
1399 /* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
1402 {
1403 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1404 return UPROBE_HANDLER_REMOVE;
1405
1406 if (!is_ret_probe(tu))
1407 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1408 return 0;
1409 }
1410
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
1414 {
1415 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1416 }
1417
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
1421 {
1422 const char *pevent = trace_event_name(event->tp_event);
1423 const char *group = event->tp_event->class->system;
1424 struct trace_uprobe *tu;
1425
1426 if (perf_type_tracepoint)
1427 tu = find_probe_event(pevent, group);
1428 else
1429 tu = trace_uprobe_primary_from_call(event->tp_event);
1430 if (!tu)
1431 return -EINVAL;
1432
1433 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1434 : BPF_FD_TYPE_UPROBE;
1435 *filename = tu->filename;
1436 *probe_offset = tu->offset;
1437 return 0;
1438 }
1439 #endif /* CONFIG_PERF_EVENTS */
1440
1441 static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
1444 {
1445 struct trace_event_file *file = data;
1446
1447 switch (type) {
1448 case TRACE_REG_REGISTER:
1449 return probe_event_enable(event, file, NULL);
1450
1451 case TRACE_REG_UNREGISTER:
1452 probe_event_disable(event, file);
1453 return 0;
1454
1455 #ifdef CONFIG_PERF_EVENTS
1456 case TRACE_REG_PERF_REGISTER:
1457 return probe_event_enable(event, NULL, uprobe_perf_filter);
1458
1459 case TRACE_REG_PERF_UNREGISTER:
1460 probe_event_disable(event, NULL);
1461 return 0;
1462
1463 case TRACE_REG_PERF_OPEN:
1464 return uprobe_perf_open(event, data);
1465
1466 case TRACE_REG_PERF_CLOSE:
1467 return uprobe_perf_close(event, data);
1468
1469 #endif
1470 default:
1471 return 0;
1472 }
1473 }
1474
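/*
 * Breakpoint handler: called by the uprobes core when a probed instruction
 * is hit. Stash the dispatch data used by translate_user_vaddr(), collect
 * the arguments once into a per-cpu buffer, then hand the event to the
 * ftrace and/or perf paths, whichever are enabled.
 */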
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1476 {
1477 struct trace_uprobe *tu;
1478 struct uprobe_dispatch_data udd;
1479 struct uprobe_cpu_buffer *ucb;
1480 int dsize, esize;
1481 int ret = 0;
1482
1483
1484 tu = container_of(con, struct trace_uprobe, consumer);
1485 tu->nhit++;
1486
1487 udd.tu = tu;
1488 udd.bp_addr = instruction_pointer(regs);
1489
1490 current->utask->vaddr = (unsigned long) &udd;
1491
1492 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1493 return 0;
1494
1495 dsize = __get_data_size(&tu->tp, regs);
1496 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1497
1498 ucb = uprobe_buffer_get();
1499 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1500
1501 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1502 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1503
1504 #ifdef CONFIG_PERF_EVENTS
1505 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1506 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1507 #endif
1508 uprobe_buffer_put(ucb);
1509 return ret;
1510 }
1511
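/* Return-probe counterpart of uprobe_dispatcher(), invoked when the probed function returns. */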
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
1514 {
1515 struct trace_uprobe *tu;
1516 struct uprobe_dispatch_data udd;
1517 struct uprobe_cpu_buffer *ucb;
1518 int dsize, esize;
1519
1520 tu = container_of(con, struct trace_uprobe, consumer);
1521
1522 udd.tu = tu;
1523 udd.bp_addr = func;
1524
1525 current->utask->vaddr = (unsigned long) &udd;
1526
1527 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1528 return 0;
1529
1530 dsize = __get_data_size(&tu->tp, regs);
1531 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1532
1533 ucb = uprobe_buffer_get();
1534 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1535
1536 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1537 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1538
1539 #ifdef CONFIG_PERF_EVENTS
1540 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1541 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1542 #endif
1543 uprobe_buffer_put(ucb);
1544 return 0;
1545 }
1546
1547 static struct trace_event_functions uprobe_funcs = {
1548 .trace = print_uprobe_event
1549 };
1550
1551 static struct trace_event_fields uprobe_fields_array[] = {
1552 { .type = TRACE_FUNCTION_TYPE,
1553 .define_fields = uprobe_event_define_fields },
1554 {}
1555 };
1556
static inline void init_trace_event_call(struct trace_uprobe *tu)
1558 {
1559 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1560 call->event.funcs = &uprobe_funcs;
1561 call->class->fields_array = uprobe_fields_array;
1562
1563 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1564 call->class->reg = trace_uprobe_register;
1565 }
1566
static int register_uprobe_event(struct trace_uprobe *tu)
1568 {
1569 init_trace_event_call(tu);
1570
1571 return trace_probe_register_event_call(&tu->tp);
1572 }
1573
static int unregister_uprobe_event(struct trace_uprobe *tu)
1575 {
1576 return trace_probe_unregister_event_call(&tu->tp);
1577 }
1578
1579 #ifdef CONFIG_PERF_EVENTS
1580 struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
1583 {
1584 enum probe_print_type ptype;
1585 struct trace_uprobe *tu;
1586 struct path path;
1587 int ret;
1588
1589 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1590 if (ret)
1591 return ERR_PTR(ret);
1592
1593 if (!d_is_reg(path.dentry)) {
1594 path_put(&path);
1595 return ERR_PTR(-EINVAL);
1596 }
1597
/*
 * local trace_uprobes are not added to dyn_event, so they are never
 * searched in find_probe_event(). Therefore, there is no concern of
 * duplicated name "DUMMY_EVENT" here.
 */
1603 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1604 is_return);
1605
1606 if (IS_ERR(tu)) {
1607 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1608 (int)PTR_ERR(tu));
1609 path_put(&path);
1610 return ERR_CAST(tu);
1611 }
1612
1613 tu->offset = offs;
1614 tu->path = path;
1615 tu->ref_ctr_offset = ref_ctr_offset;
1616 tu->filename = kstrdup(name, GFP_KERNEL);
1617 if (!tu->filename) {
1618 ret = -ENOMEM;
1619 goto error;
1620 }
1621
1622 init_trace_event_call(tu);
1623
1624 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1625 if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1626 ret = -ENOMEM;
1627 goto error;
1628 }
1629
1630 return trace_probe_event_call(&tu->tp);
1631 error:
1632 free_trace_uprobe(tu);
1633 return ERR_PTR(ret);
1634 }
1635
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1637 {
1638 struct trace_uprobe *tu;
1639
1640 tu = trace_uprobe_primary_from_call(event_call);
1641
1642 free_trace_uprobe(tu);
1643 }
1644 #endif /* CONFIG_PERF_EVENTS */
1645
1646 /* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
1648 {
1649 int ret;
1650
1651 ret = dyn_event_register(&trace_uprobe_ops);
1652 if (ret)
1653 return ret;
1654
1655 ret = tracing_init_dentry();
1656 if (ret)
1657 return 0;
1658
1659 trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1660 NULL, &uprobe_events_ops);
1661 /* Profile interface */
1662 trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1663 NULL, &uprobe_profile_ops);
1664 return 0;
1665 }
1666
1667 fs_initcall(init_uprobe_trace);
1668