// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

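/*
 * Check whether the calling task may attach a perf event to this trace
 * event: honour any event-specific ->perf_perm() hook, skip re-checking
 * inherited (child) events, and restrict raw sample data and ftrace
 * function events to sufficiently privileged users.
 */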
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed the parent to be created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler, and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

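/*
 * First-time registration for a trace event id: allocate the per-cpu
 * hlist of attached perf events and, if this is the first trace event
 * used by perf at all, the shared per-context scratch buffers, then
 * call into the event class's ->reg() hook. Later callers only bump
 * the refcount.
 */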
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

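/*
 * Drop the per-event-id reference taken by perf_trace_event_reg(); on
 * the last one, unregister from the event class, wait for in-flight
 * tracepoint callbacks to finish, and free the per-cpu hlist (and the
 * shared buffers if no trace event uses perf any more).
 */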
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		return;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
}

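/*
 * TRACE_REG_PERF_OPEN/CLOSE let the event class do per perf_event setup
 * and teardown, beyond the per event id registration above.
 */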
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

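/*
 * Common initialisation path for tracepoint, kprobe and uprobe perf
 * events: permission check, per event id registration, then the
 * per-event open. The registration is undone if the open step fails.
 */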
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

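/*
 * Entry point for PERF_TYPE_TRACEPOINT events: look up the trace event
 * whose id matches attr.config under event_mutex, take a reference on
 * it and initialise the perf side. Returns -EINVAL if no such event
 * exists.
 */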
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    trace_event_try_get_ref(tp_event)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				trace_event_put_ref(tp_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

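/* Tear down a tracepoint perf event and drop the trace event reference. */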
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	trace_event_put_ref(p_event->tp_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
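/*
 * Create a perf-local kprobe (or kretprobe) event: copy the optional
 * symbol name from user space, build a local trace_kprobe around either
 * that symbol or attr.kprobe_addr, and run it through the common init
 * path.
 */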
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	trace_event_put_ref(p_event->tp_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
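/*
 * Create a perf-local uprobe (or uretprobe) event: copy the target path
 * from user space, build a local trace_uprobe for the given offset and
 * reference-counter offset, and run it through the common init path.
 */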
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	trace_event_put_ref(p_event->tp_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

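/*
 * pmu ->add() callback: record the initial stopped state and, unless
 * the event class handles TRACE_REG_PERF_ADD itself, link the event
 * into this CPU's hlist so the tracepoint handlers can find it.
 */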
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

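/*
 * pmu ->del() callback: counterpart of perf_trace_add(), unlinking the
 * event from the per-cpu hlist unless the class handled it.
 */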
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

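/*
 * Hand out a per-cpu, per-recursion-context scratch buffer (and
 * optionally a pt_regs) for building a raw sample; *rctxp receives the
 * recursion context that the caller must eventually release. Returns
 * NULL if the requested size is too large or recursion is detected.
 */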
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough, wanted %d, have %d",
		      size, PERF_MAX_TRACE_SIZE))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from alignment so we don't leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

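/*
 * Fill in the common trace_entry header of a raw record with the given
 * type and the current tracing context.
 */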
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;

	tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
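/*
 * ftrace_ops callback used for perf function-trace events: bail out if
 * RCU is not watching or recursion is detected, skip the call unless
 * the event is scheduled in on this CPU (ops->private holds the owning
 * CPU, or nr_cpu_ids when not scheduled), then emit a TRACE_FN sample.
 */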
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;
	int bit;

	if (!rcu_is_watching())
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	if ((unsigned long)ops->private != smp_processor_id())
		goto out;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		goto out;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

out:
	ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}

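/*
 * Register/unregister the per-event ftrace_ops. ops->private starts out
 * as nr_cpu_ids (an invalid CPU) so the callback stays inert until the
 * event is added on its target CPU.
 */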
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->func = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

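/*
 * ->reg() implementation for the ftrace "function" trace event:
 * OPEN/CLOSE map to registering/unregistering the ftrace_ops, and
 * ADD/DEL switch ops->private between the current CPU and nr_cpu_ids,
 * returning 1 so the generic per-cpu hlist handling is skipped.
 */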
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */