1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3 #include <ctype.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <libelf.h>
8 #include <gelf.h>
9 #include <unistd.h>
10 #include <linux/ptrace.h>
11 #include <linux/kernel.h>
12
13 /* s8 will be marked as poison while it's a reg of riscv */
14 #if defined(__riscv)
15 #define rv_s8 s8
16 #endif
17
18 #include "bpf.h"
19 #include "libbpf.h"
20 #include "libbpf_common.h"
21 #include "libbpf_internal.h"
22 #include "hashmap.h"
23
24 /* libbpf's USDT support consists of BPF-side state/code and user-space
25 * state/code working together in concert. BPF-side parts are defined in
26 * usdt.bpf.h header library. User-space state is encapsulated by struct
27 * usdt_manager and all the supporting code centered around usdt_manager.
28 *
29 * usdt.bpf.h defines two BPF maps that usdt_manager expects: the USDT spec
30 * map and the IP-to-spec-ID map, an auxiliary map necessary for kernels that
31 * don't support BPF cookie (see below). These two maps are implicitly
32 * embedded into the user's final BPF object file when the user's code
33 * includes usdt.bpf.h. This means that libbpf doesn't do anything special to
34 * create these USDT support maps. They are created by the normal libbpf logic
35 * of instantiating BPF maps when opening and loading a BPF object.
36 *
37 * As such, libbpf is basically unaware of the need to do anything
38 * USDT-related until the very first call to bpf_program__attach_usdt(), which
39 * can be called by user explicitly or happen automatically during skeleton
40 * attach (or, equivalently, through generic bpf_program__attach() call). At
41 * this point, libbpf will instantiate and initialize struct usdt_manager and
42 * store it in bpf_object. USDT manager is a per-BPF-object construct, as each
43 * independent BPF object might or might not have USDT programs, and thus all
44 * the expected USDT-related state. There is no coordination between two
45 * bpf_objects when it comes to USDT attachment: they are oblivious of each
46 * other's existence, and libbpf simply deals with each bpf_object's own
47 * USDT state.
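 *
 * For illustration, a minimal sketch of an explicit user-space attachment
 * (the skeleton handle and program name below are hypothetical; PID -1
 * means "any process"):
 *
 *     LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
 *     struct bpf_link *link;
 *
 *     link = bpf_program__attach_usdt(skel->progs.handle_my_usdt,
 *                                     -1, "/path/to/binary",
 *                                     "my_usdt_provider", "my_usdt_probe_name",
 *                                     &opts);
 *     if (!link)
 *         ... handle error, errno is set ...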
48 *
49 * Quick crash course on USDTs.
50 *
51 * From user-space application's point of view, USDT is essentially just
52 * a slightly special function call that normally has zero overhead, unless it
53 * is being traced by some external entity (e.g., a BPF-based tool). Here's how
54 * a typical application can trigger USDT probe:
55 *
56 * #include <sys/sdt.h> // provided by systemtap-sdt-devel package
57 * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
58 *
59 * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
60 *
61 * A USDT is identified by its <provider-name>:<probe-name> pair of names. Each
62 * individual USDT has a fixed number of arguments (3 in the above example)
63 * and specifies the value of each argument as if it were a function call.
64 *
65 * USDT call is actually not a function call, but is instead replaced by
66 * a single NOP instruction (thus zero overhead, effectively). But in addition
67 * to that, those USDT macros generate special SHT_NOTE ELF records in
68 * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
69 * `readelf -n <binary>`:
70 *
71 * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors)
72 * Provider: test
73 * Name: usdt12
74 * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
75 * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
76 *
77 * In this case we have USDT test:usdt12 with 12 arguments.
78 *
79 * Location and base are offsets used to calculate absolute IP address of that
80 * NOP instruction that kernel can replace with an interrupt instruction to
81 * trigger instrumentation code (BPF program for all that we care about).
82 *
83 * The semaphore above is an optional feature. It records the address of a 2-byte
84 * refcount variable (normally in '.probes' ELF section) used for signaling if
85 * there is anything that is attached to USDT. This is useful for user
86 * applications if, for example, they need to prepare some arguments that are
87 * passed only to USDTs and preparation is expensive. By checking if USDT is
88 * "activated", an application can avoid paying those costs unnecessarily.
89 * Recent enough kernels have built-in support for automatically managing this
90 * refcount, which libbpf expects and relies on. If a USDT is defined without
91 * an associated semaphore, this value will be zero. See selftests for semaphore
92 * examples.
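 *
 * An illustrative sketch of this guarding pattern (assuming a dtrace(1)-style
 * generated header that provides an *_ENABLED() macro backed by the
 * semaphore; exact macro and provider/probe names depend on the tooling):
 *
 *     if (MY_USDT_PROVIDER_MY_USDT_PROBE_NAME_ENABLED()) {
 *         // expensive argument preparation happens only when traced
 *         char *s = prepare_expensive_arg();
 *         STAP_PROBE1(my_usdt_provider, my_usdt_probe_name, s);
 *     }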
93 *
94 * Arguments is the most interesting part. This USDT specification string
95 * provides information about all the USDT arguments and their locations. The
96 * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8)
97 * and whether the argument is signed or unsigned (negative size means signed).
98 * The part after the @ sign is an assembly-like definition of the argument's
99 * location (see [0] for more details). Technically, the assembler can provide
100 * some pretty advanced definitions, but libbpf currently supports the three
101 * most common cases:
102 * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
103 * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
104 * whose value is in register %rdx";
105 * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
106 * specifies signed 32-bit integer stored at offset -1204 bytes from
107 * memory address stored in %rbp.
108 *
109 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
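 *
 * For example, on x86-64 the first argument spec above, -4@-1204(%rbp),
 * roughly translates (see struct usdt_arg_spec and parse_usdt_arg() below)
 * into:
 *
 *     arg_type     = USDT_ARG_REG_DEREF
 *     val_off      = -1204
 *     reg_off      = offsetof(struct pt_regs, rbp)
 *     arg_signed   = true
 *     arg_bitshift = 64 - 4 * 8 = 32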
110 *
111 * During attachment, libbpf parses all the relevant USDT specifications and
112 * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
113 * code through spec map. This allows BPF applications to quickly fetch the
114 * actual argument values at runtime using simple BPF-side code.
115 *
116 * With basics out of the way, let's go over less immediately obvious aspects
117 * of supporting USDTs.
118 *
119 * First, there is no special USDT BPF program type. It is actually just
120 * a uprobe BPF program (which for kernel, at least currently, is just a kprobe
121 * program, so BPF_PROG_TYPE_KPROBE program type). The only difference is that
122 * a uprobe is usually attached at the function entry, while a USDT will
123 * normally be somewhere inside the function. But it should always be
124 * pointing to a NOP instruction, which makes such uprobes the fastest uprobe
125 * kind.
126 *
127 * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
128 * macro invocations can end up being inlined many-many times, depending on
129 * specifics of each individual user application. So single conceptual USDT
130 * (identified by provider:name pair of identifiers) is, generally speaking,
131 * multiple uprobe locations (USDT call sites) in different places in user
132 * application. Further, again due to inlining, each USDT call site might end
133 * up having the same argument #N be located in a different place. In one call
134 * site it could be a constant, in another it will end up in a register, and in
135 * yet another could be some other register or even somewhere on the stack.
136 *
137 * As such, "attaching to USDT" means (in general case) attaching the same
138 * uprobe BPF program to multiple target locations in user application, each
139 * potentially having a completely different USDT spec associated with it.
140 * To wire all this up together libbpf allocates a unique integer spec ID for
141 * each unique USDT spec. Spec IDs are allocated as sequential small integers
142 * so that they can be used as keys in array BPF map (for performance reasons).
143 * Spec ID allocation and accounting is a big part of what usdt_manager is
144 * about. This state has to be maintained per BPF object and coordinated
145 * between different USDT attachments within the same BPF object.
146 *
147 * Spec ID is the key in the spec BPF map, the value is the actual USDT spec laid out
148 * as struct usdt_spec. Each invocation of BPF program at runtime needs to
149 * know its associated spec ID. It gets it either through BPF cookie, which
150 * libbpf sets to spec ID during attach time, or, if kernel is too old to
151 * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
152 * case. The latter means that some modes of operation can't be supported
153 * without BPF cookie. One such mode is attaching to a shared library
154 * "generically", without specifying a target process. In that case, it's
155 * impossible to calculate absolute IP addresses for the IP-to-spec-ID map,
156 * and thus such mode is not supported without BPF cookie support.
157 *
158 * Note that libbpf uses BPF cookie functionality for its own internal
159 * needs, so users can't rely on the BPF cookie feature themselves. To that
160 * end, libbpf provides conceptually equivalent USDT cookie support. It's
161 * still a u64 user-provided value that can be associated with a USDT
162 * attachment. Note that this will be the same value for all USDT call sites
163 * within the same single *logical* USDT attachment. This makes sense because,
164 * to the user, attaching to a USDT is a single BPF program triggered for a
165 * singular USDT probe. The fact that this is done at multiple actual
166 * locations is a mostly hidden implementation detail. This USDT cookie value
167 * can be fetched with the bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
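 *
 * A minimal BPF-side sketch (assuming usdt.bpf.h; program and argument names
 * are illustrative):
 *
 *     #include <bpf/usdt.bpf.h>
 *
 *     SEC("usdt")
 *     int BPF_USDT(handle_my_usdt, int x, long y)
 *     {
 *         __u64 cookie = bpf_usdt_cookie(ctx);
 *
 *         bpf_printk("cookie %llu x %d y %ld", cookie, x, y);
 *         return 0;
 *     }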
168 *
169 * Lastly, while single USDT can have tons of USDT call sites, it doesn't
170 * necessarily have that many different USDT specs. It very well might be
171 * that 1000 USDT call sites only need 5 different USDT specs, because all the
172 * arguments are typically contained in a small set of registers or stack
173 * locations. As such, it's wasteful to allocate as many USDT spec IDs as
174 * there are USDT call sites. So libbpf tries to be frugal and performs
175 * on-the-fly deduplication during a single USDT attachment to only allocate
176 * the minimal required amount of unique USDT specs (and thus spec IDs). This
177 * is trivially achieved by using USDT spec string (Arguments string from USDT
178 * note) as a lookup key in a hashmap. USDT spec string uniquely defines
179 * everything about how to fetch USDT arguments, so two USDT call sites
180 * sharing USDT spec string can safely share the same USDT spec and spec ID.
181 * Note, this spec string deduplication is happening only during the same USDT
182 * attachment, so each USDT spec shares the same USDT cookie value. This is
183 * not generally true for other USDT attachments within the same BPF object,
184 * as even if USDT spec string is the same, USDT cookie value can be
185 * different. It was deemed excessive to try to deduplicate across independent
186 * USDT attachments by taking into account USDT spec string *and* USDT cookie
187 * value, which would complicate spec ID accounting significantly for little
188 * gain.
189 */
190
191 #define USDT_BASE_SEC ".stapsdt.base"
192 #define USDT_SEMA_SEC ".probes"
193 #define USDT_NOTE_SEC ".note.stapsdt"
194 #define USDT_NOTE_TYPE 3
195 #define USDT_NOTE_NAME "stapsdt"
196
197 /* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
198 enum usdt_arg_type {
199 USDT_ARG_CONST,
200 USDT_ARG_REG,
201 USDT_ARG_REG_DEREF,
202 };
203
204 /* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
205 struct usdt_arg_spec {
206 __u64 val_off;
207 enum usdt_arg_type arg_type;
208 short reg_off;
209 bool arg_signed;
210 char arg_bitshift;
211 };
212
213 /* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
214 #define USDT_MAX_ARG_CNT 12
215
216 /* should match struct __bpf_usdt_spec from usdt.bpf.h */
217 struct usdt_spec {
218 struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
219 __u64 usdt_cookie;
220 short arg_cnt;
221 };
222
223 struct usdt_note {
224 const char *provider;
225 const char *name;
226 /* USDT args specification string, e.g.:
227 * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
228 */
229 const char *args;
230 long loc_addr;
231 long base_addr;
232 long sema_addr;
233 };
234
235 struct usdt_target {
236 long abs_ip;
237 long rel_ip;
238 long sema_off;
239 struct usdt_spec spec;
240 const char *spec_str;
241 };
242
243 struct usdt_manager {
244 struct bpf_map *specs_map;
245 struct bpf_map *ip_to_spec_id_map;
246
247 int *free_spec_ids;
248 size_t free_spec_cnt;
249 size_t next_free_spec_id;
250
251 bool has_bpf_cookie;
252 bool has_sema_refcnt;
253 };
254
255 struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
256 {
257 static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
258 struct usdt_manager *man;
259 struct bpf_map *specs_map, *ip_to_spec_id_map;
260
261 specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
262 ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
263 if (!specs_map || !ip_to_spec_id_map) {
264 pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
265 return ERR_PTR(-ESRCH);
266 }
267
268 man = calloc(1, sizeof(*man));
269 if (!man)
270 return ERR_PTR(-ENOMEM);
271
272 man->specs_map = specs_map;
273 man->ip_to_spec_id_map = ip_to_spec_id_map;
274
275 /* Detect if BPF cookie is supported for kprobes.
276 * We don't need IP-to-ID mapping if we can use BPF cookies.
277 * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
278 */
279 man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
280
281 /* Detect kernel support for automatic refcounting of USDT semaphore.
282 * If this is not supported, USDTs with semaphores will not be supported.
283 * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
284 */
285 man->has_sema_refcnt = access(ref_ctr_sysfs_path, F_OK) == 0;
286
287 return man;
288 }
289
290 void usdt_manager_free(struct usdt_manager *man)
291 {
292 if (IS_ERR_OR_NULL(man))
293 return;
294
295 free(man->free_spec_ids);
296 free(man);
297 }
298
299 static int sanity_check_usdt_elf(Elf *elf, const char *path)
300 {
301 GElf_Ehdr ehdr;
302 int endianness;
303
304 if (elf_kind(elf) != ELF_K_ELF) {
305 pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
306 return -EBADF;
307 }
308
309 switch (gelf_getclass(elf)) {
310 case ELFCLASS64:
311 if (sizeof(void *) != 8) {
312 pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
313 return -EBADF;
314 }
315 break;
316 case ELFCLASS32:
317 if (sizeof(void *) != 4) {
318 pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
319 return -EBADF;
320 }
321 break;
322 default:
323 pr_warn("usdt: unsupported ELF class for '%s'\n", path);
324 return -EBADF;
325 }
326
327 if (!gelf_getehdr(elf, &ehdr))
328 return -EINVAL;
329
330 if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
331 pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
332 path, ehdr.e_type);
333 return -EBADF;
334 }
335
336 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
337 endianness = ELFDATA2LSB;
338 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
339 endianness = ELFDATA2MSB;
340 #else
341 # error "Unrecognized __BYTE_ORDER__"
342 #endif
343 if (endianness != ehdr.e_ident[EI_DATA]) {
344 pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
345 return -EBADF;
346 }
347
348 return 0;
349 }
350
351 static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
352 {
353 Elf_Scn *sec = NULL;
354 size_t shstrndx;
355
356 if (elf_getshdrstrndx(elf, &shstrndx))
357 return -EINVAL;
358
359 /* check if ELF is corrupted and avoid calling elf_strptr if yes */
360 if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
361 return -EINVAL;
362
363 while ((sec = elf_nextscn(elf, sec)) != NULL) {
364 char *name;
365
366 if (!gelf_getshdr(sec, shdr))
367 return -EINVAL;
368
369 name = elf_strptr(elf, shstrndx, shdr->sh_name);
370 if (name && strcmp(sec_name, name) == 0) {
371 *scn = sec;
372 return 0;
373 }
374 }
375
376 return -ENOENT;
377 }
378
379 struct elf_seg {
380 long start;
381 long end;
382 long offset;
383 bool is_exec;
384 };
385
386 static int cmp_elf_segs(const void *_a, const void *_b)
387 {
388 const struct elf_seg *a = _a;
389 const struct elf_seg *b = _b;
390
391 return a->start < b->start ? -1 : 1;
392 }
393
394 static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
395 {
396 GElf_Phdr phdr;
397 size_t n;
398 int i, err;
399 struct elf_seg *seg;
400 void *tmp;
401
402 *seg_cnt = 0;
403
404 if (elf_getphdrnum(elf, &n)) {
405 err = -errno;
406 return err;
407 }
408
409 for (i = 0; i < n; i++) {
410 if (!gelf_getphdr(elf, i, &phdr)) {
411 err = -errno;
412 return err;
413 }
414
415 pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
416 i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
417 (long)phdr.p_type, (long)phdr.p_flags);
418 if (phdr.p_type != PT_LOAD)
419 continue;
420
421 tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
422 if (!tmp)
423 return -ENOMEM;
424
425 *segs = tmp;
426 seg = *segs + *seg_cnt;
427 (*seg_cnt)++;
428
429 seg->start = phdr.p_vaddr;
430 seg->end = phdr.p_vaddr + phdr.p_memsz;
431 seg->offset = phdr.p_offset;
432 seg->is_exec = phdr.p_flags & PF_X;
433 }
434
435 if (*seg_cnt == 0) {
436 pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
437 return -ESRCH;
438 }
439
440 qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
441 return 0;
442 }
443
444 static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
445 {
446 char path[PATH_MAX], line[PATH_MAX], mode[16];
447 size_t seg_start, seg_end, seg_off;
448 struct elf_seg *seg;
449 int tmp_pid, i, err;
450 FILE *f;
451
452 *seg_cnt = 0;
453
454 /* Handle containerized binaries only accessible from
455 * /proc/<pid>/root/<path>. They will be reported as just /<path> in
456 * /proc/<pid>/maps.
457 */
458 if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
459 goto proceed;
460
461 if (!realpath(lib_path, path)) {
462 pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
463 lib_path, -errno);
464 libbpf_strlcpy(path, lib_path, sizeof(path));
465 }
466
467 proceed:
468 sprintf(line, "/proc/%d/maps", pid);
469 f = fopen(line, "r");
470 if (!f) {
471 err = -errno;
472 pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
473 line, lib_path, err);
474 return err;
475 }
476
477 /* We need to handle lines with no path at the end:
478 *
479 * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so
480 * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
481 * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
482 */
483 while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
484 &seg_start, &seg_end, mode, &seg_off, line) == 5) {
485 void *tmp;
486
487 /* to handle no path case (see above) we need to capture line
488 * without skipping any whitespaces. So we need to strip
489 * leading whitespaces manually here
490 */
491 i = 0;
492 while (isblank(line[i]))
493 i++;
494 if (strcmp(line + i, path) != 0)
495 continue;
496
497 pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
498 path, seg_start, seg_end, mode, seg_off);
499
500 /* ignore non-executable sections for shared libs */
501 if (mode[2] != 'x')
502 continue;
503
504 tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
505 if (!tmp) {
506 err = -ENOMEM;
507 goto err_out;
508 }
509
510 *segs = tmp;
511 seg = *segs + *seg_cnt;
512 *seg_cnt += 1;
513
514 seg->start = seg_start;
515 seg->end = seg_end;
516 seg->offset = seg_off;
517 seg->is_exec = true;
518 }
519
520 if (*seg_cnt == 0) {
521 pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
522 lib_path, path, pid);
523 err = -ESRCH;
524 goto err_out;
525 }
526
527 qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
528 err = 0;
529 err_out:
530 fclose(f);
531 return err;
532 }
533
534 static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
535 {
536 struct elf_seg *seg;
537 int i;
538
539 /* for ELF binaries (both executables and shared libraries), we are
540 * given virtual address (absolute for executables, relative for
541 * libraries) which should match address range of [seg_start, seg_end)
542 */
543 for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
544 if (seg->start <= virtaddr && virtaddr < seg->end)
545 return seg;
546 }
547 return NULL;
548 }
549
550 static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
551 {
552 struct elf_seg *seg;
553 int i;
554
555 /* for VMA segments from /proc/<pid>/maps file, provided "address" is
556 * actually a file offset, so it should fall within the logical
557 * offset-based range of [offset_start, offset_end)
558 */
559 for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
560 if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
561 return seg;
562 }
563 return NULL;
564 }
565
566 static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
567 const char *data, size_t name_off, size_t desc_off,
568 struct usdt_note *usdt_note);
569
570 static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
571
572 static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
573 const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
574 struct usdt_target **out_targets, size_t *out_target_cnt)
575 {
576 size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
577 struct elf_seg *segs = NULL, *vma_segs = NULL;
578 struct usdt_target *targets = NULL, *target;
579 long base_addr = 0;
580 Elf_Scn *notes_scn, *base_scn;
581 GElf_Shdr base_shdr, notes_shdr;
582 GElf_Ehdr ehdr;
583 GElf_Nhdr nhdr;
584 Elf_Data *data;
585 int err;
586
587 *out_targets = NULL;
588 *out_target_cnt = 0;
589
590 err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, ¬es_shdr, ¬es_scn);
591 if (err) {
592 pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
593 return err;
594 }
595
596 if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
597 pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
598 return -EINVAL;
599 }
600
601 err = parse_elf_segs(elf, path, &segs, &seg_cnt);
602 if (err) {
603 pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
604 goto err_out;
605 }
606
607 /* .stapsdt.base ELF section is optional, but is used for prelink
608 * offset compensation (see a big comment further below)
609 */
610 if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
611 base_addr = base_shdr.sh_addr;
612
613 data = elf_getdata(notes_scn, 0);
614 off = 0;
615 while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
616 long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
617 struct usdt_note note;
618 struct elf_seg *seg = NULL;
619 void *tmp;
620
621 err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, ¬e);
622 if (err)
623 goto err_out;
624
625 if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
626 continue;
627
628 /* We need to compensate "prelink effect". See [0] for details,
629 * relevant parts quoted here:
630 *
631 * Each SDT probe also expands into a non-allocated ELF note. You can
632 * find this by looking at SHT_NOTE sections and decoding the format;
633 * see below for details. Because the note is non-allocated, it means
634 * there is no runtime cost, and also preserved in both stripped files
635 * and .debug files.
636 *
637 * However, this means that prelink won't adjust the note's contents
638 * for address offsets. Instead, this is done via the .stapsdt.base
639 * section. This is a special section that is added to the text. We
640 * will only ever have one of these sections in a final link and it
641 * will only ever be one byte long. Nothing about this section itself
642 * matters, we just use it as a marker to detect prelink address
643 * adjustments.
644 *
645 * Each probe note records the link-time address of the .stapsdt.base
646 * section alongside the probe PC address. The decoder compares the
647 * base address stored in the note with the .stapsdt.base section's
648 * sh_addr. Initially these are the same, but the section header will
649 * be adjusted by prelink. So the decoder applies the difference to
650 * the probe PC address to get the correct prelinked PC address; the
651 * same adjustment is applied to the semaphore address, if any.
652 *
653 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
654 */
655 usdt_rel_ip = usdt_abs_ip = note.loc_addr;
656 if (base_addr) {
657 usdt_abs_ip += base_addr - note.base_addr;
658 usdt_rel_ip += base_addr - note.base_addr;
659 }
660
661 /* When attaching uprobes (which is what USDTs basically are)
662 * kernel expects file offset to be specified, not a relative
663 * virtual address, so we need to translate virtual address to
664 * file offset, for both ET_EXEC and ET_DYN binaries.
665 */
666 seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
667 if (!seg) {
668 err = -ESRCH;
669 pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
670 usdt_provider, usdt_name, path, usdt_abs_ip);
671 goto err_out;
672 }
673 if (!seg->is_exec) {
674 err = -ESRCH;
675 pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
676 path, seg->start, seg->end, usdt_provider, usdt_name,
677 usdt_abs_ip);
678 goto err_out;
679 }
680 /* translate from virtual address to file offset */
681 usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
682
683 if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
684 /* If we don't have BPF cookie support but need to
685 * attach to a shared library, we'll need to know and
686 * record absolute addresses of attach points due to
687 * the need to lookup USDT spec by absolute IP of
688 * triggered uprobe. Doing this resolution is only
689 * possible when we have a specific PID of the process
690 * that's using specified shared library. BPF cookie
691 * removes the absolute address limitation as we don't
692 * need to do this lookup (we just use BPF cookie as
693 * an index of USDT spec), so for newer kernels with
694 * BPF cookie support libbpf supports USDT attachment
695 * to shared libraries with no PID filter.
696 */
697 if (pid < 0) {
698 pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
699 err = -ENOTSUP;
700 goto err_out;
701 }
702
703 /* vma_segs are lazily initialized only if necessary */
704 if (vma_seg_cnt == 0) {
705 err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
706 if (err) {
707 pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
708 pid, path, err);
709 goto err_out;
710 }
711 }
712
713 seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
714 if (!seg) {
715 err = -ESRCH;
716 pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
717 usdt_provider, usdt_name, path, usdt_rel_ip);
718 goto err_out;
719 }
720
721 usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
722 }
723
724 pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
725 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
726 note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
727 seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
728
729 /* Adjust semaphore address to be a file offset */
730 if (note.sema_addr) {
731 if (!man->has_sema_refcnt) {
732 pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
733 usdt_provider, usdt_name, path);
734 err = -ENOTSUP;
735 goto err_out;
736 }
737
738 seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
739 if (!seg) {
740 err = -ESRCH;
741 pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
742 usdt_provider, usdt_name, path, note.sema_addr);
743 goto err_out;
744 }
745 if (seg->is_exec) {
746 err = -ESRCH;
747 pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
748 path, seg->start, seg->end, usdt_provider, usdt_name,
749 note.sema_addr);
750 goto err_out;
751 }
752
753 usdt_sema_off = note.sema_addr - seg->start + seg->offset;
754
755 pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
756 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
757 path, note.sema_addr, note.base_addr, usdt_sema_off,
758 seg->start, seg->end, seg->offset);
759 }
760
761 /* Record adjusted addresses and offsets and parse USDT spec */
762 tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
763 if (!tmp) {
764 err = -ENOMEM;
765 goto err_out;
766 }
767 targets = tmp;
768
769 target = &targets[target_cnt];
770 memset(target, 0, sizeof(*target));
771
772 target->abs_ip = usdt_abs_ip;
773 target->rel_ip = usdt_rel_ip;
774 target->sema_off = usdt_sema_off;
775
776 /* note.args references strings from the ELF itself, so they can
777 * be referenced safely until elf_end() call
778 */
779 target->spec_str = note.args;
780
781 err = parse_usdt_spec(&target->spec, ¬e, usdt_cookie);
782 if (err)
783 goto err_out;
784
785 target_cnt++;
786 }
787
788 *out_targets = targets;
789 *out_target_cnt = target_cnt;
790 err = target_cnt;
791
792 err_out:
793 free(segs);
794 free(vma_segs);
795 if (err < 0)
796 free(targets);
797 return err;
798 }
799
800 struct bpf_link_usdt {
801 struct bpf_link link;
802
803 struct usdt_manager *usdt_man;
804
805 size_t spec_cnt;
806 int *spec_ids;
807
808 size_t uprobe_cnt;
809 struct {
810 long abs_ip;
811 struct bpf_link *link;
812 } *uprobes;
813 };
814
815 static int bpf_link_usdt_detach(struct bpf_link *link)
816 {
817 struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
818 struct usdt_manager *man = usdt_link->usdt_man;
819 int i;
820
821 for (i = 0; i < usdt_link->uprobe_cnt; i++) {
822 /* detach underlying uprobe link */
823 bpf_link__destroy(usdt_link->uprobes[i].link);
824 /* there is no need to update specs map because it will be
825 * unconditionally overwritten on subsequent USDT attaches,
826 * but if BPF cookies are not used we need to remove entry
827 * from ip_to_spec_id map, otherwise we'll run into false
828 * conflicting IP errors
829 */
830 if (!man->has_bpf_cookie) {
831 /* not much we can do about errors here */
832 (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
833 &usdt_link->uprobes[i].abs_ip);
834 }
835 }
836
837 /* try to return the list of previously used spec IDs to usdt_manager
838 * for future reuse for subsequent USDT attaches
839 */
840 if (!man->free_spec_ids) {
841 /* if there were no free spec IDs yet, just transfer our IDs */
842 man->free_spec_ids = usdt_link->spec_ids;
843 man->free_spec_cnt = usdt_link->spec_cnt;
844 usdt_link->spec_ids = NULL;
845 } else {
846 /* otherwise concat IDs */
847 size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
848 int *new_free_ids;
849
850 new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
851 sizeof(*new_free_ids));
852 /* If we couldn't resize free_spec_ids, we'll just leak
853 * a bunch of free IDs; this is very unlikely to happen and if
854 * system is so exhausted on memory, it's the least of user's
855 * concerns, probably.
856 * So just do our best here to return those IDs to usdt_manager.
857 */
858 if (new_free_ids) {
859 memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
860 usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
861 man->free_spec_ids = new_free_ids;
862 man->free_spec_cnt = new_cnt;
863 }
864 }
865
866 return 0;
867 }
868
869 static void bpf_link_usdt_dealloc(struct bpf_link *link)
870 {
871 struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
872
873 free(usdt_link->spec_ids);
874 free(usdt_link->uprobes);
875 free(usdt_link);
876 }
877
878 static size_t specs_hash_fn(const void *key, void *ctx)
879 {
880 const char *s = key;
881
882 return str_hash(s);
883 }
884
885 static bool specs_equal_fn(const void *key1, const void *key2, void *ctx)
886 {
887 const char *s1 = key1;
888 const char *s2 = key2;
889
890 return strcmp(s1, s2) == 0;
891 }
892
893 static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
894 struct bpf_link_usdt *link, struct usdt_target *target,
895 int *spec_id, bool *is_new)
896 {
897 void *tmp;
898 int err;
899
900 /* check if we already allocated spec ID for this spec string */
901 if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
902 *spec_id = (long)tmp;
903 *is_new = false;
904 return 0;
905 }
906
907 /* otherwise it's a new ID that needs to be set up in specs map and
908 * returned back to usdt_manager when USDT link is detached
909 */
910 tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
911 if (!tmp)
912 return -ENOMEM;
913 link->spec_ids = tmp;
914
915 /* get next free spec ID, giving preference to free list, if not empty */
916 if (man->free_spec_cnt) {
917 *spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
918
919 /* cache spec ID for current spec string for future lookups */
920 err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
921 if (err)
922 return err;
923
924 man->free_spec_cnt--;
925 } else {
926 /* don't allocate spec ID bigger than what fits in specs map */
927 if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
928 return -E2BIG;
929
930 *spec_id = man->next_free_spec_id;
931
932 /* cache spec ID for current spec string for future lookups */
933 err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
934 if (err)
935 return err;
936
937 man->next_free_spec_id++;
938 }
939
940 /* remember new spec ID in the link for later return back to free list on detach */
941 link->spec_ids[link->spec_cnt] = *spec_id;
942 link->spec_cnt++;
943 *is_new = true;
944 return 0;
945 }
946
947 struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
948 pid_t pid, const char *path,
949 const char *usdt_provider, const char *usdt_name,
950 __u64 usdt_cookie)
951 {
952 int i, fd, err, spec_map_fd, ip_map_fd;
953 LIBBPF_OPTS(bpf_uprobe_opts, opts);
954 struct hashmap *specs_hash = NULL;
955 struct bpf_link_usdt *link = NULL;
956 struct usdt_target *targets = NULL;
957 size_t target_cnt;
958 Elf *elf;
959
960 spec_map_fd = bpf_map__fd(man->specs_map);
961 ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
962
963 /* TODO: perform path resolution similar to uprobe's */
964 fd = open(path, O_RDONLY);
965 if (fd < 0) {
966 err = -errno;
967 pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
968 return libbpf_err_ptr(err);
969 }
970
971 elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
972 if (!elf) {
973 err = -EBADF;
974 pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
975 goto err_out;
976 }
977
978 err = sanity_check_usdt_elf(elf, path);
979 if (err)
980 goto err_out;
981
982 /* normalize PID filter */
983 if (pid < 0)
984 pid = -1;
985 else if (pid == 0)
986 pid = getpid();
987
988 /* discover USDT in given binary, optionally limiting
989 * activations to a given PID, if pid > 0
990 */
991 err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
992 usdt_cookie, &targets, &target_cnt);
993 if (err <= 0) {
994 err = (err == 0) ? -ENOENT : err;
995 goto err_out;
996 }
997
998 specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
999 if (IS_ERR(specs_hash)) {
1000 err = PTR_ERR(specs_hash);
1001 goto err_out;
1002 }
1003
1004 link = calloc(1, sizeof(*link));
1005 if (!link) {
1006 err = -ENOMEM;
1007 goto err_out;
1008 }
1009
1010 link->usdt_man = man;
1011 link->link.detach = &bpf_link_usdt_detach;
1012 link->link.dealloc = &bpf_link_usdt_dealloc;
1013
1014 link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
1015 if (!link->uprobes) {
1016 err = -ENOMEM;
1017 goto err_out;
1018 }
1019
1020 for (i = 0; i < target_cnt; i++) {
1021 struct usdt_target *target = &targets[i];
1022 struct bpf_link *uprobe_link;
1023 bool is_new;
1024 int spec_id;
1025
1026 /* Spec ID can be either reused or newly allocated. If it is
1027 * newly allocated, we'll need to fill out spec map, otherwise
1028 * entire spec should be valid and can be just used by a new
1029 * uprobe. We reuse spec when USDT arg spec is identical. We
1030 * also never share specs between two different USDT
1031 * attachments ("links"), so all the reused specs already
1032 * share USDT cookie value implicitly.
1033 */
1034 err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
1035 if (err)
1036 goto err_out;
1037
1038 if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
1039 err = -errno;
1040 pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
1041 spec_id, usdt_provider, usdt_name, path, err);
1042 goto err_out;
1043 }
1044 if (!man->has_bpf_cookie &&
1045 bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
1046 err = -errno;
1047 if (err == -EEXIST) {
1048 pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
1049 spec_id, usdt_provider, usdt_name, path);
1050 } else {
1051 pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
1052 target->abs_ip, spec_id, usdt_provider, usdt_name,
1053 path, err);
1054 }
1055 goto err_out;
1056 }
1057
1058 opts.ref_ctr_offset = target->sema_off;
1059 opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
1060 uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
1061 target->rel_ip, &opts);
1062 err = libbpf_get_error(uprobe_link);
1063 if (err) {
1064 pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
1065 i, usdt_provider, usdt_name, path, err);
1066 goto err_out;
1067 }
1068
1069 link->uprobes[i].link = uprobe_link;
1070 link->uprobes[i].abs_ip = target->abs_ip;
1071 link->uprobe_cnt++;
1072 }
1073
1074 free(targets);
1075 hashmap__free(specs_hash);
1076 elf_end(elf);
1077 close(fd);
1078
1079 return &link->link;
1080
1081 err_out:
1082 if (link)
1083 bpf_link__destroy(&link->link);
1084 free(targets);
1085 hashmap__free(specs_hash);
1086 if (elf)
1087 elf_end(elf);
1088 close(fd);
1089 return libbpf_err_ptr(err);
1090 }
1091
1092 /* Parse out USDT ELF note from '.note.stapsdt' section.
1093 * Logic inspired by perf's code.
1094 */
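/* Note "description" layout, as consumed below: three native-word (long)
 * addresses (location, base, semaphore; semaphore is 0 if absent), followed
 * by three zero-terminated strings (provider, name, args spec).
 */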
1095 static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
1096 const char *data, size_t name_off, size_t desc_off,
1097 struct usdt_note *note)
1098 {
1099 const char *provider, *name, *args;
1100 long addrs[3];
1101 size_t len;
1102
1103 /* sanity check USDT note name and type first */
1104 if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
1105 return -EINVAL;
1106 if (nhdr->n_type != USDT_NOTE_TYPE)
1107 return -EINVAL;
1108
1109 /* sanity check USDT note contents ("description" in ELF terminology) */
1110 len = nhdr->n_descsz;
1111 data = data + desc_off;
1112
1113 /* +3 is the very minimum required to store three empty strings */
1114 if (len < sizeof(addrs) + 3)
1115 return -EINVAL;
1116
1117 /* get location, base, and semaphore addrs */
1118 memcpy(&addrs, data, sizeof(addrs));
1119
1120 /* parse string fields: provider, name, args */
1121 provider = data + sizeof(addrs);
1122
1123 name = (const char *)memchr(provider, '\0', data + len - provider);
1124 if (!name) /* non-zero-terminated provider */
1125 return -EINVAL;
1126 name++;
1127 if (name >= data + len || *name == '\0') /* missing or empty name */
1128 return -EINVAL;
1129
1130 args = memchr(name, '\0', data + len - name);
1131 if (!args) /* non-zero-terminated name */
1132 return -EINVAL;
1133 ++args;
1134 if (args >= data + len) /* missing arguments spec */
1135 return -EINVAL;
1136
1137 note->provider = provider;
1138 note->name = name;
1139 if (*args == '\0' || *args == ':')
1140 note->args = "";
1141 else
1142 note->args = args;
1143 note->loc_addr = addrs[0];
1144 note->base_addr = addrs[1];
1145 note->sema_addr = addrs[2];
1146
1147 return 0;
1148 }
1149
1150 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
1151
1152 static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
1153 {
1154 const char *s;
1155 int len;
1156
1157 spec->usdt_cookie = usdt_cookie;
1158 spec->arg_cnt = 0;
1159
1160 s = note->args;
1161 while (s[0]) {
1162 if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
1163 pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
1164 USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
1165 return -E2BIG;
1166 }
1167
1168 len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]);
1169 if (len < 0)
1170 return len;
1171
1172 s += len;
1173 spec->arg_cnt++;
1174 }
1175
1176 return 0;
1177 }
1178
1179 /* Architecture-specific logic for parsing USDT argument location specs */
1180
1181 #if defined(__x86_64__) || defined(__i386__)
1182
1183 static int calc_pt_regs_off(const char *reg_name)
1184 {
1185 static struct {
1186 const char *names[4];
1187 size_t pt_regs_off;
1188 } reg_map[] = {
1189 #ifdef __x86_64__
1190 #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
1191 #else
1192 #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
1193 #endif
1194 { {"rip", "eip", "", ""}, reg_off(rip, eip) },
1195 { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
1196 { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
1197 { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
1198 { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
1199 { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
1200 { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
1201 { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
1202 { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
1203 #undef reg_off
1204 #ifdef __x86_64__
1205 { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
1206 { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
1207 { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
1208 { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
1209 { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
1210 { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
1211 { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
1212 { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
1213 #endif
1214 };
1215 int i, j;
1216
1217 for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1218 for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
1219 if (strcmp(reg_name, reg_map[i].names[j]) == 0)
1220 return reg_map[i].pt_regs_off;
1221 }
1222 }
1223
1224 pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1225 return -ENOENT;
1226 }
1227
1228 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
1229 {
1230 char *reg_name = NULL;
1231 int arg_sz, len, reg_off;
1232 long off;
1233
1234 if (sscanf(arg_str, " %d @ %ld ( %%%m[^)] ) %n", &arg_sz, &off, ®_name, &len) == 3) {
1235 /* Memory dereference case, e.g., -4@-20(%rbp) */
1236 arg->arg_type = USDT_ARG_REG_DEREF;
1237 arg->val_off = off;
1238 reg_off = calc_pt_regs_off(reg_name);
1239 free(reg_name);
1240 if (reg_off < 0)
1241 return reg_off;
1242 arg->reg_off = reg_off;
1243 } else if (sscanf(arg_str, " %d @ %%%ms %n", &arg_sz, ®_name, &len) == 2) {
1244 /* Register read case, e.g., -4@%eax */
1245 arg->arg_type = USDT_ARG_REG;
1246 arg->val_off = 0;
1247
1248 reg_off = calc_pt_regs_off(reg_name);
1249 free(reg_name);
1250 if (reg_off < 0)
1251 return reg_off;
1252 arg->reg_off = reg_off;
1253 } else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) {
1254 /* Constant value case, e.g., 4@$71 */
1255 arg->arg_type = USDT_ARG_CONST;
1256 arg->val_off = off;
1257 arg->reg_off = 0;
1258 } else {
1259 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1260 return -EINVAL;
1261 }
1262
1263 arg->arg_signed = arg_sz < 0;
1264 if (arg_sz < 0)
1265 arg_sz = -arg_sz;
1266
1267 switch (arg_sz) {
1268 case 1: case 2: case 4: case 8:
1269 arg->arg_bitshift = 64 - arg_sz * 8;
1270 break;
1271 default:
1272 pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1273 arg_num, arg_str, arg_sz);
1274 return -EINVAL;
1275 }
1276
1277 return len;
1278 }
1279
1280 #elif defined(__s390x__)
1281
1282 /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
1283
1284 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
1285 {
1286 unsigned int reg;
1287 int arg_sz, len;
1288 long off;
1289
1290 if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, ®, &len) == 3) {
1291 /* Memory dereference case, e.g., -2@-28(%r15) */
1292 arg->arg_type = USDT_ARG_REG_DEREF;
1293 arg->val_off = off;
1294 if (reg > 15) {
1295 pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1296 return -EINVAL;
1297 }
1298 arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1299 } else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, ®, &len) == 2) {
1300 /* Register read case, e.g., -8@%r0 */
1301 arg->arg_type = USDT_ARG_REG;
1302 arg->val_off = 0;
1303 if (reg > 15) {
1304 pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1305 return -EINVAL;
1306 }
1307 arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1308 } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
1309 /* Constant value case, e.g., 4@71 */
1310 arg->arg_type = USDT_ARG_CONST;
1311 arg->val_off = off;
1312 arg->reg_off = 0;
1313 } else {
1314 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1315 return -EINVAL;
1316 }
1317
1318 arg->arg_signed = arg_sz < 0;
1319 if (arg_sz < 0)
1320 arg_sz = -arg_sz;
1321
1322 switch (arg_sz) {
1323 case 1: case 2: case 4: case 8:
1324 arg->arg_bitshift = 64 - arg_sz * 8;
1325 break;
1326 default:
1327 pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1328 arg_num, arg_str, arg_sz);
1329 return -EINVAL;
1330 }
1331
1332 return len;
1333 }
1334
1335 #elif defined(__aarch64__)
1336
1337 static int calc_pt_regs_off(const char *reg_name)
1338 {
1339 int reg_num;
1340
1341 if (sscanf(reg_name, "x%d", ®_num) == 1) {
1342 if (reg_num >= 0 && reg_num < 31)
1343 return offsetof(struct user_pt_regs, regs[reg_num]);
1344 } else if (strcmp(reg_name, "sp") == 0) {
1345 return offsetof(struct user_pt_regs, sp);
1346 }
1347 pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1348 return -ENOENT;
1349 }
1350
1351 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
1352 {
1353 char *reg_name = NULL;
1354 int arg_sz, len, reg_off;
1355 long off;
1356
1357 if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, ®_name, &off, &len) == 3) {
1358 /* Memory dereference case, e.g., -4@[sp, 96] */
1359 arg->arg_type = USDT_ARG_REG_DEREF;
1360 arg->val_off = off;
1361 reg_off = calc_pt_regs_off(reg_name);
1362 free(reg_name);
1363 if (reg_off < 0)
1364 return reg_off;
1365 arg->reg_off = reg_off;
1366 } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, ®_name, &len) == 2) {
1367 /* Memory dereference case, e.g., -4@[sp] */
1368 arg->arg_type = USDT_ARG_REG_DEREF;
1369 arg->val_off = 0;
1370 reg_off = calc_pt_regs_off(reg_name);
1371 free(reg_name);
1372 if (reg_off < 0)
1373 return reg_off;
1374 arg->reg_off = reg_off;
1375 } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
1376 /* Constant value case, e.g., 4@5 */
1377 arg->arg_type = USDT_ARG_CONST;
1378 arg->val_off = off;
1379 arg->reg_off = 0;
1380 } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, ®_name, &len) == 2) {
1381 /* Register read case, e.g., -8@x4 */
1382 arg->arg_type = USDT_ARG_REG;
1383 arg->val_off = 0;
1384 reg_off = calc_pt_regs_off(reg_name);
1385 free(reg_name);
1386 if (reg_off < 0)
1387 return reg_off;
1388 arg->reg_off = reg_off;
1389 } else {
1390 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1391 return -EINVAL;
1392 }
1393
1394 arg->arg_signed = arg_sz < 0;
1395 if (arg_sz < 0)
1396 arg_sz = -arg_sz;
1397
1398 switch (arg_sz) {
1399 case 1: case 2: case 4: case 8:
1400 arg->arg_bitshift = 64 - arg_sz * 8;
1401 break;
1402 default:
1403 pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1404 arg_num, arg_str, arg_sz);
1405 return -EINVAL;
1406 }
1407
1408 return len;
1409 }
1410
1411 #elif defined(__riscv)
1412
1413 static int calc_pt_regs_off(const char *reg_name)
1414 {
1415 static struct {
1416 const char *name;
1417 size_t pt_regs_off;
1418 } reg_map[] = {
1419 { "ra", offsetof(struct user_regs_struct, ra) },
1420 { "sp", offsetof(struct user_regs_struct, sp) },
1421 { "gp", offsetof(struct user_regs_struct, gp) },
1422 { "tp", offsetof(struct user_regs_struct, tp) },
1423 { "a0", offsetof(struct user_regs_struct, a0) },
1424 { "a1", offsetof(struct user_regs_struct, a1) },
1425 { "a2", offsetof(struct user_regs_struct, a2) },
1426 { "a3", offsetof(struct user_regs_struct, a3) },
1427 { "a4", offsetof(struct user_regs_struct, a4) },
1428 { "a5", offsetof(struct user_regs_struct, a5) },
1429 { "a6", offsetof(struct user_regs_struct, a6) },
1430 { "a7", offsetof(struct user_regs_struct, a7) },
1431 { "s0", offsetof(struct user_regs_struct, s0) },
1432 { "s1", offsetof(struct user_regs_struct, s1) },
1433 { "s2", offsetof(struct user_regs_struct, s2) },
1434 { "s3", offsetof(struct user_regs_struct, s3) },
1435 { "s4", offsetof(struct user_regs_struct, s4) },
1436 { "s5", offsetof(struct user_regs_struct, s5) },
1437 { "s6", offsetof(struct user_regs_struct, s6) },
1438 { "s7", offsetof(struct user_regs_struct, s7) },
1439 { "s8", offsetof(struct user_regs_struct, rv_s8) },
1440 { "s9", offsetof(struct user_regs_struct, s9) },
1441 { "s10", offsetof(struct user_regs_struct, s10) },
1442 { "s11", offsetof(struct user_regs_struct, s11) },
1443 { "t0", offsetof(struct user_regs_struct, t0) },
1444 { "t1", offsetof(struct user_regs_struct, t1) },
1445 { "t2", offsetof(struct user_regs_struct, t2) },
1446 { "t3", offsetof(struct user_regs_struct, t3) },
1447 { "t4", offsetof(struct user_regs_struct, t4) },
1448 { "t5", offsetof(struct user_regs_struct, t5) },
1449 { "t6", offsetof(struct user_regs_struct, t6) },
1450 };
1451 int i;
1452
1453 for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1454 if (strcmp(reg_name, reg_map[i].name) == 0)
1455 return reg_map[i].pt_regs_off;
1456 }
1457
1458 pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1459 return -ENOENT;
1460 }
1461
1462 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
1463 {
1464 char *reg_name = NULL;
1465 int arg_sz, len, reg_off;
1466 long off;
1467
1468 if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, ®_name, &len) == 3) {
1469 /* Memory dereference case, e.g., -8@-88(s0) */
1470 arg->arg_type = USDT_ARG_REG_DEREF;
1471 arg->val_off = off;
1472 reg_off = calc_pt_regs_off(reg_name);
1473 free(reg_name);
1474 if (reg_off < 0)
1475 return reg_off;
1476 arg->reg_off = reg_off;
1477 } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
1478 /* Constant value case, e.g., 4@5 */
1479 arg->arg_type = USDT_ARG_CONST;
1480 arg->val_off = off;
1481 arg->reg_off = 0;
1482 } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, ®_name, &len) == 2) {
1483 /* Register read case, e.g., -8@a1 */
1484 arg->arg_type = USDT_ARG_REG;
1485 arg->val_off = 0;
1486 reg_off = calc_pt_regs_off(reg_name);
1487 free(reg_name);
1488 if (reg_off < 0)
1489 return reg_off;
1490 arg->reg_off = reg_off;
1491 } else {
1492 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1493 return -EINVAL;
1494 }
1495
1496 arg->arg_signed = arg_sz < 0;
1497 if (arg_sz < 0)
1498 arg_sz = -arg_sz;
1499
1500 switch (arg_sz) {
1501 case 1: case 2: case 4: case 8:
1502 arg->arg_bitshift = 64 - arg_sz * 8;
1503 break;
1504 default:
1505 pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1506 arg_num, arg_str, arg_sz);
1507 return -EINVAL;
1508 }
1509
1510 return len;
1511 }
1512
1513 #else
1514
1515 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
1516 {
1517 pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
1518 return -ENOTSUP;
1519 }
1520
1521 #endif
1522