// SPDX-License-Identifier: GPL-2.0-only

#include "util/cgroup.h"
#include "util/data.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <ctype.h> /* isdigit() */
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h> /* worker threads in perf_event__synthesize_threads() */
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/<pid>/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid, bool *kernel)
{
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids, *vmpeak, *threads;

	*tgid = -1;
	*ppid = -1;

	if (pid)
		snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
	else
		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);

	fd = open(bf, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   tid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(name ?: bf, "Tgid:");
	ppids = strstr(tgids ?: bf, "PPid:");
	vmpeak = strstr(ppids ?: bf, "VmPeak:");

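	/*
	 * Kernel threads have no user address space, so their
	 * /proc/<pid>/status lacks a "VmPeak:" line while still carrying a
	 * "Threads:" line; that asymmetry is the kernel-thread test below.
	 */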
	if (vmpeak)
		threads = NULL;
	else
		threads = strstr(ppids ?: bf, "Threads:");

	if (name) {
		char *nl;

		name = skip_spaces(name + 5); /* strlen("Name:") */
		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", tid);
	}

	if (tgids) {
		tgids += 5; /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", tid);
	}

	if (ppids) {
		ppids += 5; /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", tid);
	}

	if (!vmpeak && threads)
		*kernel = true;
	else
		*kernel = false;

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid, bool *kernel)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid, kernel) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

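	/*
	 * Shrink the record: keep only the used, u64-aligned portion of the
	 * fixed-size comm[] buffer and append room for the per-machine
	 * sample-id trailer (id_hdr_size).
	 */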
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = tid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;
	bool kernel_thread;

	if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
				     &kernel_thread) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

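/*
 * /proc/<pid>/ns/<ns> is a symlink whose device and inode numbers uniquely
 * identify the namespace; stat() it to record them.
 */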
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	snprintf(proc_ns, sizeof(proc_ns), "/proc/%d/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
					 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
					 machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

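/*
 * Parse one line of /proc/<pid>/maps, e.g.:
 *
 *   00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat
 *   ^start   ^end     ^perms ^offset ^maj:min ^inode ^pathname
 *
 * Returns true if a complete entry was parsed.
 */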
static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
				u32 *prot, u32 *flags, __u64 *offset,
				u32 *maj, u32 *min,
				__u64 *inode,
				ssize_t pathname_size, char *pathname)
{
	__u64 temp;
	int ch;
	char *start_pathname = pathname;

	if (io__get_hex(io, start) != '-')
		return false;
	if (io__get_hex(io, end) != ' ')
		return false;

	/* map protection and flags bits */
	*prot = 0;
	ch = io__get_char(io);
	if (ch == 'r')
		*prot |= PROT_READ;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'w')
		*prot |= PROT_WRITE;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'x')
		*prot |= PROT_EXEC;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 's')
		*flags = MAP_SHARED;
	else if (ch == 'p')
		*flags = MAP_PRIVATE;
	else
		return false;
	if (io__get_char(io) != ' ')
		return false;

	if (io__get_hex(io, offset) != ' ')
		return false;

	if (io__get_hex(io, &temp) != ':')
		return false;
	*maj = temp;
	if (io__get_hex(io, &temp) != ' ')
		return false;
	*min = temp;

	ch = io__get_dec(io, inode);
	if (ch != ' ') {
		*pathname = '\0';
		return ch == '\n';
	}
	do {
		ch = io__get_char(io);
	} while (ch == ' ');
	while (true) {
		if (ch < 0)
			return false;
		if (ch == '\0' || ch == '\n' ||
		    (pathname + 1 - start_pathname) >= pathname_size) {
			*pathname = '\0';
			return true;
		}
		*pathname++ = ch;
		ch = io__get_char(io);
	}
}

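/*
 * Fill in the build ID for an MMAP2 record: read it from sysfs for the
 * kernel, otherwise from the mapped file, entering the target's mount
 * namespace if necessary. On failure the record is simply left without
 * the build-id flag set.
 */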
static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
					     struct machine *machine,
					     bool is_kernel)
{
	struct build_id bid;
	struct nsinfo *nsi;
	struct nscookie nc;
	struct dso *dso = NULL;
	struct dso_id id;
	int rc;

	if (is_kernel) {
		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
		goto out;
	}

	id.maj = event->maj;
	id.min = event->min;
	id.ino = event->ino;
	id.ino_generation = event->ino_generation;

	dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
	if (dso && dso->has_build_id) {
		bid = dso->bid;
		rc = 0;
		goto out;
	}

	nsi = nsinfo__new(event->pid);
	nsinfo__mountns_enter(nsi, &nc);

	rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;

	nsinfo__mountns_exit(&nc);
	nsinfo__put(nsi);

out:
	if (rc == 0) {
		memcpy(event->build_id, bid.data, sizeof(bid.data));
		event->build_id_size = (u8) bid.size;
		event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
		event->__reserved_1 = 0;
		event->__reserved_2 = 0;

		if (dso && !dso->has_build_id)
			dso__set_build_id(dso, &bid);
	} else {
		if (event->filename[0] == '/') {
			pr_debug2("Failed to read build ID for %s\n",
				  event->filename);
		}
	}
	dso__put(dso);
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	unsigned long long t;
	char bf[BUFSIZ];
	struct io io;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	io.fd = open(bf, O_RDONLY, 0);
	if (io.fd < 0) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}
	io__init(&io, io.fd, bf, sizeof(bf));

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (!io.eof) {
		static const char anonstr[] = "//anon";
		size_t size, aligned_size;

		/* ensure null termination since stack will be reused. */
		event->mmap2.filename[0] = '\0';

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
		if (!read_proc_maps_line(&io,
					 &event->mmap2.start,
					 &event->mmap2.len,
					 &event->mmap2.prot,
					 &event->mmap2.flags,
					 &event->mmap2.pgoff,
					 &event->mmap2.maj,
					 &event->mmap2.min,
					 &event->mmap2.ino,
					 sizeof(event->mmap2.filename),
					 event->mmap2.filename))
			continue;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   machine->root_dir, pid, pid);
			truncation = true;
			goto out;
		}

		event->mmap2.ino_generation = 0;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if ((event->mmap2.prot & PROT_EXEC) == 0) {
			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

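		/*
		 * Reached on every loop iteration; the timeout path above
		 * also jumps here so that one final, truncated record is
		 * emitted with PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT set.
		 */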
out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(event->mmap2.filename, ""))
			strcpy(event->mmap2.filename, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
			     hugetlbfs_mnt_len)) {
			strcpy(event->mmap2.filename, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(event->mmap2.filename) + 1;
		aligned_size = PERF_ALIGN(size, sizeof(u64));
		/* mmap2.len still holds the end address parsed above; turn it into a length. */
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - aligned_size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
		       (aligned_size - size));
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (symbol_conf.buildid_mmap2)
			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	close(io.fd);
	return rc;
}

#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
					 union perf_event *event,
					 char *path, size_t mount_len,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
	size_t path_len = strlen(path) - mount_len + 1;
	struct {
		struct file_handle fh;
		uint64_t cgroup_id;
	} handle;
	int mount_id;

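	/* Pad the path with NULs up to the next u64 boundary. */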
	while (path_len % sizeof(u64))
		path[mount_len + path_len++] = '\0';

	memset(&event->cgroup, 0, event_size);

	event->cgroup.header.type = PERF_RECORD_CGROUP;
	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
		pr_debug("name_to_handle_at failed: %s\n", path);
		return -1;
	}

	event->cgroup.id = handle.cgroup_id;
	strncpy(event->cgroup.path, path + mount_len, path_len);
	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
		pr_debug("process synth event failed\n");
		return -1;
	}

	return 0;
}

static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
					union perf_event *event,
					char *path, size_t mount_len,
					perf_event__handler_t process,
					struct machine *machine)
{
	size_t pos = strlen(path);
	DIR *d;
	struct dirent *dent;
	int ret = 0;

	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
					  process, machine) < 0)
		return -1;

	d = opendir(path);
	if (d == NULL) {
		pr_debug("failed to open directory: %s\n", path);
		return -1;
	}

	while ((dent = readdir(d)) != NULL) {
		if (dent->d_type != DT_DIR)
			continue;
		if (!strcmp(dent->d_name, ".") ||
		    !strcmp(dent->d_name, ".."))
			continue;

		/* any sane path should be less than PATH_MAX */
		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
			continue;

		if (path[pos - 1] != '/')
			strcat(path, "/");
		strcat(path, dent->d_name);

		ret = perf_event__walk_cgroup_tree(tool, event, path,
						   mount_len, process, machine);
		if (ret < 0)
			break;

		path[pos] = '\0';
	}

	closedir(d);
	return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	union perf_event event;
	char cgrp_root[PATH_MAX];
	size_t mount_len; /* length of mount point in the path */

	if (!tool || !tool->cgroup_events)
		return 0;

	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
		pr_debug("cannot find cgroup mount point\n");
		return -1;
	}

	mount_len = strlen(cgrp_root);
	/* make sure the path starts with a slash (after mount point) */
	strcat(cgrp_root, "/");

	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
					 process, machine) < 0)
		return -1;

	return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
				   perf_event__handler_t process __maybe_unused,
				   struct machine *machine __maybe_unused)
{
	return -1;
}
#endif

int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *maps = machine__kernel_maps(machine);
	union perf_event *event;
	size_t size = symbol_conf.buildid_mmap2 ?
			sizeof(event->mmap2) : sizeof(event->mmap);

	event = zalloc(size + machine->id_hdr_size);
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	maps__for_each_entry(maps, pos) {
		if (!__map__is_kmodule(pos))
			continue;

		if (symbol_conf.buildid_mmap2) {
			size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
			event->mmap2.header.type = PERF_RECORD_MMAP2;
			event->mmap2.header.size = (sizeof(event->mmap2) -
						    (sizeof(event->mmap2.filename) - size));
			memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
			event->mmap2.header.size += machine->id_hdr_size;
			event->mmap2.start = pos->start;
			event->mmap2.len = pos->end - pos->start;
			event->mmap2.pid = machine->pid;

			memcpy(event->mmap2.filename, pos->dso->long_name,
			       pos->dso->long_name_len + 1);

			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
		} else {
			size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
			event->mmap.header.type = PERF_RECORD_MMAP;
			event->mmap.header.size = (sizeof(event->mmap) -
						   (sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
			event->mmap.header.size += machine->id_hdr_size;
			event->mmap.start = pos->start;
			event->mmap.len = pos->end - pos->start;
			event->mmap.pid = machine->pid;

			memcpy(event->mmap.filename, pos->dso->long_name,
			       pos->dso->long_name_len + 1);
		}

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int filter_task(const struct dirent *dirent)
{
	return isdigit(dirent->d_name[0]);
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full, perf_event__handler_t process,
				      struct perf_tool *tool, struct machine *machine,
				      bool needs_mmap, bool mmap_data)
{
	char filename[PATH_MAX];
	struct dirent **dirent;
	pid_t tgid, ppid;
	int rc = 0;
	int i, n;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_maps()
		 */
		if (pid == tgid && needs_mmap &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	n = scandir(filename, &dirent, filter_task, NULL);
	if (n < 0)
		return n;

	for (i = 0; i < n; i++) {
		char *end;
		pid_t _pid;
		bool kernel_thread = false;

		_pid = strtol(dirent[i]->d_name, &end, 10);
		if (*end)
			continue;

		/* some threads may exit just after the scan; ignore them */
		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
					     &tgid, &ppid, &kernel_thread) != 0)
			continue;

		rc = -1;
		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid && !kernel_thread && needs_mmap) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct perf_thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool needs_mmap, bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       perf_thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       needs_mmap, mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       needs_mmap, mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool needs_mmap,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, needs_mmap, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool needs_mmap;
	bool mmap_data;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine,
					 args->needs_mmap, args->mmap_data,
					 args->dirent,
					 args->start, args->num);
	return NULL;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool needs_mmap, bool mmap_data,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, filter_task, NULL);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine,
						       needs_mmap, mmap_data,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;
	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;

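	/*
	 * Split the n /proc entries across thread_nr workers: the first
	 * n % thread_nr workers take num_per_thread + 1 entries each, the
	 * rest take num_per_thread, so every entry is covered exactly once.
	 */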
	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].needs_mmap = needs_mmap;
		args[i].mmap_data = mmap_data;
		args[i].dirent = dirent;
	}
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return err;
}

int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
						perf_event__handler_t process,
						struct machine *machine)
{
	union perf_event *event;
	size_t size = symbol_conf.buildid_mmap2 ?
			sizeof(event->mmap2) : sizeof(event->mmap);
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;

	if (map == NULL)
		return -1;

	kmap = map__kmap(map);
	if (!kmap->ref_reloc_sym)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	event = zalloc(size + machine->id_hdr_size);
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	if (symbol_conf.buildid_mmap2) {
		size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.header.type = PERF_RECORD_MMAP2;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
		event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
		event->mmap2.start = map->start;
		event->mmap2.len = map->end - event->mmap2.start;
		event->mmap2.pid = machine->pid;

		perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
	} else {
		size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
		event->mmap.pgoff = kmap->ref_reloc_sym->addr;
		event->mmap.start = map->start;
		event->mmap.len = map->end - event->mmap.start;
		event->mmap.pid = machine->pid;
	}

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int err;

	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct perf_thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
		char *comm = perf_thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = perf_thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

struct synthesize_cpu_map_data {
	const struct perf_cpu_map *map;
	int nr;
	int min_cpu;
	int max_cpu;
	int has_any_cpu;
	int type;
	size_t size;
	struct perf_record_cpu_map_data *data;
};

static void synthesize_cpus(struct synthesize_cpu_map_data *data)
{
	data->data->type = PERF_CPU_MAP__CPUS;
	data->data->cpus_data.nr = data->nr;
	for (int i = 0; i < data->nr; i++)
		data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
}

static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
	int idx;
	struct perf_cpu cpu;

	/* Due to padding, the 4 bytes per entry mask variant is always smaller. */
	data->data->type = PERF_CPU_MAP__MASK;
	data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
	data->data->mask32_data.long_size = 4;

	perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
		int bit_word = cpu.cpu / 32;
		u32 bit_mask = 1U << (cpu.cpu & 31);

		data->data->mask32_data.mask[bit_word] |= bit_mask;
	}
}

static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
{
	data->data->type = PERF_CPU_MAP__RANGE_CPUS;
	data->data->range_cpu_data.any_cpu = data->has_any_cpu;
	data->data->range_cpu_data.start_cpu = data->min_cpu;
	data->data->range_cpu_data.end_cpu = data->max_cpu;
}

static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
				 size_t header_size)
{
	size_t size_cpus, size_mask;

	syn_data->nr = perf_cpu_map__nr(syn_data->map);
	syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;

	syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
	syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
	if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
		/* A consecutive range of CPUs can be encoded using a range. */
		assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
		syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
		syn_data->size = header_size + sizeof(u64);
		return zalloc(syn_data->size);
	}

	size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
	/* Due to padding, the 4 bytes per entry mask variant is always smaller. */
	size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
		BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
	if (syn_data->has_any_cpu || size_cpus < size_mask) {
		/* Follow the CPU map encoding. */
		syn_data->type = PERF_CPU_MAP__CPUS;
		syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
		return zalloc(syn_data->size);
	}
	/* Encode using a bitmask. */
	syn_data->type = PERF_CPU_MAP__MASK;
	syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
	return zalloc(syn_data->size);
}
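
/*
 * Illustrative examples of the encoding choice above (not exhaustive):
 * CPUs 0-3 form a consecutive range and fit a single-u64
 * PERF_CPU_MAP__RANGE_CPUS; a sparse set such as {0, 64, 128} is cheaper
 * as a PERF_CPU_MAP__CPUS list; a dense but non-consecutive set with a
 * high max CPU may instead be smaller as a PERF_CPU_MAP__MASK bitmap.
 */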

static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus(data);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask(data);
		break;
	case PERF_CPU_MAP__RANGE_CPUS:
		synthesize_range_cpus(data);
		break;
	default:
		break;
	}
}

static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
	struct synthesize_cpu_map_data syn_data = { .map = map };
	struct perf_record_cpu_map *event;

	event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
	if (!event)
		return NULL;

	syn_data.data = &event->data;
	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = syn_data.size;
	cpu_map_data__synthesize(&syn_data);
	return event;
}

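/*
 * Illustrative usage (a sketch, not taken from a real caller): synthesize
 * a PERF_RECORD_CPU_MAP for CPUs 0-3 and feed it to a handler:
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new("0-3");
 *
 *	if (map) {
 *		perf_event__synthesize_cpu_map(tool, map, process, machine);
 *		perf_cpu_map__put(map);
 *	}
 */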
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   const struct perf_cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct perf_record_cpu_map *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct perf_record_stat_config *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE, config->aggr_mode)
	ADD(INTERVAL, config->interval)
	ADD(SCALE, config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				struct perf_cpu cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct perf_record_stat event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id = id;
	event.cpu = cpu.cpu;
	event.thread = thread;
	event.val = count->val;
	event.ena = count->ena;
	event.run = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct perf_record_stat_round event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
	size_t sz, result = sizeof(struct perf_record_sample);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample_read_value_size(read_format);
			result += sz * sample->read.group.nr;
		} else {
			result += sizeof(u64);
			if (read_format & PERF_FORMAT_LOST)
				result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr, hw_idx */
		sz += 2 * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT_TYPE)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CGROUP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_AUX) {
		result += sizeof(u64);
		result += sample->aux_sample.size;
	}

	return result;
}

void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
					       __u64 *array, u64 type __maybe_unused)
{
	*array = data->weight;
}

static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
				     const struct perf_sample *sample)
{
	size_t sz = sample_read_value_size(read_format);
	struct sample_read_value *v = sample->read.group.values;

	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		memcpy(array, v, sz);
		array = (void *)array + sz;
	}
	return array;
}

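/*
 * Write out a sample in the kernel's PERF_RECORD_SAMPLE layout. The field
 * order here must stay in sync with perf_event__sample_event_size() above
 * and with the kernel's perf_output_sample().
 */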
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
				  const struct perf_sample *sample)
{
	__u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			array = copy_read_group_values(array, read_format,
						       sample);
		} else {
			*array = sample->read.one.id;
			array++;

			if (read_format & PERF_FORMAT_LOST) {
				*array = sample->read.one.lost;
				array++;
			}
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr, hw_idx */
		sz += 2 * sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
		arch_perf_synthesize_sample_weight(sample, array, type);
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	if (type & PERF_SAMPLE_CGROUP) {
		*array = sample->cgroup;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
		*array = sample->data_page_size;
		array++;
	}

	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
		*array = sample->code_page_size;
		array++;
	}

	if (type & PERF_SAMPLE_AUX) {
		sz = sample->aux_sample.size;
		*array++ = sz;
		memcpy(array, sample->aux_sample.data, sz);
		array = (void *)array + sz;
	}

	return 0;
}

int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
{
	__u64 *start = array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	return (void *)array - (void *)start;
}

int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				      struct evlist *evlist, struct machine *machine, size_t from)
{
	union perf_event *ev;
	struct evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n, pos;
	size_t e1_sz = sizeof(struct id_index_entry);
	size_t e2_sz = sizeof(struct id_index_entry_2);
	size_t etot_sz = e1_sz + e2_sz;
	bool e2_needed = false;
	int err;

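	/*
	 * header.size is a u16, so one PERF_RECORD_ID_INDEX can carry at
	 * most max_nr entry pairs; the loop below flushes a full record and
	 * keeps filling the next one until all ids are emitted.
	 */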
	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;

	pos = 0;
	evlist__for_each_entry(evlist, evsel) {
		if (pos++ < from)
			continue;
		nr += evsel->core.ids;
	}

	if (!nr)
		return 0;

	pr_debug2("Synthesizing id index\n");

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct perf_record_id_index) + n * etot_sz;
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	sz = sizeof(struct perf_record_id_index) + n * e1_sz;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.nr = n;

	pos = 0;
	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		if (pos++ < from)
			continue;
		for (j = 0; j < evsel->core.ids; j++, i++) {
			struct id_index_entry *e;
			struct id_index_entry_2 *e2;
			struct perf_sample_id *sid;

			if (i >= n) {
				ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
				e2_needed = false;
			}

			e = &ev->id_index.entries[i];

			e->id = evsel->core.id[j];

			sid = evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu.cpu;
			e->tid = sid->tid;

			if (sid->machine_pid)
				e2_needed = true;

			e2 = (void *)ev + sz;
			e2[i].machine_pid = sid->machine_pid;
			e2[i].vcpu = sid->vcpu.cpu;
		}
	}

	sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
	ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}

int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				    struct evlist *evlist, struct machine *machine)
{
	return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct perf_thread_map *threads,
				  perf_event__handler_t process, bool needs_mmap,
				  bool data_mmap, unsigned int nr_threads_synthesize)
{
1913 /*
1914 * When perf runs in non-root PID namespace, and the namespace's proc FS
1915 * is not mounted, nsinfo__is_in_root_namespace() returns false.
1916 * In this case, the proc FS is coming for the parent namespace, thus
1917 * perf tool will wrongly gather process info from its parent PID
1918 * namespace.
1919 *
1920 * To avoid the confusion that the perf tool runs in a child PID
1921 * namespace but it synthesizes thread info from its parent PID
1922 * namespace, returns failure with warning.
1923 */
1924 if (!nsinfo__is_in_root_namespace()) {
1925 pr_err("Perf runs in non-root PID namespace but it tries to ");
1926 pr_err("gather process info from its parent PID namespace.\n");
1927 pr_err("Please mount the proc file system properly, e.g. ");
1928 pr_err("add the option '--mount-proc' for unshare command.\n");
1929 return -EPERM;
1930 }
1931
1932 if (target__has_task(target))
1933 return perf_event__synthesize_thread_map(tool, threads, process, machine,
1934 needs_mmap, data_mmap);
1935 else if (target__has_cpu(target))
1936 return perf_event__synthesize_threads(tool, process, machine,
1937 needs_mmap, data_mmap,
1938 nr_threads_synthesize);
1939 /* command specified */
1940 return 0;
1941 }
1942
int machine__synthesize_threads(struct machine *machine, struct target *target,
				struct perf_thread_map *threads, bool needs_mmap,
				bool data_mmap, unsigned int nr_threads_synthesize)
{
	return __machine__synthesize_threads(machine, NULL, target, threads,
					     perf_event__process, needs_mmap,
					     data_mmap, nr_threads_synthesize);
}

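/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE record with room for @size
 * bytes of payload, rounded up to u64 alignment.
 */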
static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
{
	struct perf_record_event_update *ev;

	size += sizeof(*ev);
	size = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

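/* Synthesize an event update carrying the evsel's unit string. */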
int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	size_t size = strlen(evsel->unit);
	struct perf_record_event_update *ev;
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->unit, evsel->unit, size + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

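/* Synthesize an event update carrying the evsel's scaling factor. */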
int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
					      perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	struct perf_record_event_update_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev->scale.scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

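/* Synthesize an event update carrying the evsel's name string. */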
int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->name, evsel->name, len + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

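/* Synthesize an event update carrying the evsel's own CPU map. */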
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
	struct perf_record_event_update *ev;
	int err;

	ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
	if (!ev)
		return -ENOMEM;

	syn_data.data = &ev->cpus.cpus;
	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)syn_data.size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id = evsel->core.id[0];
	cpu_map_data__synthesize(&syn_data);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

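/* Synthesize a PERF_RECORD_HEADER_ATTR event for every evsel in @evlist. */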
int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
				 perf_event__handler_t process)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
						  evsel->core.id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

static bool has_unit(struct evsel *evsel)
{
	return evsel->unit && *evsel->unit;
}

static bool has_scale(struct evsel *evsel)
{
	return evsel->scale != 1;
}

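/*
 * Synthesize the event updates (unit, scale, CPU map and, for pipe mode,
 * name) that the attr event itself does not carry, for each supported
 * evsel.
 */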
int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
				      perf_event__handler_t process, bool is_pipe)
{
	struct evsel *evsel;
	int err;

	/*
	 * Synthesize other event details not carried within the
	 * attr event: unit, scale, name.
	 */
	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel->supported)
			continue;

		/*
		 * Synthesize the unit and scale only if they are defined.
		 */
		if (has_unit(evsel)) {
			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(evsel)) {
			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (evsel->core.own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * The name is needed only for pipe output;
		 * perf.data already carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

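/*
 * Layout of a PERF_RECORD_HEADER_ATTR event: an event header, the
 * perf_event_attr itself (u64-aligned), then the array of @ids sample
 * IDs.  The total size must fit the 16-bit header size field, otherwise
 * -E2BIG is returned.
 */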
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
				u32 ids, u64 *id, perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = zalloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}

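/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event followed by the
 * tracepoint metadata itself, padded to u64 alignment.  Returns the
 * aligned size of the tracing data written after the event, or -1 on
 * error.
 */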
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents.  Since fd is a pipe, we cannot
	 * seek back to store the size of the data once we know
	 * it.  Instead we:
	 *
	 * - write the tracing data to a temp file
	 * - get/write the data size to the pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in the temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}

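/*
 * Synthesize a build-id event for @pos, but only if the DSO was hit;
 * the filename is padded to NAME_ALIGN.
 */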
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
				    perf_event__handler_t process, struct machine *machine)
{
	union perf_event ev;
	size_t len;

	if (!pos->hit)
		return 0;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	return process(tool, &ev, NULL, machine);
}

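/*
 * Synthesize the non-sample events that describe a stat session: the
 * (optional) attrs plus their updates, the thread and CPU maps, and the
 * stat config.
 */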
int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
	int err;

	if (attrs) {
		err = perf_event__synthesize_attrs(tool, evlist, process);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
	}

	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0) {
		pr_err("Couldn't synthesize extra attrs.\n");
		return err;
	}

	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize config.\n");
		return err;
	}

	return 0;
}

extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];

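/*
 * Synthesize one PERF_RECORD_HEADER_FEATURE event per feature bit set in
 * the session header, then a closing HEADER_LAST_FEATURE marker.
 */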
int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
				    struct evlist *evlist, perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct perf_record_header_feature *fe;
	struct feat_fd ff;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No synthesize callback for header feature %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}

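/*
 * Synthesize everything a pipe consumer needs before samples arrive:
 * attrs, header features and, when tracepoints are used, the tracing
 * data.  Returns the number of bytes synthesized, or a negative error.
 */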
int perf_event__synthesize_for_pipe(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_data *data,
				    perf_event__handler_t process)
{
	int err;
	int ret = 0;
	struct evlist *evlist = session->evlist;

	/*
	 * We need to synthesize events first, because some
	 * features work on top of them (on the report side).
	 */
	err = perf_event__synthesize_attrs(tool, evlist, process);
	if (err < 0) {
		pr_err("Couldn't synthesize attrs.\n");
		return err;
	}
	ret += err;

	err = perf_event__synthesize_features(tool, session, evlist, process);
	if (err < 0) {
		pr_err("Couldn't synthesize features.\n");
		return err;
	}
	ret += err;

	if (have_tracepoints(&evlist->core.entries)) {
		int fd = perf_data__fd(data);

		/*
		 * FIXME err <= 0 here actually means that
		 * there were no tracepoints so it's not really
		 * an error, just that we don't need to
		 * synthesize anything.  We really have to
		 * return this more properly and also
		 * propagate errors that now are calling die()
		 */
		err = perf_event__synthesize_tracing_data(tool, fd, evlist,
							  process);
		if (err <= 0) {
			pr_err("Couldn't record tracing data.\n");
			return err;
		}
		ret += err;
	}

	return ret;
}

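/*
 * Parse the comma-separated list of event types to synthesize, e.g.
 * "task,mmap" yields PERF_SYNTH_TASK | PERF_SYNTH_MMAP.  "no"/"none"
 * disables synthesis and "all" enables everything; an unknown token
 * returns -1.
 */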
int parse_synth_opt(char *synth)
{
	char *p, *q;
	int ret = 0;

	if (synth == NULL)
		return -1;

	for (q = synth; (p = strsep(&q, ",")); p = q) {
		if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
			return 0;

		if (!strcasecmp(p, "all"))
			return PERF_SYNTH_ALL;

		if (!strcasecmp(p, "task"))
			ret |= PERF_SYNTH_TASK;
		else if (!strcasecmp(p, "mmap"))
			ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
		else if (!strcasecmp(p, "cgroup"))
			ret |= PERF_SYNTH_CGROUP;
		else
			return -1;
	}

	return ret;
}