// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/string2.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"
#include "util/util.h"
#include "util/tsc.h"

#include <internal/lib.h>

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/hash.h>
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <inttypes.h>

struct guest_event {
        struct perf_sample sample;
        union perf_event *event;
        char event_buf[PERF_SAMPLE_MAX_SIZE];
};

struct guest_id {
        /* hlist_node must be first, see free_hlist() */
        struct hlist_node node;
        u64 id;
        u64 host_id;
        u32 vcpu;
};

struct guest_tid {
        /* hlist_node must be first, see free_hlist() */
        struct hlist_node node;
        /* Thread ID of QEMU thread */
        u32 tid;
        u32 vcpu;
};

struct guest_vcpu {
        /* Current host CPU */
        u32 cpu;
        /* Thread ID of QEMU thread */
        u32 tid;
};

struct guest_session {
        char *perf_data_file;
        u32 machine_pid;
        u64 time_offset;
        double time_scale;
        struct perf_tool tool;
        struct perf_data data;
        struct perf_session *session;
        char *tmp_file_name;
        int tmp_fd;
        struct perf_tsc_conversion host_tc;
        struct perf_tsc_conversion guest_tc;
        bool copy_kcore_dir;
        bool have_tc;
        bool fetched;
        bool ready;
        u16 dflt_id_hdr_size;
        u64 dflt_id;
        u64 highest_id;
        /* Array of guest_vcpu */
        struct guest_vcpu *vcpu;
        size_t vcpu_cnt;
        /* Hash table for guest_id */
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
        /* Hash table for guest_tid */
        struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
        /* Place to stash next guest event */
        struct guest_event ev;
};

struct perf_inject {
        struct perf_tool tool;
        struct perf_session *session;
        bool build_ids;
        bool build_id_all;
        bool sched_stat;
        bool have_auxtrace;
        bool strip;
        bool jit_mode;
        bool in_place_update;
        bool in_place_update_dry_run;
        bool is_pipe;
        bool copy_kcore_dir;
        const char *input_name;
        struct perf_data output;
        u64 bytes_written;
        u64 aux_id;
        struct list_head samples;
        struct itrace_synth_opts itrace_synth_opts;
        char event_copy[PERF_SAMPLE_MAX_SIZE];
        struct perf_file_section secs[HEADER_FEAT_BITS];
        struct guest_session guest_session;
        struct strlist *known_build_ids;
};

struct event_entry {
        struct list_head node;
        u32 tid;
        union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
                                struct machine *machine, u8 cpumode, u32 flags);

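/*
 * Write raw bytes to the output perf.data file, keeping a running count so
 * the data size can be recorded in the file header when processing is done.
 */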
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
        ssize_t size;

        size = perf_data__write(&inject->output, buf, sz);
        if (size < 0)
                return -errno;

        inject->bytes_written += size;
        return 0;
}

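/* Repipe an event unchanged: copy it, sized by its header, to the output */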
static int perf_event__repipe_synth(struct perf_tool *tool,
                                    union perf_event *event)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject,
                                                  tool);

        return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
                                       union perf_event *event,
                                       struct ordered_events *oe __maybe_unused)
{
        return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
                               union perf_event *event __maybe_unused,
                               struct ordered_events *oe __maybe_unused)
{
        return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
                                        union perf_event *event)
{
        return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
                                        union perf_event *event,
                                        u64 data __maybe_unused,
                                        const char *str __maybe_unused)
{
        return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct evlist **pevlist)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject,
                                                  tool);
        int ret;

        ret = perf_event__process_attr(tool, event, pevlist);
        if (ret)
                return ret;

        if (!inject->is_pipe)
                return 0;

        return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
                                           union perf_event *event,
                                           struct evlist **pevlist __maybe_unused)
{
        return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

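/*
 * Copy 'size' bytes from fd to the output in buffer-sized chunks. Used for
 * AUX area data that follows an auxtrace event in the input file.
 */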
static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
        char buf[4096];
        ssize_t ssz;
        int ret;

        while (size > 0) {
                ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
                if (ssz < 0)
                        return -errno;
                ret = output_bytes(inject, buf, ssz);
                if (ret)
                        return ret;
                size -= ssz;
        }

        return 0;
}

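/*
 * Repipe an auxtrace event together with its AUX area data, recording the
 * new file offset in the auxtrace index when the output is not a pipe.
 */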
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
                                       union perf_event *event)
{
        struct perf_tool *tool = session->tool;
        struct perf_inject *inject = container_of(tool, struct perf_inject,
                                                  tool);
        int ret;

        inject->have_auxtrace = true;

        if (!inject->output.is_pipe) {
                off_t offset;

                offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
                if (offset == -1)
                        return -errno;
                ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
                                                     event, offset);
                if (ret < 0)
                        return ret;
        }

        if (perf_data__is_pipe(session->data) || !session->one_mmap) {
                ret = output_bytes(inject, event, event->header.size);
                if (ret < 0)
                        return ret;
                ret = copy_bytes(inject, perf_data__fd(session->data),
                                 event->auxtrace.size);
        } else {
                ret = output_bytes(inject, event,
                                   event->header.size + event->auxtrace.size);
        }
        if (ret < 0)
                return ret;

        return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
                            union perf_event *event __maybe_unused)
{
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
                            union perf_event *event __maybe_unused,
                            struct perf_sample *sample __maybe_unused,
                            struct machine *machine __maybe_unused)
{
        return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
                                union perf_event *event __maybe_unused,
                                struct perf_sample *sample,
                                struct machine *machine __maybe_unused)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

        if (!inject->aux_id)
                inject->aux_id = sample->id;

        return 0;
}

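/*
 * Remove the PERF_SAMPLE_AUX payload from a sample: copy the event into
 * event_copy minus the aux_sample data, and zero the aux_sample.size field
 * (the u64 immediately preceding the cut).
 */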
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
                                 union perf_event *event,
                                 struct perf_sample *sample)
{
        size_t sz1 = sample->aux_sample.data - (void *)event;
        size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
        union perf_event *ev = (union perf_event *)inject->event_copy;

        if (sz1 > event->header.size || sz2 > event->header.size ||
            sz1 + sz2 > event->header.size ||
            sz1 < sizeof(struct perf_event_header) + sizeof(u64))
                return event;

        memcpy(ev, event, sz1);
        memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
        ev->header.size = sz1 + sz2;
        ((u64 *)((void *)ev + sz1))[-1] = 0;

        return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample,
                              struct evsel *evsel,
                              struct machine *machine);

static int perf_event__repipe_sample(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct evsel *evsel,
                                     struct machine *machine)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject,
                                                  tool);

        if (evsel && evsel->handler) {
                inject_handler f = evsel->handler;
                return f(tool, event, sample, evsel, machine);
        }

        build_id__mark_dso_hit(tool, event, sample, evsel, machine);

        if (inject->itrace_synth_opts.set && sample->aux_sample.size)
                event = perf_inject__cut_auxtrace_sample(inject, event, sample);

        return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
        int err;

        err = perf_event__process_mmap(tool, event, sample, machine);
        perf_event__repipe(tool, event, sample, machine);

        return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct machine *machine)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
        u64 n = 0;
        int ret;

        /*
         * if jit marker, then inject jit mmaps and generate ELF images
         */
        ret = jit_process(inject->session, &inject->output, machine,
                          event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
        if (ret < 0)
                return ret;
        if (ret) {
                inject->bytes_written += n;
                return 0;
        }
        return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

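/*
 * Find or create the dso for a mapped file, attaching the thread's namespace
 * info so that build IDs can later be read from inside a container.
 */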
static struct dso *findnew_dso(int pid, int tid, const char *filename,
                               struct dso_id *id, struct machine *machine)
{
        struct thread *thread;
        struct nsinfo *nsi = NULL;
        struct nsinfo *nnsi;
        struct dso *dso;
        bool vdso;

        thread = machine__findnew_thread(machine, pid, tid);
        if (thread == NULL) {
                pr_err("cannot find or create a task %d/%d.\n", tid, pid);
                return NULL;
        }

        vdso = is_vdso_map(filename);
        nsi = nsinfo__get(thread->nsinfo);

        if (vdso) {
                /* The vdso maps are always on the host and not the
                 * container. Ensure that we don't use setns to look
                 * them up.
                 */
                nnsi = nsinfo__copy(nsi);
                if (nnsi) {
                        nsinfo__put(nsi);
                        nsinfo__clear_need_setns(nnsi);
                        nsi = nnsi;
                }
                dso = machine__findnew_vdso(machine, thread);
        } else {
                dso = machine__findnew_dso_id(machine, filename, id);
        }

        if (dso) {
                mutex_lock(&dso->lock);
                nsinfo__put(dso->nsinfo);
                dso->nsinfo = nsi;
                mutex_unlock(&dso->lock);
        } else
                nsinfo__put(nsi);

        thread__put(thread);
        return dso;
}

static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
                                           union perf_event *event,
                                           struct perf_sample *sample,
                                           struct machine *machine)
{
        struct dso *dso;

        dso = findnew_dso(event->mmap.pid, event->mmap.tid,
                          event->mmap.filename, NULL, machine);

        if (dso && !dso->hit) {
                dso->hit = 1;
                dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
        }
        dso__put(dso);

        return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
                                    union perf_event *event,
                                    struct perf_sample *sample,
                                    struct machine *machine)
{
        int err;

        err = perf_event__process_mmap2(tool, event, sample, machine);
        perf_event__repipe(tool, event, sample, machine);

        if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
                struct dso *dso;

                dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
                                  event->mmap2.filename, NULL, machine);
                if (dso) {
                        /* mark it not to inject build-id */
                        dso->hit = 1;
                }
                dso__put(dso);
        }

        return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
                                        union perf_event *event,
                                        struct perf_sample *sample,
                                        struct machine *machine)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
        u64 n = 0;
        int ret;

        /*
         * if jit marker, then inject jit mmaps and generate ELF images
         */
        ret = jit_process(inject->session, &inject->output, machine,
                          event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
        if (ret < 0)
                return ret;
        if (ret) {
                inject->bytes_written += n;
                return 0;
        }
        return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
                                            union perf_event *event,
                                            struct perf_sample *sample,
                                            struct machine *machine)
{
        struct dso_id dso_id = {
                .maj = event->mmap2.maj,
                .min = event->mmap2.min,
                .ino = event->mmap2.ino,
                .ino_generation = event->mmap2.ino_generation,
        };
        struct dso *dso;

        if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
                /* cannot use dso_id since it'd have invalid info */
                dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
                                  event->mmap2.filename, NULL, machine);
                if (dso) {
                        /* mark it not to inject build-id */
                        dso->hit = 1;
                }
                dso__put(dso);
                return 0;
        }

        dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
                          event->mmap2.filename, &dso_id, machine);

        if (dso && !dso->hit) {
                dso->hit = 1;
                dso__inject_build_id(dso, tool, machine, sample->cpumode,
                                     event->mmap2.flags);
        }
        dso__put(dso);

        perf_event__repipe(tool, event, sample, machine);

        return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
        int err;

        err = perf_event__process_fork(tool, event, sample, machine);
        perf_event__repipe(tool, event, sample, machine);

        return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
        int err;

        err = perf_event__process_comm(tool, event, sample, machine);
        perf_event__repipe(tool, event, sample, machine);

        return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
                                         union perf_event *event,
                                         struct perf_sample *sample,
                                         struct machine *machine)
{
        int err = perf_event__process_namespaces(tool, event, sample, machine);

        perf_event__repipe(tool, event, sample, machine);

        return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
        int err;

        err = perf_event__process_exit(tool, event, sample, machine);
        perf_event__repipe(tool, event, sample, machine);

        return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
                                           union perf_event *event)
{
        perf_event__repipe_synth(session->tool, event);

        return perf_event__process_tracing_data(session, event);
}

static int dso__read_build_id(struct dso *dso)
{
        struct nscookie nsc;

        if (dso->has_build_id)
                return 0;

        mutex_lock(&dso->lock);
        nsinfo__mountns_enter(dso->nsinfo, &nsc);
        if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
                dso->has_build_id = true;
        else if (dso->nsinfo) {
                char *new_name;

                new_name = filename_with_chroot(dso->nsinfo->pid,
                                                dso->long_name);
                if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
                        dso->has_build_id = true;
                free(new_name);
        }
        nsinfo__mountns_exit(&nsc);
        mutex_unlock(&dso->lock);

        return dso->has_build_id ? 0 : -1;
}

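/*
 * Parse the --known-build-ids argument: a list of "<build-id> <path>"
 * entries. Entries with a missing path, or a build ID that is odd-length,
 * over-long or not hexadecimal, are dropped from the list.
 */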
static struct strlist *perf_inject__parse_known_build_ids(
        const char *known_build_ids_string)
{
        struct str_node *pos, *tmp;
        struct strlist *known_build_ids;
        int bid_len;

        known_build_ids = strlist__new(known_build_ids_string, NULL);
        if (known_build_ids == NULL)
                return NULL;
        strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
                const char *build_id, *dso_name;

                build_id = skip_spaces(pos->s);
                dso_name = strchr(build_id, ' ');
                if (dso_name == NULL) {
                        strlist__remove(known_build_ids, pos);
                        continue;
                }
                bid_len = dso_name - pos->s;
                dso_name = skip_spaces(dso_name);
                if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
                        strlist__remove(known_build_ids, pos);
                        continue;
                }
                for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
                        if (!isxdigit(build_id[2 * ix]) ||
                            !isxdigit(build_id[2 * ix + 1])) {
                                strlist__remove(known_build_ids, pos);
                                break;
                        }
                }
        }
        return known_build_ids;
}

static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
                                               struct dso *dso)
{
        struct str_node *pos;
        int bid_len;

        strlist__for_each_entry(pos, inject->known_build_ids) {
                const char *build_id, *dso_name;

                build_id = skip_spaces(pos->s);
                dso_name = strchr(build_id, ' ');
                bid_len = dso_name - pos->s;
                dso_name = skip_spaces(dso_name);
                if (strcmp(dso->long_name, dso_name))
                        continue;
                for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
                        dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
                                             hex(build_id[2 * ix + 1]));
                }
                dso->bid.size = bid_len / 2;
                dso->has_build_id = 1;
                return true;
        }
        return false;
}

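/*
 * Synthesize a build ID event for the dso, preferring a user-supplied known
 * build ID over one read from the file itself. Anonymous and huge-page
 * mappings have no backing file to read a build ID from, so are skipped.
 */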
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
                                struct machine *machine, u8 cpumode, u32 flags)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject,
                                                  tool);
        int err;

        if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
                return 0;
        if (is_no_dso_memory(dso->long_name))
                return 0;

        if (inject->known_build_ids != NULL &&
            perf_inject__lookup_known_build_id(inject, dso))
                return 1;

        if (dso__read_build_id(dso) < 0) {
                pr_debug("no build_id found for %s\n", dso->long_name);
                return -1;
        }

        err = perf_event__synthesize_build_id(tool, dso, cpumode,
                                              perf_event__repipe, machine);
        if (err) {
                pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
                return -1;
        }

        return 0;
}

int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
                               struct perf_sample *sample,
                               struct evsel *evsel __maybe_unused,
                               struct machine *machine)
{
        struct addr_location al;
        struct thread *thread;

        thread = machine__findnew_thread(machine, sample->pid, sample->tid);
        if (thread == NULL) {
                pr_err("problem processing %d event, skipping it.\n",
                       event->header.type);
                goto repipe;
        }

        if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
                if (!al.map->dso->hit) {
                        al.map->dso->hit = 1;
                        dso__inject_build_id(al.map->dso, tool, machine,
                                             sample->cpumode, al.map->flags);
                }
        }

        thread__put(thread);
repipe:
        perf_event__repipe(tool, event, sample, machine);
        return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
                                           union perf_event *event __maybe_unused,
                                           struct perf_sample *sample,
                                           struct evsel *evsel __maybe_unused,
                                           struct machine *machine __maybe_unused)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
        struct event_entry *ent;

        list_for_each_entry(ent, &inject->samples, node) {
                if (sample->tid == ent->tid) {
                        list_del_init(&ent->node);
                        free(ent);
                        break;
                }
        }

        return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct evsel *evsel,
                                     struct machine *machine)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
        struct event_entry *ent;

        perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

        ent = malloc(event->header.size + sizeof(struct event_entry));
        if (ent == NULL) {
                color_fprintf(stderr, PERF_COLOR_RED,
                              "Not enough memory to process sched switch event!");
                return -1;
        }

        ent->tid = sample->tid;
        memcpy(&ent->event, event, event->header.size);
        list_add(&ent->node, &inject->samples);
        return 0;
}

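/*
 * On a sched_stat event, re-emit the stashed sched_switch sample for the
 * same task, carrying over the sched_stat event's period and timestamp.
 */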
static int perf_inject__sched_stat(struct perf_tool *tool,
                                   union perf_event *event __maybe_unused,
                                   struct perf_sample *sample,
                                   struct evsel *evsel,
                                   struct machine *machine)
{
        struct event_entry *ent;
        union perf_event *event_sw;
        struct perf_sample sample_sw;
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
        u32 pid = evsel__intval(evsel, sample, "pid");

        list_for_each_entry(ent, &inject->samples, node) {
                if (pid == ent->tid)
                        goto found;
        }

        return 0;
found:
        event_sw = &ent->event[0];
        evsel__parse_sample(evsel, event_sw, &sample_sw);

        sample_sw.period = sample->period;
        sample_sw.time = sample->time;
        perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
                                      evsel->core.attr.read_format, &sample_sw);
        build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
        return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
{
        if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
                return NULL;
        return &gs->vcpu[vcpu];
}

static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
{
        ssize_t ret = writen(gs->tmp_fd, buf, sz);

        return ret < 0 ? ret : 0;
}

static int guest_session__repipe(struct perf_tool *tool,
                                 union perf_event *event,
                                 struct perf_sample *sample __maybe_unused,
                                 struct machine *machine __maybe_unused)
{
        struct guest_session *gs = container_of(tool, struct guest_session, tool);

        return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
{
        struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
        int hash;

        if (!guest_tid)
                return -ENOMEM;

        guest_tid->tid = tid;
        guest_tid->vcpu = vcpu;
        hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&guest_tid->node, &gs->tids[hash]);

        return 0;
}

static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
                                 union perf_event *event,
                                 u64 offset __maybe_unused, void *data)
{
        struct guest_session *gs = data;
        unsigned int vcpu;
        struct guest_vcpu *guest_vcpu;
        int ret;

        if (event->header.type != PERF_RECORD_COMM ||
            event->comm.pid != gs->machine_pid)
                return 0;

        /*
         * The QEMU option -name debug-threads=on causes thread names to be
         * formatted as below, although this is not an ABI. Also, libvirt
         * seems to use this by default. Here we rely on it to tell us which
         * thread is which VCPU.
         */
        ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
        if (ret <= 0)
                return ret;
        pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
                 event->comm.tid, event->comm.comm, vcpu);
        if (vcpu > INT_MAX) {
                pr_err("Invalid VCPU %u\n", vcpu);
                return -EINVAL;
        }
        guest_vcpu = guest_session__vcpu(gs, vcpu);
        if (!guest_vcpu)
                return -ENOMEM;
        if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
                pr_err("Fatal error: Two threads found with the same VCPU\n");
                return -EINVAL;
        }
        guest_vcpu->tid = event->comm.tid;

        return guest_session__map_tid(gs, event->comm.tid, vcpu);
}

static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
{
        return perf_session__peek_events(session, session->header.data_offset,
                                         session->header.data_size,
                                         host_peek_vm_comms_cb, gs);
}

static bool evlist__is_id_used(struct evlist *evlist, u64 id)
{
        return evlist__id2sid(evlist, id);
}

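/*
 * Guest sample IDs may clash with host sample IDs, so allocate IDs from
 * above the highest ID seen so far, skipping any already in use on the host.
 */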
static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
{
        do {
                gs->highest_id += 1;
        } while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));

        return gs->highest_id;
}

static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
{
        struct guest_id *guest_id = zalloc(sizeof(*guest_id));
        int hash;

        if (!guest_id)
                return -ENOMEM;

        guest_id->id = id;
        guest_id->host_id = host_id;
        guest_id->vcpu = vcpu;
        hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&guest_id->node, &gs->heads[hash]);

        return 0;
}

static u64 evlist__find_highest_id(struct evlist *evlist)
{
        struct evsel *evsel;
        u64 highest_id = 1;

        evlist__for_each_entry(evlist, evsel) {
                u32 j;

                for (j = 0; j < evsel->core.ids; j++) {
                        u64 id = evsel->core.id[j];

                        if (id > highest_id)
                                highest_id = id;
                }
        }

        return highest_id;
}

static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
{
        struct evlist *evlist = gs->session->evlist;
        struct evsel *evsel;
        int ret;

        evlist__for_each_entry(evlist, evsel) {
                u32 j;

                for (j = 0; j < evsel->core.ids; j++) {
                        struct perf_sample_id *sid;
                        u64 host_id;
                        u64 id;

                        id = evsel->core.id[j];
                        sid = evlist__id2sid(evlist, id);
                        if (!sid || sid->cpu.cpu == -1)
                                continue;
                        host_id = guest_session__allocate_new_id(gs, host_evlist);
                        ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
{
        struct hlist_head *head;
        struct guest_id *guest_id;
        int hash;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &gs->heads[hash];

        hlist_for_each_entry(guest_id, head, node)
                if (guest_id->id == id)
                        return guest_id;

        return NULL;
}

static int process_attr(struct perf_tool *tool, union perf_event *event,
                        struct perf_sample *sample __maybe_unused,
                        struct machine *machine __maybe_unused)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

        return perf_event__process_attr(tool, event, &inject->session->evlist);
}

static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
{
        struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
        struct perf_event_attr attr = evsel->core.attr;
        u64 *id_array;
        u32 *vcpu_array;
        int ret = -ENOMEM;
        u32 i;

        id_array = calloc(evsel->core.ids, sizeof(*id_array));
        if (!id_array)
                return -ENOMEM;

        vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
        if (!vcpu_array)
                goto out;

        for (i = 0; i < evsel->core.ids; i++) {
                u64 id = evsel->core.id[i];
                struct guest_id *guest_id = guest_session__lookup_id(gs, id);

                if (!guest_id) {
                        pr_err("Failed to find guest id %"PRIu64"\n", id);
                        ret = -EINVAL;
                        goto out;
                }
                id_array[i] = guest_id->host_id;
                vcpu_array[i] = guest_id->vcpu;
        }

        attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
        attr.exclude_host = 1;
        attr.exclude_guest = 0;

        ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
                                          id_array, process_attr);
        if (ret)
                pr_err("Failed to add guest attr.\n");

        for (i = 0; i < evsel->core.ids; i++) {
                struct perf_sample_id *sid;
                u32 vcpu = vcpu_array[i];

                sid = evlist__id2sid(inject->session->evlist, id_array[i]);
                /* Guest event is per-thread from the host point of view */
                sid->cpu.cpu = -1;
                sid->tid = gs->vcpu[vcpu].tid;
                sid->machine_pid = gs->machine_pid;
                sid->vcpu.cpu = vcpu;
        }
out:
        free(vcpu_array);
        free(id_array);
        return ret;
}

static int guest_session__add_attrs(struct guest_session *gs)
{
        struct evlist *evlist = gs->session->evlist;
        struct evsel *evsel;
        int ret;

        evlist__for_each_entry(evlist, evsel) {
                ret = guest_session__add_attr(gs, evsel);
                if (ret)
                        return ret;
        }

        return 0;
}

static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
{
        struct perf_session *session = inject->session;
        struct evlist *evlist = session->evlist;
        struct machine *machine = &session->machines.host;
        size_t from = evlist->core.nr_entries - new_cnt;

        return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
                                                 evlist, machine, from);
}

static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
{
        struct hlist_head *head;
        struct guest_tid *guest_tid;
        int hash;

        hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
        head = &gs->tids[hash];

        hlist_for_each_entry(guest_tid, head, node)
                if (guest_tid->tid == tid)
                        return guest_tid;

        return NULL;
}

static bool dso__is_in_kernel_space(struct dso *dso)
{
        if (dso__is_vdso(dso))
                return false;

        return dso__is_kcore(dso) ||
               dso->kernel ||
               is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
}

static u64 evlist__first_id(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.ids)
                        return evsel->core.id[0];
        }
        return 0;
}

static int process_build_id(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample __maybe_unused,
                            struct machine *machine __maybe_unused)
{
        struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

        return perf_event__process_build_id(inject->session, event);
}

static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
{
        struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
        u8 cpumode = dso__is_in_kernel_space(dso) ?
                     PERF_RECORD_MISC_GUEST_KERNEL :
                     PERF_RECORD_MISC_GUEST_USER;

        if (!machine)
                return -ENOMEM;

        dso->hit = 1;

        return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
                                               process_build_id, machine);
}

static int guest_session__add_build_ids(struct guest_session *gs)
{
        struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
        struct machine *machine = &gs->session->machines.host;
        struct dso *dso;
        int ret;

        /* Build IDs will be put in the Build ID feature section */
        perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);

        dsos__for_each_with_build_id(dso, &machine->dsos.head) {
                ret = synthesize_build_id(inject, dso, gs->machine_pid);
                if (ret)
                        return ret;
        }

        return 0;
}

static int guest_session__ksymbol_event(struct perf_tool *tool,
                                        union perf_event *event,
                                        struct perf_sample *sample __maybe_unused,
                                        struct machine *machine __maybe_unused)
{
        struct guest_session *gs = container_of(tool, struct guest_session, tool);

        /* Only support out-of-line i.e. no BPF support */
        if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
                return 0;

        return guest_session__output_bytes(gs, event, event->header.size);
}

static int guest_session__start(struct guest_session *gs, const char *name, bool force)
{
        char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
        struct perf_session *session;
        int ret;

        /* Only these events will be injected */
        gs->tool.mmap = guest_session__repipe;
        gs->tool.mmap2 = guest_session__repipe;
        gs->tool.comm = guest_session__repipe;
        gs->tool.fork = guest_session__repipe;
        gs->tool.exit = guest_session__repipe;
        gs->tool.lost = guest_session__repipe;
        gs->tool.context_switch = guest_session__repipe;
        gs->tool.ksymbol = guest_session__ksymbol_event;
        gs->tool.text_poke = guest_session__repipe;
        /*
         * Processing a build ID creates a struct dso with that build ID. Later,
         * all guest dsos are iterated and the build IDs processed into the host
         * session where they will be output to the Build ID feature section
         * when the perf.data file header is written.
         */
        gs->tool.build_id = perf_event__process_build_id;
        /* Process the id index to know what VCPU an ID belongs to */
        gs->tool.id_index = perf_event__process_id_index;

        gs->tool.ordered_events = true;
        gs->tool.ordering_requires_timestamps = true;

        gs->data.path = name;
        gs->data.force = force;
        gs->data.mode = PERF_DATA_MODE_READ;

        session = perf_session__new(&gs->data, &gs->tool);
        if (IS_ERR(session))
                return PTR_ERR(session);
        gs->session = session;

        /*
         * Initial events have zero'd ID samples. Get default ID sample size
         * used for removing them.
         */
        gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
        /* And default ID for adding back a host-compatible ID sample */
        gs->dflt_id = evlist__first_id(session->evlist);
        if (!gs->dflt_id) {
                pr_err("Guest data has no sample IDs");
                return -EINVAL;
        }

        /* Temporary file for guest events */
        gs->tmp_file_name = strdup(tmp_file_name);
        if (!gs->tmp_file_name)
                return -ENOMEM;
        gs->tmp_fd = mkstemp(gs->tmp_file_name);
        if (gs->tmp_fd < 0)
                return -errno;

        if (zstd_init(&gs->session->zstd_data, 0) < 0)
                pr_warning("Guest session decompression initialization failed.\n");

        /*
         * perf does not support processing 2 sessions simultaneously, so output
         * guest events to a temporary file.
         */
        ret = perf_session__process_events(gs->session);
        if (ret)
                return ret;

        if (lseek(gs->tmp_fd, 0, SEEK_SET))
                return -errno;

        return 0;
}

/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
{
        struct hlist_node *pos, *n;
        size_t i;

        for (i = 0; i < hlist_sz; ++i) {
                hlist_for_each_safe(pos, n, &heads[i]) {
                        hlist_del(pos);
                        free(pos);
                }
        }
}

static void guest_session__exit(struct guest_session *gs)
{
        if (gs->session) {
                perf_session__delete(gs->session);
                free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
                free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
        }
        if (gs->tmp_file_name) {
                if (gs->tmp_fd >= 0)
                        close(gs->tmp_fd);
                unlink(gs->tmp_file_name);
                free(gs->tmp_file_name);
        }
        free(gs->vcpu);
        free(gs->perf_data_file);
}

static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
{
        tc->time_shift = time_conv->time_shift;
        tc->time_mult = time_conv->time_mult;
        tc->time_zero = time_conv->time_zero;
        tc->time_cycles = time_conv->time_cycles;
        tc->time_mask = time_conv->time_mask;
        tc->cap_user_time_zero = time_conv->cap_user_time_zero;
        tc->cap_user_time_short = time_conv->cap_user_time_short;
}

static void guest_session__get_tc(struct guest_session *gs)
{
        struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);

        get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
        get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
}

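/*
 * Convert a guest timestamp to a host timestamp via the TSC: guest time to
 * guest TSC, remove the guest's TSC offset and scale, then host TSC back to
 * host time.
 */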
static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
{
        u64 tsc;

        if (!guest_time) {
                *host_time = 0;
                return;
        }

        if (gs->guest_tc.cap_user_time_zero)
                tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
        else
                tsc = guest_time;

        /*
         * This is the correct order of operations for x86 if the TSC Offset and
         * Multiplier values are used.
         */
        tsc -= gs->time_offset;
        tsc /= gs->time_scale;

        if (gs->host_tc.cap_user_time_zero)
                *host_time = tsc_to_perf_time(tsc, &gs->host_tc);
        else
                *host_time = tsc;
}

static int guest_session__fetch(struct guest_session *gs)
{
        void *buf = gs->ev.event_buf;
        struct perf_event_header *hdr = buf;
        size_t hdr_sz = sizeof(*hdr);
        ssize_t ret;

        ret = readn(gs->tmp_fd, buf, hdr_sz);
        if (ret < 0)
                return ret;

        if (!ret) {
                /* Zero size means EOF */
                hdr->size = 0;
                return 0;
        }

        buf += hdr_sz;

        ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
        if (ret < 0)
                return ret;

        gs->ev.event = (union perf_event *)gs->ev.event_buf;
        gs->ev.sample.time = 0;

        if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
                pr_err("Unexpected type fetching guest event");
                return 0;
        }

        ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
        if (ret) {
                pr_err("Parse failed fetching guest event");
                return ret;
        }

        if (!gs->have_tc) {
                guest_session__get_tc(gs);
                gs->have_tc = true;
        }

        guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);

        return 0;
}

static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
                                    const struct perf_sample *sample)
{
        struct evsel *evsel;
        void *array;
        int ret;

        evsel = evlist__id2evsel(evlist, sample->id);
        array = ev;

        if (!evsel) {
                pr_err("No evsel for id %"PRIu64"\n", sample->id);
                return -EINVAL;
        }

        array += ev->header.size;
        ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
        if (ret < 0)
                return ret;

        if (ret & 7) {
                pr_err("Bad id sample size %d\n", ret);
                return -EINVAL;
        }

        ev->header.size += ret;

        return 0;
}

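/*
 * Inject guest events with timestamps up to and including 'timestamp'. Each
 * event has its cpumode switched to guest, its guest ID sample replaced by a
 * host-compatible one, and its VCPU number mapped to the host CPU the VCPU
 * was last seen running on.
 */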
static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
{
        struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
        int ret;

        if (!gs->ready)
                return 0;

        while (1) {
                struct perf_sample *sample;
                struct guest_id *guest_id;
                union perf_event *ev;
                u16 id_hdr_size;
                u8 cpumode;
                u64 id;

                if (!gs->fetched) {
                        ret = guest_session__fetch(gs);
                        if (ret)
                                return ret;
                        gs->fetched = true;
                }

                ev = gs->ev.event;
                sample = &gs->ev.sample;

                if (!ev->header.size)
                        return 0; /* EOF */

                if (sample->time > timestamp)
                        return 0;

                /* Change cpumode to guest */
                cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
                if (cpumode & PERF_RECORD_MISC_USER)
                        cpumode = PERF_RECORD_MISC_GUEST_USER;
                else
                        cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
                ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
                ev->header.misc |= cpumode;

                id = sample->id;
                if (!id) {
                        id = gs->dflt_id;
                        id_hdr_size = gs->dflt_id_hdr_size;
                } else {
                        struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);

                        id_hdr_size = evsel__id_hdr_size(evsel);
                }

                if (id_hdr_size & 7) {
                        pr_err("Bad id_hdr_size %u\n", id_hdr_size);
                        return -EINVAL;
                }

                if (ev->header.size & 7) {
                        pr_err("Bad event size %u\n", ev->header.size);
                        return -EINVAL;
                }

                /* Remove guest id sample */
                ev->header.size -= id_hdr_size;

                if (ev->header.size & 7) {
                        pr_err("Bad raw event size %u\n", ev->header.size);
                        return -EINVAL;
                }

                guest_id = guest_session__lookup_id(gs, id);
                if (!guest_id) {
                        pr_err("Guest event with unknown id %llu\n",
                               (unsigned long long)id);
                        return -EINVAL;
                }

                /* Change to host ID to avoid conflicting ID values */
                sample->id = guest_id->host_id;
                sample->stream_id = guest_id->host_id;

                if (sample->cpu != (u32)-1) {
                        if (sample->cpu >= gs->vcpu_cnt) {
                                pr_err("Guest event with unknown VCPU %u\n",
                                       sample->cpu);
                                return -EINVAL;
                        }
                        /* Change to host CPU instead of guest VCPU */
                        sample->cpu = gs->vcpu[sample->cpu].cpu;
                }

                /* New id sample with new ID and CPU */
                ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
                if (ret)
                        return ret;

                if (ev->header.size & 7) {
                        pr_err("Bad new event size %u\n", ev->header.size);
                        return -EINVAL;
                }

                gs->fetched = false;

                ret = output_bytes(inject, ev, ev->header.size);
                if (ret)
                        return ret;
        }
}

static int guest_session__flush_events(struct guest_session *gs)
{
        return guest_session__inject_events(gs, -1);
}

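/* Repipe a host event, first injecting any guest events due before it */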
host__repipe(struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct machine * machine)1549 static int host__repipe(struct perf_tool *tool,
1550 union perf_event *event,
1551 struct perf_sample *sample,
1552 struct machine *machine)
1553 {
1554 struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1555 int ret;
1556
1557 ret = guest_session__inject_events(&inject->guest_session, sample->time);
1558 if (ret)
1559 return ret;
1560
1561 return perf_event__repipe(tool, event, sample, machine);
1562 }
1563
host__finished_init(struct perf_session * session,union perf_event * event)1564 static int host__finished_init(struct perf_session *session, union perf_event *event)
1565 {
1566 struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
1567 struct guest_session *gs = &inject->guest_session;
1568 int ret;
1569
1570 /*
1571 * Peek through host COMM events to find QEMU threads and the VCPU they
1572 * are running.
1573 */
1574 ret = host_peek_vm_comms(session, gs);
1575 if (ret)
1576 return ret;
1577
1578 if (!gs->vcpu_cnt) {
1579 pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
1580 return -EINVAL;
1581 }
1582
1583 /*
1584 * Allocate new (unused) host sample IDs and map them to the guest IDs.
1585 */
1586 gs->highest_id = evlist__find_highest_id(session->evlist);
1587 ret = guest_session__map_ids(gs, session->evlist);
1588 if (ret)
1589 return ret;
1590
1591 ret = guest_session__add_attrs(gs);
1592 if (ret)
1593 return ret;
1594
1595 ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
1596 if (ret) {
1597 pr_err("Failed to synthesize id_index\n");
1598 return ret;
1599 }
1600
1601 ret = guest_session__add_build_ids(gs);
1602 if (ret) {
1603 pr_err("Failed to add guest build IDs\n");
1604 return ret;
1605 }
1606
1607 gs->ready = true;
1608
1609 ret = guest_session__inject_events(gs, 0);
1610 if (ret)
1611 return ret;
1612
1613 return perf_event__repipe_op2_synth(session, event);
1614 }
1615
1616 /*
1617 * Obey finished-round ordering. The FINISHED_ROUND event is first processed
1618 * which flushes host events to file up until the last flush time. Then inject
1619 * guest events up to the same time. Finally write out the FINISHED_ROUND event
1620 * itself.
1621 */
host__finished_round(struct perf_tool * tool,union perf_event * event,struct ordered_events * oe)1622 static int host__finished_round(struct perf_tool *tool,
1623 union perf_event *event,
1624 struct ordered_events *oe)
1625 {
1626 struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1627 int ret = perf_event__process_finished_round(tool, event, oe);
1628 u64 timestamp = ordered_events__last_flush_time(oe);
1629
1630 if (ret)
1631 return ret;
1632
1633 ret = guest_session__inject_events(&inject->guest_session, timestamp);
1634 if (ret)
1635 return ret;
1636
1637 return perf_event__repipe_oe_synth(tool, event, oe);
1638 }
1639
host__context_switch(struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct machine * machine)1640 static int host__context_switch(struct perf_tool *tool,
1641 union perf_event *event,
1642 struct perf_sample *sample,
1643 struct machine *machine)
1644 {
1645 struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1646 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1647 struct guest_session *gs = &inject->guest_session;
1648 u32 pid = event->context_switch.next_prev_pid;
1649 u32 tid = event->context_switch.next_prev_tid;
1650 struct guest_tid *guest_tid;
1651 u32 vcpu;
1652
1653 if (out || pid != gs->machine_pid)
1654 goto out;
1655
1656 guest_tid = guest_session__lookup_tid(gs, tid);
1657 if (!guest_tid)
1658 goto out;
1659
1660 if (sample->cpu == (u32)-1) {
1661 pr_err("Switch event does not have CPU\n");
1662 return -EINVAL;
1663 }
1664
1665 vcpu = guest_tid->vcpu;
1666 if (vcpu >= gs->vcpu_cnt)
1667 return -EINVAL;
1668
1669 /* Guest is switching in, record which CPU the VCPU is now running on */
1670 gs->vcpu[vcpu].cpu = sample->cpu;
1671 out:
1672 return host__repipe(tool, event, sample, machine);
1673 }
1674
sig_handler(int sig __maybe_unused)1675 static void sig_handler(int sig __maybe_unused)
1676 {
1677 session_done = 1;
1678 }
1679
evsel__check_stype(struct evsel * evsel,u64 sample_type,const char * sample_msg)1680 static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
1681 {
1682 struct perf_event_attr *attr = &evsel->core.attr;
1683 const char *name = evsel__name(evsel);
1684
1685 if (!(attr->sample_type & sample_type)) {
1686 pr_err("Samples for %s event do not have %s attribute set.",
1687 name, sample_msg);
1688 return -EINVAL;
1689 }
1690
1691 return 0;
1692 }
1693
drop_sample(struct perf_tool * tool __maybe_unused,union perf_event * event __maybe_unused,struct perf_sample * sample __maybe_unused,struct evsel * evsel __maybe_unused,struct machine * machine __maybe_unused)1694 static int drop_sample(struct perf_tool *tool __maybe_unused,
1695 union perf_event *event __maybe_unused,
1696 struct perf_sample *sample __maybe_unused,
1697 struct evsel *evsel __maybe_unused,
1698 struct machine *machine __maybe_unused)
1699 {
1700 return 0;
1701 }
1702
strip_init(struct perf_inject * inject)1703 static void strip_init(struct perf_inject *inject)
1704 {
1705 struct evlist *evlist = inject->session->evlist;
1706 struct evsel *evsel;
1707
1708 inject->tool.context_switch = perf_event__drop;
1709
1710 evlist__for_each_entry(evlist, evsel)
1711 evsel->handler = drop_sample;
1712 }
1713
parse_vm_time_correlation(const struct option * opt,const char * str,int unset)1714 static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
1715 {
1716 struct perf_inject *inject = opt->value;
1717 const char *args;
1718 char *dry_run;
1719
1720 if (unset)
1721 return 0;
1722
1723 inject->itrace_synth_opts.set = true;
1724 inject->itrace_synth_opts.vm_time_correlation = true;
1725 inject->in_place_update = true;
1726
1727 if (!str)
1728 return 0;
1729
1730 dry_run = skip_spaces(str);
1731 if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
1732 inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
1733 inject->in_place_update_dry_run = true;
1734 args = dry_run + strlen("dry-run");
1735 } else {
1736 args = str;
1737 }
1738
1739 inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
1740
1741 return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
1742 }
1743
static int parse_guest_data(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	struct guest_session *gs = &inject->guest_session;
	char *tok;
	char *s;

	if (unset)
		return 0;

	if (!str)
		goto bad_args;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	gs->perf_data_file = strsep(&s, ",");
	if (!gs->perf_data_file)
		goto bad_args;

	gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
	if (gs->copy_kcore_dir)
		inject->output.is_dir = true;

	tok = strsep(&s, ",");
	if (!tok)
		goto bad_args;
	gs->machine_pid = strtoul(tok, NULL, 0);
	if (!gs->machine_pid)
		goto bad_args;

	gs->time_scale = 1;

	tok = strsep(&s, ",");
	if (!tok)
		goto out;
	gs->time_offset = strtoull(tok, NULL, 0);

	tok = strsep(&s, ",");
	if (!tok)
		goto out;
	gs->time_scale = strtod(tok, NULL);
	if (!gs->time_scale)
		goto bad_args;
out:
	return 0;

bad_args:
	pr_err("--guest-data option requires guest perf.data file name, "
	       "guest machine PID, and optionally guest timestamp offset, "
	       "and guest timestamp scale factor, separated by commas.\n");
	return -1;
}

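/*
 * Record the offset and size of each feature section so that sections
 * kept unchanged can later be copied verbatim to the output file.
 */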
static int save_section_info_cb(struct perf_file_section *section,
				struct perf_header *ph __maybe_unused,
				int feat, int fd __maybe_unused, void *data)
{
	struct perf_inject *inject = data;

	inject->secs[feat] = *section;
	return 0;
}

static int save_section_info(struct perf_inject *inject)
{
	struct perf_header *header = &inject->session->header;
	int fd = perf_data__fd(inject->session->data);

	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
}

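/*
 * Decide whether a feature section can be copied as-is: information
 * describing the machine or software is kept, while anything that
 * injection may change is regenerated instead.
 */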
static bool keep_feat(int feat)
{
	switch (feat) {
	/* Keep original information that describes the machine or software */
	case HEADER_TRACING_DATA:
	case HEADER_HOSTNAME:
	case HEADER_OSRELEASE:
	case HEADER_VERSION:
	case HEADER_ARCH:
	case HEADER_NRCPUS:
	case HEADER_CPUDESC:
	case HEADER_CPUID:
	case HEADER_TOTAL_MEM:
	case HEADER_CPU_TOPOLOGY:
	case HEADER_NUMA_TOPOLOGY:
	case HEADER_PMU_MAPPINGS:
	case HEADER_CACHE:
	case HEADER_MEM_TOPOLOGY:
	case HEADER_CLOCKID:
	case HEADER_BPF_PROG_INFO:
	case HEADER_BPF_BTF:
	case HEADER_CPU_PMU_CAPS:
	case HEADER_CLOCK_DATA:
	case HEADER_HYBRID_TOPOLOGY:
	case HEADER_PMU_CAPS:
		return true;
	/* Information that can be updated */
	case HEADER_BUILD_ID:
	case HEADER_CMDLINE:
	case HEADER_EVENT_DESC:
	case HEADER_BRANCH_STACK:
	case HEADER_GROUP_DESC:
	case HEADER_AUXTRACE:
	case HEADER_STAT:
	case HEADER_SAMPLE_TIME:
	case HEADER_DIR_FORMAT:
	case HEADER_COMPRESSED:
	default:
		return false;
	}
}

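/* Read exactly sz bytes at file offset offs, treating short reads as errors */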
static int read_file(int fd, u64 offs, void *buf, size_t sz)
{
	ssize_t ret = preadn(fd, buf, sz, offs);

	if (ret < 0)
		return -errno;
	if ((size_t)ret != sz)
		return -EINVAL;
	return 0;
}

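/* Copy one saved feature section from the input file via the feat_writer */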
static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
{
	int fd = perf_data__fd(inject->session->data);
	u64 offs = inject->secs[feat].offset;
	size_t sz = inject->secs[feat].size;
	void *buf = malloc(sz);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = read_file(fd, offs, buf, sz);
	if (ret)
		goto out_free;

	ret = fw->write(fw, buf, sz);
out_free:
	free(buf);
	return ret;
}

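/* Lets a feat_copier callback recover the enclosing perf_inject */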
struct inject_fc {
	struct feat_copier fc;
	struct perf_inject *inject;
};

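/*
 * Feature copy callback: returns 1 if the section was copied verbatim,
 * 0 if it is absent or must be regenerated, or a negative error code.
 */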
static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
{
	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
	struct perf_inject *inject = inj_fc->inject;
	int ret;

	if (!inject->secs[feat].offset ||
	    !keep_feat(feat))
		return 0;

	ret = feat_copy(inject, feat, fw);
	if (ret < 0)
		return ret;

	return 1; /* Feature section copied */
}

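/* Copy kcore_dir* subdirectories from the input directory to the output */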
static int copy_kcore_dir(struct perf_inject *inject)
{
	char *cmd;
	int ret;

	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
		       inject->input_name, inject->output.path);
	if (ret < 0)
		return ret;
	pr_debug("%s\n", cmd);
	ret = system(cmd);
	free(cmd);
	return ret;
}

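/* Copy the guest's kcore_dir into the output, suffixed with the machine PID */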
static int guest_session__copy_kcore_dir(struct guest_session *gs)
{
	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
	char *cmd;
	int ret;

	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
		       gs->perf_data_file, inject->output.path, gs->machine_pid);
	if (ret < 0)
		return ret;
	pr_debug("%s\n", cmd);
	ret = system(cmd);
	free(cmd);
	return ret;
}

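/* There is no separate output file when updating the input in place */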
static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}

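/*
 * Core of 'perf inject': select tool callbacks for the requested mode,
 * process all events, then rewrite the header for on-disk output.
 */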
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct guest_session *gs = &inject->guest_session;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = perf_session__data_offset(session->evlist);

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	} else if (gs->perf_data_file) {
		char *name = gs->perf_data_file;

		/*
		 * Not strictly necessary, but keep these events in order wrt
		 * guest events.
		 */
		inject->tool.mmap = host__repipe;
		inject->tool.mmap2 = host__repipe;
		inject->tool.comm = host__repipe;
		inject->tool.fork = host__repipe;
		inject->tool.exit = host__repipe;
		inject->tool.lost = host__repipe;
		inject->tool.ksymbol = host__repipe;
		inject->tool.text_poke = host__repipe;
		/*
		 * Once the host session has initialized, set up sample ID
		 * mapping and feed in guest attrs, build IDs and initial
		 * events.
		 */
		inject->tool.finished_init = host__finished_init;
		/* Obey finished round ordering */
		inject->tool.finished_round = host__finished_round;
		/* Keep track of which CPU a VCPU is running on */
		inject->tool.context_switch = host__context_switch;
		/*
		 * Must order events to be able to obey finished round
		 * ordering.
		 */
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Set up a separate session to process guest perf.data file */
		ret = guest_session__start(gs, name, session->data->force);
		if (ret) {
			pr_err("Failed to process %s, error %d\n", name, ret);
			return ret;
		}
		/* Allow space in the header for guest attributes */
		output_data_offset += gs->session->header.data_offset;
		output_data_offset = roundup(output_data_offset, 4096);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (gs->session) {
		/*
		 * Remaining guest events have later timestamps. Flush them
		 * out to file.
		 */
		ret = guest_session__flush_events(gs);
		if (ret) {
			pr_err("Failed to flush guest events\n");
			return ret;
		}
	}

	if (!inject->is_pipe && !inject->in_place_update) {
		struct inject_fc inj_fc = {
			.fc.copy = feat_copy_cb,
			.inject = inject,
		};

		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);

		if (inject->copy_kcore_dir) {
			ret = copy_kcore_dir(inject);
			if (ret) {
				pr_err("Failed to copy kcore\n");
				return ret;
			}
		}
		if (gs->copy_kcore_dir) {
			ret = guest_session__copy_kcore_dir(gs);
			if (ret) {
				pr_err("Failed to copy guest kcore\n");
				return ret;
			}
		}
	}

	return ret;
}

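/* Entry point for 'perf inject': parse options, set up and run the session */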
int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.aux_output_hw_id = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.finished_init = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;
	bool repipe = true;
	const char *known_build_ids = NULL;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING(0, "known-build-ids", &known_build_ids,
			   "buildid path [,buildid path...]",
			   "build-ids to use for given paths"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch to show where "
			    "and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
			   "file", "vmlinux pathname"),
		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
			    "don't load vmlinux even if found"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
				    "inject events from a guest perf.data file",
				    parse_guest_data),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest os"
			   " instance has a subdir"),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (symbol__validate_sym_arguments())
		return -1;

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else {
		if (strcmp(inject.output.path, "-") && !inject.strip &&
		    has_kcore_dir(inject.input_name)) {
			inject.output.is_dir = true;
			inject.copy_kcore_dir = true;
		}
		if (perf_data__open(&inject.output)) {
			perror("failed to create output file");
			return -1;
		}
	}

	data.path = inject.input_name;
	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
		inject.is_pipe = true;
		/*
		 * Do not repipe the header when the input is a regular file,
		 * since it can either rewrite the header at the end or write
		 * a new pipe header.
		 */
		if (strcmp(inject.input_name, "-"))
			repipe = false;
	}

	inject.session = __perf_session__new(&data, repipe,
					     output_fd(&inject),
					     &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&inject.session->zstd_data, 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	/* Save original section info before feature bits change */
	ret = save_section_info(&inject);
	if (ret)
		goto out_delete;

	if (!data.is_pipe && inject.output.is_pipe) {
		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
		if (ret < 0) {
			pr_err("Couldn't write a new pipe header.\n");
			goto out_delete;
		}

		ret = perf_event__synthesize_for_pipe(&inject.tool,
						      inject.session,
						      &inject.output,
						      perf_event__repipe);
		if (ret < 0)
			goto out_delete;
	}

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Order events to make sure the mmap records are correct,
		 * especially for jitted code mmaps. We cannot generate the
		 * buildid hit list and inject the jit mmaps at the same
		 * time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		if (known_build_ids != NULL) {
			inject.known_build_ids =
				perf_inject__parse_known_build_ids(known_build_ids);

			if (inject.known_build_ids == NULL) {
				pr_err("Couldn't parse known build ids.\n");
				ret = -EINVAL;
				goto out_delete;
			}
		}
	}

	if (inject.sched_stat)
		inject.tool.ordered_events = true;

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

	guest_session__exit(&inject.guest_session);

out_delete:
	strlist__delete(inject.known_build_ids);
	zstd_fini(&inject.session->zstd_data);
	perf_session__delete(inject.session);
out_close_output:
	if (!inject.in_place_update)
		perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}
