1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * CTF writing support via babeltrace.
4 *
5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7 */
8
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include <traceevent/event-parse.h>
23 #include "asm/bug.h"
24 #include "data-convert.h"
25 #include "session.h"
26 #include "debug.h"
27 #include "tool.h"
28 #include "evlist.h"
29 #include "evsel.h"
30 #include "machine.h"
31 #include "config.h"
32 #include <linux/ctype.h>
33 #include <linux/err.h>
34 #include <linux/time64.h>
35 #include "util.h"
36 #include "clockid.h"
37
38 #define pr_N(n, fmt, ...) \
39 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
40
41 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
42 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
43
44 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
45
46 struct evsel_priv {
47 struct bt_ctf_event_class *event_class;
48 };
49
50 #define MAX_CPUS 4096
51
52 struct ctf_stream {
53 struct bt_ctf_stream *stream;
54 int cpu;
55 u32 count;
56 };
57
58 struct ctf_writer {
59 /* writer primitives */
60 struct bt_ctf_writer *writer;
61 struct ctf_stream **stream;
62 int stream_cnt;
63 struct bt_ctf_stream_class *stream_class;
64 struct bt_ctf_clock *clock;
65
66 /* data types */
67 union {
68 struct {
69 struct bt_ctf_field_type *s64;
70 struct bt_ctf_field_type *u64;
71 struct bt_ctf_field_type *s32;
72 struct bt_ctf_field_type *u32;
73 struct bt_ctf_field_type *string;
74 struct bt_ctf_field_type *u32_hex;
75 struct bt_ctf_field_type *u64_hex;
76 };
77 struct bt_ctf_field_type *array[6];
78 } data;
79 struct bt_ctf_event_class *comm_class;
80 struct bt_ctf_event_class *exit_class;
81 struct bt_ctf_event_class *fork_class;
82 struct bt_ctf_event_class *mmap_class;
83 struct bt_ctf_event_class *mmap2_class;
84 };
85
86 struct convert {
87 struct perf_tool tool;
88 struct ctf_writer writer;
89
90 u64 events_size;
91 u64 events_count;
92 u64 non_sample_count;
93
94 /* Ordered events configured queue size. */
95 u64 queue_size;
96 };
97
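/*
 * Create a field of the given integer type, set it to 'val' (signed or
 * unsigned according to the type) and attach it to the event payload
 * under 'name'.
 */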
98 static int value_set(struct bt_ctf_field_type *type,
99 struct bt_ctf_event *event,
100 const char *name, u64 val)
101 {
102 struct bt_ctf_field *field;
103 bool sign = bt_ctf_field_type_integer_get_signed(type);
104 int ret;
105
106 field = bt_ctf_field_create(type);
107 if (!field) {
108 pr_err("failed to create a field %s\n", name);
109 return -1;
110 }
111
112 if (sign) {
113 ret = bt_ctf_field_signed_integer_set_value(field, val);
114 if (ret) {
115 pr_err("failed to set field value %s\n", name);
116 goto err;
117 }
118 } else {
119 ret = bt_ctf_field_unsigned_integer_set_value(field, val);
120 if (ret) {
121 pr_err("failed to set field value %s\n", name);
122 goto err;
123 }
124 }
125
126 ret = bt_ctf_event_set_payload(event, name, field);
127 if (ret) {
128 pr_err("failed to set payload %s\n", name);
129 goto err;
130 }
131
132 pr2(" SET [%s = %" PRIu64 "]\n", name, val);
133
134 err:
135 bt_ctf_field_put(field);
136 return ret;
137 }
138
139 #define __FUNC_VALUE_SET(_name, _val_type) \
140 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
141 struct bt_ctf_event *event, \
142 const char *name, \
143 _val_type val) \
144 { \
145 struct bt_ctf_field_type *type = cw->data._name; \
146 return value_set(type, event, name, (u64) val); \
147 }
148
149 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
150
151 FUNC_VALUE_SET(s32)
152 FUNC_VALUE_SET(u32)
153 FUNC_VALUE_SET(s64)
154 FUNC_VALUE_SET(u64)
155 __FUNC_VALUE_SET(u64_hex, u64)
156
157 static int string_set_value(struct bt_ctf_field *field, const char *string);
158 static __maybe_unused int
159 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
160 const char *name, const char *string)
161 {
162 struct bt_ctf_field_type *type = cw->data.string;
163 struct bt_ctf_field *field;
164 int ret = 0;
165
166 field = bt_ctf_field_create(type);
167 if (!field) {
168 pr_err("failed to create a field %s\n", name);
169 return -1;
170 }
171
172 ret = string_set_value(field, string);
173 if (ret) {
174 pr_err("failed to set value %s\n", name);
175 goto err_put_field;
176 }
177
178 ret = bt_ctf_event_set_payload(event, name, field);
179 if (ret)
180 pr_err("failed to set payload %s\n", name);
181
182 err_put_field:
183 bt_ctf_field_put(field);
184 return ret;
185 }
186
187 static struct bt_ctf_field_type*
188 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
189 {
190 unsigned long flags = field->flags;
191
192 if (flags & TEP_FIELD_IS_STRING)
193 return cw->data.string;
194
195 if (!(flags & TEP_FIELD_IS_SIGNED)) {
196 /* unsigned longs are mostly pointers */
197 if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
198 return cw->data.u64_hex;
199 }
200
201 if (flags & TEP_FIELD_IS_SIGNED) {
202 if (field->size == 8)
203 return cw->data.s64;
204 else
205 return cw->data.s32;
206 }
207
208 if (field->size == 8)
209 return cw->data.u64;
210 else
211 return cw->data.u32;
212 }
213
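/*
 * Sign-extend a value read from a 'size'-byte tracepoint field into a
 * 64-bit signed representation, e.g. adjust_signedness(0xff, 1) yields
 * 0xffffffffffffffff (-1). 64-bit values are returned unchanged.
 */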
214 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
215 {
216 unsigned long long value_mask;
217
218 /*
219 * value_mask = (1 << (size * 8 - 1)) - 1.
220 * Directly set value_mask for code readers.
221 */
222 switch (size) {
223 case 1:
224 value_mask = 0x7fULL;
225 break;
226 case 2:
227 value_mask = 0x7fffULL;
228 break;
229 case 4:
230 value_mask = 0x7fffffffULL;
231 break;
232 case 8:
233 /*
234 * For a 64-bit value, return it as-is. There is no need
235 * to fill the high bits.
236 */
237 /* Fall through */
238 default:
239 /* BUG! */
240 return value_int;
241 }
242
243 /* If it is a positive value, don't adjust. */
244 if ((value_int & (~0ULL - value_mask)) == 0)
245 return value_int;
246
247 /* Fill upper part of value_int with 1 to make it a negative long long. */
248 return (value_int & value_mask) | ~value_mask;
249 }
250
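/*
 * Store 'string' into a CTF string field. Non-printable bytes are
 * escaped as "\xNN"; if the escape buffer cannot be allocated the field
 * is set to the "UNPRINTABLE-STRING" placeholder instead.
 */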
251 static int string_set_value(struct bt_ctf_field *field, const char *string)
252 {
253 char *buffer = NULL;
254 size_t len = strlen(string), i, p;
255 int err;
256
257 for (i = p = 0; i < len; i++, p++) {
258 if (isprint(string[i])) {
259 if (!buffer)
260 continue;
261 buffer[p] = string[i];
262 } else {
263 char numstr[5];
264
265 snprintf(numstr, sizeof(numstr), "\\x%02x",
266 (unsigned int)(string[i]) & 0xff);
267
268 if (!buffer) {
269 buffer = zalloc(i + (len - i) * 4 + 2);
270 if (!buffer) {
271 pr_err("failed to set unprintable string '%s'\n", string);
272 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
273 }
274 if (i > 0)
275 strncpy(buffer, string, i);
276 }
277 memcpy(buffer + p, numstr, 4);
278 p += 3;
279 }
280 }
281
282 if (!buffer)
283 return bt_ctf_field_string_set_value(field, string);
284 err = bt_ctf_field_string_set_value(field, buffer);
285 free(buffer);
286 return err;
287 }
288
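/*
 * Convert a single tracepoint format field from the raw sample data into
 * the event payload, resolving dynamic (packed offset/length) fields and
 * handling string, array and integer variants.
 */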
289 static int add_tracepoint_field_value(struct ctf_writer *cw,
290 struct bt_ctf_event_class *event_class,
291 struct bt_ctf_event *event,
292 struct perf_sample *sample,
293 struct tep_format_field *fmtf)
294 {
295 struct bt_ctf_field_type *type;
296 struct bt_ctf_field *array_field;
297 struct bt_ctf_field *field;
298 const char *name = fmtf->name;
299 void *data = sample->raw_data;
300 unsigned long flags = fmtf->flags;
301 unsigned int n_items;
302 unsigned int i;
303 unsigned int offset;
304 unsigned int len;
305 int ret;
306
307 name = fmtf->alias;
308 offset = fmtf->offset;
309 len = fmtf->size;
310 if (flags & TEP_FIELD_IS_STRING)
311 flags &= ~TEP_FIELD_IS_ARRAY;
312
313 if (flags & TEP_FIELD_IS_DYNAMIC) {
314 unsigned long long tmp_val;
315
316 tmp_val = tep_read_number(fmtf->event->tep,
317 data + offset, len);
318 offset = tmp_val;
319 len = offset >> 16;
320 offset &= 0xffff;
321 if (flags & TEP_FIELD_IS_RELATIVE)
322 offset += fmtf->offset + fmtf->size;
323 }
324
325 if (flags & TEP_FIELD_IS_ARRAY) {
326
327 type = bt_ctf_event_class_get_field_by_name(
328 event_class, name);
329 array_field = bt_ctf_field_create(type);
330 bt_ctf_field_type_put(type);
331 if (!array_field) {
332 pr_err("Failed to create array type %s\n", name);
333 return -1;
334 }
335
336 len = fmtf->size / fmtf->arraylen;
337 n_items = fmtf->arraylen;
338 } else {
339 n_items = 1;
340 array_field = NULL;
341 }
342
343 type = get_tracepoint_field_type(cw, fmtf);
344
345 for (i = 0; i < n_items; i++) {
346 if (flags & TEP_FIELD_IS_ARRAY)
347 field = bt_ctf_field_array_get_field(array_field, i);
348 else
349 field = bt_ctf_field_create(type);
350
351 if (!field) {
352 pr_err("failed to create a field %s\n", name);
353 return -1;
354 }
355
356 if (flags & TEP_FIELD_IS_STRING)
357 ret = string_set_value(field, data + offset + i * len);
358 else {
359 unsigned long long value_int;
360
361 value_int = tep_read_number(
362 fmtf->event->tep,
363 data + offset + i * len, len);
364
365 if (!(flags & TEP_FIELD_IS_SIGNED))
366 ret = bt_ctf_field_unsigned_integer_set_value(
367 field, value_int);
368 else
369 ret = bt_ctf_field_signed_integer_set_value(
370 field, adjust_signedness(value_int, len));
371 }
372
373 if (ret) {
374 pr_err("failed to set file value %s\n", name);
375 goto err_put_field;
376 }
377 if (!(flags & TEP_FIELD_IS_ARRAY)) {
378 ret = bt_ctf_event_set_payload(event, name, field);
379 if (ret) {
380 pr_err("failed to set payload %s\n", name);
381 goto err_put_field;
382 }
383 }
384 bt_ctf_field_put(field);
385 }
386 if (flags & TEP_FIELD_IS_ARRAY) {
387 ret = bt_ctf_event_set_payload(event, name, array_field);
388 if (ret) {
389 pr_err("Failed add payload array %s\n", name);
390 return -1;
391 }
392 bt_ctf_field_put(array_field);
393 }
394 return 0;
395
396 err_put_field:
397 bt_ctf_field_put(field);
398 return -1;
399 }
400
401 static int add_tracepoint_fields_values(struct ctf_writer *cw,
402 struct bt_ctf_event_class *event_class,
403 struct bt_ctf_event *event,
404 struct tep_format_field *fields,
405 struct perf_sample *sample)
406 {
407 struct tep_format_field *field;
408 int ret;
409
410 for (field = fields; field; field = field->next) {
411 ret = add_tracepoint_field_value(cw, event_class, event, sample,
412 field);
413 if (ret)
414 return -1;
415 }
416 return 0;
417 }
418
419 static int add_tracepoint_values(struct ctf_writer *cw,
420 struct bt_ctf_event_class *event_class,
421 struct bt_ctf_event *event,
422 struct evsel *evsel,
423 struct perf_sample *sample)
424 {
425 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
426 struct tep_format_field *fields = evsel->tp_format->format.fields;
427 int ret;
428
429 ret = add_tracepoint_fields_values(cw, event_class, event,
430 common_fields, sample);
431 if (!ret)
432 ret = add_tracepoint_fields_values(cw, event_class, event,
433 fields, sample);
434
435 return ret;
436 }
437
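/*
 * Emit BPF output raw data as a 'raw_len' element count plus a
 * 'raw_data' sequence of u32 words.
 */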
438 static int
439 add_bpf_output_values(struct bt_ctf_event_class *event_class,
440 struct bt_ctf_event *event,
441 struct perf_sample *sample)
442 {
443 struct bt_ctf_field_type *len_type, *seq_type;
444 struct bt_ctf_field *len_field, *seq_field;
445 unsigned int raw_size = sample->raw_size;
446 unsigned int nr_elements = raw_size / sizeof(u32);
447 unsigned int i;
448 int ret;
449
450 if (nr_elements * sizeof(u32) != raw_size)
451 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
452 raw_size, nr_elements * sizeof(u32) - raw_size);
453
454 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
455 len_field = bt_ctf_field_create(len_type);
456 if (!len_field) {
457 pr_err("failed to create 'raw_len' for bpf output event\n");
458 ret = -1;
459 goto put_len_type;
460 }
461
462 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
463 if (ret) {
464 pr_err("failed to set field value for raw_len\n");
465 goto put_len_field;
466 }
467 ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
468 if (ret) {
469 pr_err("failed to set payload to raw_len\n");
470 goto put_len_field;
471 }
472
473 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
474 seq_field = bt_ctf_field_create(seq_type);
475 if (!seq_field) {
476 pr_err("failed to create 'raw_data' for bpf output event\n");
477 ret = -1;
478 goto put_seq_type;
479 }
480
481 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
482 if (ret) {
483 pr_err("failed to set length of 'raw_data'\n");
484 goto put_seq_field;
485 }
486
487 for (i = 0; i < nr_elements; i++) {
488 struct bt_ctf_field *elem_field =
489 bt_ctf_field_sequence_get_field(seq_field, i);
490
491 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
492 ((u32 *)(sample->raw_data))[i]);
493
494 bt_ctf_field_put(elem_field);
495 if (ret) {
496 pr_err("failed to set raw_data[%d]\n", i);
497 goto put_seq_field;
498 }
499 }
500
501 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
502 if (ret)
503 pr_err("failed to set payload for raw_data\n");
504
505 put_seq_field:
506 bt_ctf_field_put(seq_field);
507 put_seq_type:
508 bt_ctf_field_type_put(seq_type);
509 put_len_field:
510 bt_ctf_field_put(len_field);
511 put_len_type:
512 bt_ctf_field_type_put(len_type);
513 return ret;
514 }
515
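/*
 * Emit the sample callchain as a 'perf_callchain_size' count plus a
 * 'perf_callchain' sequence of u64 instruction pointers.
 */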
516 static int
517 add_callchain_output_values(struct bt_ctf_event_class *event_class,
518 struct bt_ctf_event *event,
519 struct ip_callchain *callchain)
520 {
521 struct bt_ctf_field_type *len_type, *seq_type;
522 struct bt_ctf_field *len_field, *seq_field;
523 unsigned int nr_elements = callchain->nr;
524 unsigned int i;
525 int ret;
526
527 len_type = bt_ctf_event_class_get_field_by_name(
528 event_class, "perf_callchain_size");
529 len_field = bt_ctf_field_create(len_type);
530 if (!len_field) {
531 pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
532 ret = -1;
533 goto put_len_type;
534 }
535
536 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
537 if (ret) {
538 pr_err("failed to set field value for perf_callchain_size\n");
539 goto put_len_field;
540 }
541 ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
542 if (ret) {
543 pr_err("failed to set payload to perf_callchain_size\n");
544 goto put_len_field;
545 }
546
547 seq_type = bt_ctf_event_class_get_field_by_name(
548 event_class, "perf_callchain");
549 seq_field = bt_ctf_field_create(seq_type);
550 if (!seq_field) {
551 pr_err("failed to create 'perf_callchain' for callchain output event\n");
552 ret = -1;
553 goto put_seq_type;
554 }
555
556 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
557 if (ret) {
558 pr_err("failed to set length of 'perf_callchain'\n");
559 goto put_seq_field;
560 }
561
562 for (i = 0; i < nr_elements; i++) {
563 struct bt_ctf_field *elem_field =
564 bt_ctf_field_sequence_get_field(seq_field, i);
565
566 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
567 ((u64 *)(callchain->ips))[i]);
568
569 bt_ctf_field_put(elem_field);
570 if (ret) {
571 pr_err("failed to set callchain[%d]\n", i);
572 goto put_seq_field;
573 }
574 }
575
576 ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
577 if (ret)
578 pr_err("failed to set payload for raw_data\n");
579
580 put_seq_field:
581 bt_ctf_field_put(seq_field);
582 put_seq_type:
583 bt_ctf_field_type_put(seq_type);
584 put_len_field:
585 bt_ctf_field_put(len_field);
586 put_len_type:
587 bt_ctf_field_type_put(len_type);
588 return ret;
589 }
590
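/*
 * Fill the generic perf_* payload fields (ip, tid/pid, id, stream id,
 * period, ...) selected by the event's sample_type. Must stay in sync
 * with add_generic_types() below.
 */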
591 static int add_generic_values(struct ctf_writer *cw,
592 struct bt_ctf_event *event,
593 struct evsel *evsel,
594 struct perf_sample *sample)
595 {
596 u64 type = evsel->core.attr.sample_type;
597 int ret;
598
599 /*
600 * missing:
601 * PERF_SAMPLE_TIME - not needed as we have it in
602 * ctf event header
603 * PERF_SAMPLE_READ - TODO
604 * PERF_SAMPLE_RAW - tracepoint fields are handled separately
605 * PERF_SAMPLE_BRANCH_STACK - TODO
606 * PERF_SAMPLE_REGS_USER - TODO
607 * PERF_SAMPLE_STACK_USER - TODO
608 */
609
610 if (type & PERF_SAMPLE_IP) {
611 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
612 if (ret)
613 return -1;
614 }
615
616 if (type & PERF_SAMPLE_TID) {
617 ret = value_set_s32(cw, event, "perf_tid", sample->tid);
618 if (ret)
619 return -1;
620
621 ret = value_set_s32(cw, event, "perf_pid", sample->pid);
622 if (ret)
623 return -1;
624 }
625
626 if ((type & PERF_SAMPLE_ID) ||
627 (type & PERF_SAMPLE_IDENTIFIER)) {
628 ret = value_set_u64(cw, event, "perf_id", sample->id);
629 if (ret)
630 return -1;
631 }
632
633 if (type & PERF_SAMPLE_STREAM_ID) {
634 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
635 if (ret)
636 return -1;
637 }
638
639 if (type & PERF_SAMPLE_PERIOD) {
640 ret = value_set_u64(cw, event, "perf_period", sample->period);
641 if (ret)
642 return -1;
643 }
644
645 if (type & PERF_SAMPLE_WEIGHT) {
646 ret = value_set_u64(cw, event, "perf_weight", sample->weight);
647 if (ret)
648 return -1;
649 }
650
651 if (type & PERF_SAMPLE_DATA_SRC) {
652 ret = value_set_u64(cw, event, "perf_data_src",
653 sample->data_src);
654 if (ret)
655 return -1;
656 }
657
658 if (type & PERF_SAMPLE_TRANSACTION) {
659 ret = value_set_u64(cw, event, "perf_transaction",
660 sample->transaction);
661 if (ret)
662 return -1;
663 }
664
665 return 0;
666 }
667
668 static int ctf_stream__flush(struct ctf_stream *cs)
669 {
670 int err = 0;
671
672 if (cs) {
673 err = bt_ctf_stream_flush(cs->stream);
674 if (err)
675 pr_err("CTF stream %d flush failed\n", cs->cpu);
676
677 pr("Flush stream for cpu %d (%u samples)\n",
678 cs->cpu, cs->count);
679
680 cs->count = 0;
681 }
682
683 return err;
684 }
685
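/*
 * Allocate a per-CPU CTF stream and record the CPU number in the
 * stream's packet context ("cpu_id" field added in ctf_writer__init()).
 */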
686 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
687 {
688 struct ctf_stream *cs;
689 struct bt_ctf_field *pkt_ctx = NULL;
690 struct bt_ctf_field *cpu_field = NULL;
691 struct bt_ctf_stream *stream = NULL;
692 int ret;
693
694 cs = zalloc(sizeof(*cs));
695 if (!cs) {
696 pr_err("Failed to allocate ctf stream\n");
697 return NULL;
698 }
699
700 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
701 if (!stream) {
702 pr_err("Failed to create CTF stream\n");
703 goto out;
704 }
705
706 pkt_ctx = bt_ctf_stream_get_packet_context(stream);
707 if (!pkt_ctx) {
708 pr_err("Failed to obtain packet context\n");
709 goto out;
710 }
711
712 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
713 bt_ctf_field_put(pkt_ctx);
714 if (!cpu_field) {
715 pr_err("Failed to obtain cpu field\n");
716 goto out;
717 }
718
719 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
720 if (ret) {
721 pr_err("Failed to update CPU number\n");
722 goto out;
723 }
724
725 bt_ctf_field_put(cpu_field);
726
727 cs->cpu = cpu;
728 cs->stream = stream;
729 return cs;
730
731 out:
732 if (cpu_field)
733 bt_ctf_field_put(cpu_field);
734 if (stream)
735 bt_ctf_stream_put(stream);
736
737 free(cs);
738 return NULL;
739 }
740
741 static void ctf_stream__delete(struct ctf_stream *cs)
742 {
743 if (cs) {
744 bt_ctf_stream_put(cs->stream);
745 free(cs);
746 }
747 }
748
749 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
750 {
751 struct ctf_stream *cs = cw->stream[cpu];
752
753 if (!cs) {
754 cs = ctf_stream__create(cw, cpu);
755 cw->stream[cpu] = cs;
756 }
757
758 return cs;
759 }
760
761 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
762 struct evsel *evsel)
763 {
764 int cpu = 0;
765
766 if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
767 cpu = sample->cpu;
768
769 if (cpu > cw->stream_cnt) {
770 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
771 cpu, cw->stream_cnt);
772 cpu = 0;
773 }
774
775 return cpu;
776 }
777
778 #define STREAM_FLUSH_COUNT 100000
779
780 /*
781 * Currently we have no better way to determine when to
782 * flush a stream than to keep track of the number of
783 * appended events and check the count against a fixed
784 * threshold.
785 */
786 static bool is_flush_needed(struct ctf_stream *cs)
787 {
788 return cs->count >= STREAM_FLUSH_COUNT;
789 }
790
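/*
 * Convert one perf sample into a CTF event: set the clock from the
 * sample time, fill generic, tracepoint, callchain and BPF output
 * payloads as applicable, and append the event to the per-CPU stream,
 * flushing the stream once it reaches STREAM_FLUSH_COUNT events.
 */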
791 static int process_sample_event(struct perf_tool *tool,
792 union perf_event *_event,
793 struct perf_sample *sample,
794 struct evsel *evsel,
795 struct machine *machine __maybe_unused)
796 {
797 struct convert *c = container_of(tool, struct convert, tool);
798 struct evsel_priv *priv = evsel->priv;
799 struct ctf_writer *cw = &c->writer;
800 struct ctf_stream *cs;
801 struct bt_ctf_event_class *event_class;
802 struct bt_ctf_event *event;
803 int ret;
804 unsigned long type = evsel->core.attr.sample_type;
805
806 if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
807 return 0;
808
809 event_class = priv->event_class;
810
811 /* update stats */
812 c->events_count++;
813 c->events_size += _event->header.size;
814
815 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
816
817 event = bt_ctf_event_create(event_class);
818 if (!event) {
819 pr_err("Failed to create an CTF event\n");
820 return -1;
821 }
822
823 bt_ctf_clock_set_time(cw->clock, sample->time);
824
825 ret = add_generic_values(cw, event, evsel, sample);
826 if (ret)
827 return -1;
828
829 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
830 ret = add_tracepoint_values(cw, event_class, event,
831 evsel, sample);
832 if (ret)
833 return -1;
834 }
835
836 if (type & PERF_SAMPLE_CALLCHAIN) {
837 ret = add_callchain_output_values(event_class,
838 event, sample->callchain);
839 if (ret)
840 return -1;
841 }
842
843 if (evsel__is_bpf_output(evsel)) {
844 ret = add_bpf_output_values(event_class, event, sample);
845 if (ret)
846 return -1;
847 }
848
849 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
850 if (cs) {
851 if (is_flush_needed(cs))
852 ctf_stream__flush(cs);
853
854 cs->count++;
855 bt_ctf_stream_append_event(cs->stream, event);
856 }
857
858 bt_ctf_event_put(event);
859 return cs ? 0 : -1;
860 }
861
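/*
 * Converters for non-sample (side-band) events - comm, fork, exit, mmap
 * and mmap2 - used only when conversion of all events was requested
 * (see the opts->all handling in bt_convert__perf2ctf()).
 */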
862 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \
863 do { \
864 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
865 if (ret) \
866 return -1; \
867 } while (0)
868
869 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
870 static int process_##_name##_event(struct perf_tool *tool, \
871 union perf_event *_event, \
872 struct perf_sample *sample, \
873 struct machine *machine) \
874 { \
875 struct convert *c = container_of(tool, struct convert, tool);\
876 struct ctf_writer *cw = &c->writer; \
877 struct bt_ctf_event_class *event_class = cw->_name##_class;\
878 struct bt_ctf_event *event; \
879 struct ctf_stream *cs; \
880 int ret; \
881 \
882 c->non_sample_count++; \
883 c->events_size += _event->header.size; \
884 event = bt_ctf_event_create(event_class); \
885 if (!event) { \
886 pr_err("Failed to create an CTF event\n"); \
887 return -1; \
888 } \
889 \
890 bt_ctf_clock_set_time(cw->clock, sample->time); \
891 body \
892 cs = ctf_stream(cw, 0); \
893 if (cs) { \
894 if (is_flush_needed(cs)) \
895 ctf_stream__flush(cs); \
896 \
897 cs->count++; \
898 bt_ctf_stream_append_event(cs->stream, event); \
899 } \
900 bt_ctf_event_put(event); \
901 \
902 return perf_event__process_##_name(tool, _event, sample, machine);\
903 }
904
905 __FUNC_PROCESS_NON_SAMPLE(comm,
906 __NON_SAMPLE_SET_FIELD(comm, u32, pid);
907 __NON_SAMPLE_SET_FIELD(comm, u32, tid);
908 __NON_SAMPLE_SET_FIELD(comm, string, comm);
909 )
910 __FUNC_PROCESS_NON_SAMPLE(fork,
911 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
912 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
913 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
914 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
915 __NON_SAMPLE_SET_FIELD(fork, u64, time);
916 )
917
918 __FUNC_PROCESS_NON_SAMPLE(exit,
919 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
920 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
921 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
922 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
923 __NON_SAMPLE_SET_FIELD(fork, u64, time);
924 )
925 __FUNC_PROCESS_NON_SAMPLE(mmap,
926 __NON_SAMPLE_SET_FIELD(mmap, u32, pid);
927 __NON_SAMPLE_SET_FIELD(mmap, u32, tid);
928 __NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
929 __NON_SAMPLE_SET_FIELD(mmap, string, filename);
930 )
931 __FUNC_PROCESS_NON_SAMPLE(mmap2,
932 __NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
933 __NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
934 __NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
935 __NON_SAMPLE_SET_FIELD(mmap2, string, filename);
936 )
937 #undef __NON_SAMPLE_SET_FIELD
938 #undef __FUNC_PROCESS_NON_SAMPLE
939
940 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
941 static char *change_name(char *name, char *orig_name, int dup)
942 {
943 char *new_name = NULL;
944 size_t len;
945
946 if (!name)
947 name = orig_name;
948
949 if (dup >= 10)
950 goto out;
951 /*
952 * Add a '_' prefix to a potential keyword. According to
953 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
954 * a future CTF spec update may require us to use '$'.
955 */
956 if (dup < 0)
957 len = strlen(name) + sizeof("_");
958 else
959 len = strlen(orig_name) + sizeof("_dupl_X");
960
961 new_name = malloc(len);
962 if (!new_name)
963 goto out;
964
965 if (dup < 0)
966 snprintf(new_name, len, "_%s", name);
967 else
968 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
969
970 out:
971 if (name != orig_name)
972 free(name);
973 return new_name;
974 }
975
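/*
 * Add a tracepoint field to the event class, prefixing names that are
 * not valid CTF identifiers with '_' and appending a _dupl_N suffix to
 * duplicates; the chosen name is cached in field->alias.
 */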
976 static int event_class_add_field(struct bt_ctf_event_class *event_class,
977 struct bt_ctf_field_type *type,
978 struct tep_format_field *field)
979 {
980 struct bt_ctf_field_type *t = NULL;
981 char *name;
982 int dup = 1;
983 int ret;
984
985 /* alias was already assigned */
986 if (field->alias != field->name)
987 return bt_ctf_event_class_add_field(event_class, type,
988 (char *)field->alias);
989
990 name = field->name;
991
992 /* If 'name' is a keyword, add a prefix. */
993 if (bt_ctf_validate_identifier(name))
994 name = change_name(name, field->name, -1);
995
996 if (!name) {
997 pr_err("Failed to fix invalid identifier.");
998 return -1;
999 }
1000 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1001 bt_ctf_field_type_put(t);
1002 name = change_name(name, field->name, dup++);
1003 if (!name) {
1004 pr_err("Failed to create dup name for '%s'\n", field->name);
1005 return -1;
1006 }
1007 }
1008
1009 ret = bt_ctf_event_class_add_field(event_class, type, name);
1010 if (!ret)
1011 field->alias = name;
1012
1013 return ret;
1014 }
1015
1016 static int add_tracepoint_fields_types(struct ctf_writer *cw,
1017 struct tep_format_field *fields,
1018 struct bt_ctf_event_class *event_class)
1019 {
1020 struct tep_format_field *field;
1021 int ret;
1022
1023 for (field = fields; field; field = field->next) {
1024 struct bt_ctf_field_type *type;
1025 unsigned long flags = field->flags;
1026
1027 pr2(" field '%s'\n", field->name);
1028
1029 type = get_tracepoint_field_type(cw, field);
1030 if (!type)
1031 return -1;
1032
1033 /*
1034 * A string is an array of chars. For this we use the string
1035 * type and don't care that it is an array. What we don't
1036 * support is an array of strings.
1037 */
1038 if (flags & TEP_FIELD_IS_STRING)
1039 flags &= ~TEP_FIELD_IS_ARRAY;
1040
1041 if (flags & TEP_FIELD_IS_ARRAY)
1042 type = bt_ctf_field_type_array_create(type, field->arraylen);
1043
1044 ret = event_class_add_field(event_class, type, field);
1045
1046 if (flags & TEP_FIELD_IS_ARRAY)
1047 bt_ctf_field_type_put(type);
1048
1049 if (ret) {
1050 pr_err("Failed to add field '%s': %d\n",
1051 field->name, ret);
1052 return -1;
1053 }
1054 }
1055
1056 return 0;
1057 }
1058
1059 static int add_tracepoint_types(struct ctf_writer *cw,
1060 struct evsel *evsel,
1061 struct bt_ctf_event_class *class)
1062 {
1063 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1064 struct tep_format_field *fields = evsel->tp_format->format.fields;
1065 int ret;
1066
1067 ret = add_tracepoint_fields_types(cw, common_fields, class);
1068 if (!ret)
1069 ret = add_tracepoint_fields_types(cw, fields, class);
1070
1071 return ret;
1072 }
1073
1074 static int add_bpf_output_types(struct ctf_writer *cw,
1075 struct bt_ctf_event_class *class)
1076 {
1077 struct bt_ctf_field_type *len_type = cw->data.u32;
1078 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1079 struct bt_ctf_field_type *seq_type;
1080 int ret;
1081
1082 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1083 if (ret)
1084 return ret;
1085
1086 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1087 if (!seq_type)
1088 return -1;
1089
1090 return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1091 }
1092
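/*
 * Declare the generic perf_* payload fields for the event class,
 * mirroring what add_generic_values() fills in per sample.
 */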
1093 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1094 struct bt_ctf_event_class *event_class)
1095 {
1096 u64 type = evsel->core.attr.sample_type;
1097
1098 /*
1099 * missing:
1100 * PERF_SAMPLE_TIME - not needed as we have it in
1101 * ctf event header
1102 * PERF_SAMPLE_READ - TODO
1103 * PERF_SAMPLE_CALLCHAIN - TODO
1104 * PERF_SAMPLE_RAW - tracepoint fields and BPF output
1105 * are handled separately
1106 * PERF_SAMPLE_BRANCH_STACK - TODO
1107 * PERF_SAMPLE_REGS_USER - TODO
1108 * PERF_SAMPLE_STACK_USER - TODO
1109 */
1110
1111 #define ADD_FIELD(cl, t, n) \
1112 do { \
1113 pr2(" field '%s'\n", n); \
1114 if (bt_ctf_event_class_add_field(cl, t, n)) { \
1115 pr_err("Failed to add field '%s';\n", n); \
1116 return -1; \
1117 } \
1118 } while (0)
1119
1120 if (type & PERF_SAMPLE_IP)
1121 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1122
1123 if (type & PERF_SAMPLE_TID) {
1124 ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1125 ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1126 }
1127
1128 if ((type & PERF_SAMPLE_ID) ||
1129 (type & PERF_SAMPLE_IDENTIFIER))
1130 ADD_FIELD(event_class, cw->data.u64, "perf_id");
1131
1132 if (type & PERF_SAMPLE_STREAM_ID)
1133 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1134
1135 if (type & PERF_SAMPLE_PERIOD)
1136 ADD_FIELD(event_class, cw->data.u64, "perf_period");
1137
1138 if (type & PERF_SAMPLE_WEIGHT)
1139 ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1140
1141 if (type & PERF_SAMPLE_DATA_SRC)
1142 ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1143
1144 if (type & PERF_SAMPLE_TRANSACTION)
1145 ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1146
1147 if (type & PERF_SAMPLE_CALLCHAIN) {
1148 ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1149 ADD_FIELD(event_class,
1150 bt_ctf_field_type_sequence_create(
1151 cw->data.u64_hex, "perf_callchain_size"),
1152 "perf_callchain");
1153 }
1154
1155 #undef ADD_FIELD
1156 return 0;
1157 }
1158
1159 static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1160 {
1161 struct bt_ctf_event_class *event_class;
1162 struct evsel_priv *priv;
1163 const char *name = evsel__name(evsel);
1164 int ret;
1165
1166 pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1167
1168 event_class = bt_ctf_event_class_create(name);
1169 if (!event_class)
1170 return -1;
1171
1172 ret = add_generic_types(cw, evsel, event_class);
1173 if (ret)
1174 goto err;
1175
1176 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1177 ret = add_tracepoint_types(cw, evsel, event_class);
1178 if (ret)
1179 goto err;
1180 }
1181
1182 if (evsel__is_bpf_output(evsel)) {
1183 ret = add_bpf_output_types(cw, event_class);
1184 if (ret)
1185 goto err;
1186 }
1187
1188 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1189 if (ret) {
1190 pr("Failed to add event class into stream.\n");
1191 goto err;
1192 }
1193
1194 priv = malloc(sizeof(*priv));
1195 if (!priv)
1196 goto err;
1197
1198 priv->event_class = event_class;
1199 evsel->priv = priv;
1200 return 0;
1201
1202 err:
1203 bt_ctf_event_class_put(event_class);
1204 pr_err("Failed to add event '%s'.\n", name);
1205 return -1;
1206 }
1207
1208 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1209 {
1210 struct evlist *evlist = session->evlist;
1211 struct evsel *evsel;
1212 int ret;
1213
1214 evlist__for_each_entry(evlist, evsel) {
1215 ret = add_event(cw, evsel);
1216 if (ret)
1217 return ret;
1218 }
1219 return 0;
1220 }
1221
1222 #define __NON_SAMPLE_ADD_FIELD(t, n) \
1223 do { \
1224 pr2(" field '%s'\n", #n); \
1225 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1226 pr_err("Failed to add field '%s';\n", #n);\
1227 return -1; \
1228 } \
1229 } while (0)
1230
1231 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
1232 static int add_##_name##_event(struct ctf_writer *cw) \
1233 { \
1234 struct bt_ctf_event_class *event_class; \
1235 int ret; \
1236 \
1237 pr("Adding "#_name" event\n"); \
1238 event_class = bt_ctf_event_class_create("perf_" #_name);\
1239 if (!event_class) \
1240 return -1; \
1241 body \
1242 \
1243 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1244 if (ret) { \
1245 pr("Failed to add event class '"#_name"' into stream.\n");\
1246 return ret; \
1247 } \
1248 \
1249 cw->_name##_class = event_class; \
1250 bt_ctf_event_class_put(event_class); \
1251 return 0; \
1252 }
1253
1254 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1255 __NON_SAMPLE_ADD_FIELD(u32, pid);
1256 __NON_SAMPLE_ADD_FIELD(u32, tid);
1257 __NON_SAMPLE_ADD_FIELD(string, comm);
1258 )
1259
1260 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1261 __NON_SAMPLE_ADD_FIELD(u32, pid);
1262 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1263 __NON_SAMPLE_ADD_FIELD(u32, tid);
1264 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1265 __NON_SAMPLE_ADD_FIELD(u64, time);
1266 )
1267
1268 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1269 __NON_SAMPLE_ADD_FIELD(u32, pid);
1270 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1271 __NON_SAMPLE_ADD_FIELD(u32, tid);
1272 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1273 __NON_SAMPLE_ADD_FIELD(u64, time);
1274 )
1275
1276 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1277 __NON_SAMPLE_ADD_FIELD(u32, pid);
1278 __NON_SAMPLE_ADD_FIELD(u32, tid);
1279 __NON_SAMPLE_ADD_FIELD(u64_hex, start);
1280 __NON_SAMPLE_ADD_FIELD(string, filename);
1281 )
1282
1283 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1284 __NON_SAMPLE_ADD_FIELD(u32, pid);
1285 __NON_SAMPLE_ADD_FIELD(u32, tid);
1286 __NON_SAMPLE_ADD_FIELD(u64_hex, start);
1287 __NON_SAMPLE_ADD_FIELD(string, filename);
1288 )
1289 #undef __NON_SAMPLE_ADD_FIELD
1290 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1291
1292 static int setup_non_sample_events(struct ctf_writer *cw,
1293 struct perf_session *session __maybe_unused)
1294 {
1295 int ret;
1296
1297 ret = add_comm_event(cw);
1298 if (ret)
1299 return ret;
1300 ret = add_exit_event(cw);
1301 if (ret)
1302 return ret;
1303 ret = add_fork_event(cw);
1304 if (ret)
1305 return ret;
1306 ret = add_mmap_event(cw);
1307 if (ret)
1308 return ret;
1309 ret = add_mmap2_event(cw);
1310 if (ret)
1311 return ret;
1312 return 0;
1313 }
1314
1315 static void cleanup_events(struct perf_session *session)
1316 {
1317 struct evlist *evlist = session->evlist;
1318 struct evsel *evsel;
1319
1320 evlist__for_each_entry(evlist, evsel) {
1321 struct evsel_priv *priv;
1322
1323 priv = evsel->priv;
1324 bt_ctf_event_class_put(priv->event_class);
1325 zfree(&evsel->priv);
1326 }
1327
1328 evlist__delete(evlist);
1329 session->evlist = NULL;
1330 }
1331
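/*
 * Only the stream pointer array is allocated here; the per-CPU streams
 * themselves are created lazily by ctf_stream() on first use.
 */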
1332 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1333 {
1334 struct ctf_stream **stream;
1335 struct perf_header *ph = &session->header;
1336 int ncpus;
1337
1338 /*
1339 * Try to get the number of CPUs used in the data file;
1340 * if not present, fall back to MAX_CPUS.
1341 */
1342 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1343
1344 stream = zalloc(sizeof(*stream) * ncpus);
1345 if (!stream) {
1346 pr_err("Failed to allocate streams.\n");
1347 return -ENOMEM;
1348 }
1349
1350 cw->stream = stream;
1351 cw->stream_cnt = ncpus;
1352 return 0;
1353 }
1354
1355 static void free_streams(struct ctf_writer *cw)
1356 {
1357 int cpu;
1358
1359 for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1360 ctf_stream__delete(cw->stream[cpu]);
1361
1362 zfree(&cw->stream);
1363 }
1364
1365 static int ctf_writer__setup_env(struct ctf_writer *cw,
1366 struct perf_session *session)
1367 {
1368 struct perf_header *header = &session->header;
1369 struct bt_ctf_writer *writer = cw->writer;
1370
1371 #define ADD(__n, __v) \
1372 do { \
1373 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
1374 return -1; \
1375 } while (0)
1376
1377 ADD("host", header->env.hostname);
1378 ADD("sysname", "Linux");
1379 ADD("release", header->env.os_release);
1380 ADD("version", header->env.version);
1381 ADD("machine", header->env.arch);
1382 ADD("domain", "kernel");
1383 ADD("tracer_name", "perf");
1384
1385 #undef ADD
1386 return 0;
1387 }
1388
1389 static int ctf_writer__setup_clock(struct ctf_writer *cw,
1390 struct perf_session *session,
1391 bool tod)
1392 {
1393 struct bt_ctf_clock *clock = cw->clock;
1394 const char *desc = "perf clock";
1395 int64_t offset = 0;
1396
1397 if (tod) {
1398 struct perf_env *env = &session->header.env;
1399
1400 if (!env->clock.enabled) {
1401 pr_err("Can't provide --tod time, missing clock data. "
1402 "Please record with -k/--clockid option.\n");
1403 return -1;
1404 }
1405
1406 desc = clockid_name(env->clock.clockid);
1407 offset = env->clock.tod_ns - env->clock.clockid_ns;
1408 }
1409
1410 #define SET(__n, __v) \
1411 do { \
1412 if (bt_ctf_clock_set_##__n(clock, __v)) \
1413 return -1; \
1414 } while (0)
1415
1416 SET(frequency, 1000000000);
1417 SET(offset, offset);
1418 SET(description, desc);
1419 SET(precision, 10);
1420 SET(is_absolute, 0);
1421
1422 #undef SET
1423 return 0;
1424 }
1425
1426 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1427 {
1428 struct bt_ctf_field_type *type;
1429
1430 type = bt_ctf_field_type_integer_create(size);
1431 if (!type)
1432 return NULL;
1433
1434 if (sign &&
1435 bt_ctf_field_type_integer_set_signed(type, 1))
1436 goto err;
1437
1438 if (hex &&
1439 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1440 goto err;
1441
1442 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1443 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1444 #else
1445 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1446 #endif
1447
1448 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1449 size, sign ? "un" : "", hex ? "hex" : "");
1450 return type;
1451
1452 err:
1453 bt_ctf_field_type_put(type);
1454 return NULL;
1455 }
1456
1457 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1458 {
1459 unsigned int i;
1460
1461 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1462 bt_ctf_field_type_put(cw->data.array[i]);
1463 }
1464
1465 static int ctf_writer__init_data(struct ctf_writer *cw)
1466 {
1467 #define CREATE_INT_TYPE(type, size, sign, hex) \
1468 do { \
1469 (type) = create_int_type(size, sign, hex); \
1470 if (!(type)) \
1471 goto err; \
1472 } while (0)
1473
1474 CREATE_INT_TYPE(cw->data.s64, 64, true, false);
1475 CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1476 CREATE_INT_TYPE(cw->data.s32, 32, true, false);
1477 CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1478 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1479 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1480
1481 cw->data.string = bt_ctf_field_type_string_create();
1482 if (cw->data.string)
1483 return 0;
1484
1485 err:
1486 ctf_writer__cleanup_data(cw);
1487 pr_err("Failed to create data types.\n");
1488 return -1;
1489 }
1490
1491 static void ctf_writer__cleanup(struct ctf_writer *cw)
1492 {
1493 ctf_writer__cleanup_data(cw);
1494
1495 bt_ctf_clock_put(cw->clock);
1496 free_streams(cw);
1497 bt_ctf_stream_class_put(cw->stream_class);
1498 bt_ctf_writer_put(cw->writer);
1499
1500 /* and NULL all the pointers */
1501 memset(cw, 0, sizeof(*cw));
1502 }
1503
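/*
 * Create the CTF writer, clock, stream class and basic data types, and
 * add the "cpu_id" field to the packet context. Everything is torn down
 * by ctf_writer__cleanup() on failure.
 */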
1504 static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1505 struct perf_session *session, bool tod)
1506 {
1507 struct bt_ctf_writer *writer;
1508 struct bt_ctf_stream_class *stream_class;
1509 struct bt_ctf_clock *clock;
1510 struct bt_ctf_field_type *pkt_ctx_type;
1511 int ret;
1512
1513 /* CTF writer */
1514 writer = bt_ctf_writer_create(path);
1515 if (!writer)
1516 goto err;
1517
1518 cw->writer = writer;
1519
1520 /* CTF clock */
1521 clock = bt_ctf_clock_create("perf_clock");
1522 if (!clock) {
1523 pr("Failed to create CTF clock.\n");
1524 goto err_cleanup;
1525 }
1526
1527 cw->clock = clock;
1528
1529 if (ctf_writer__setup_clock(cw, session, tod)) {
1530 pr("Failed to setup CTF clock.\n");
1531 goto err_cleanup;
1532 }
1533
1534 /* CTF stream class */
1535 stream_class = bt_ctf_stream_class_create("perf_stream");
1536 if (!stream_class) {
1537 pr("Failed to create CTF stream class.\n");
1538 goto err_cleanup;
1539 }
1540
1541 cw->stream_class = stream_class;
1542
1543 /* CTF clock stream setup */
1544 if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1545 pr("Failed to assign CTF clock to stream class.\n");
1546 goto err_cleanup;
1547 }
1548
1549 if (ctf_writer__init_data(cw))
1550 goto err_cleanup;
1551
1552 /* Add cpu_id for packet context */
1553 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1554 if (!pkt_ctx_type)
1555 goto err_cleanup;
1556
1557 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1558 bt_ctf_field_type_put(pkt_ctx_type);
1559 if (ret)
1560 goto err_cleanup;
1561
1562 /* CTF clock writer setup */
1563 if (bt_ctf_writer_add_clock(writer, clock)) {
1564 pr("Failed to assign CTF clock to writer.\n");
1565 goto err_cleanup;
1566 }
1567
1568 return 0;
1569
1570 err_cleanup:
1571 ctf_writer__cleanup(cw);
1572 err:
1573 pr_err("Failed to setup CTF writer.\n");
1574 return -1;
1575 }
1576
1577 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1578 {
1579 int cpu, ret = 0;
1580
1581 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1582 ret = ctf_stream__flush(cw->stream[cpu]);
1583
1584 return ret;
1585 }
1586
1587 static int convert__config(const char *var, const char *value, void *cb)
1588 {
1589 struct convert *c = cb;
1590
1591 if (!strcmp(var, "convert.queue-size"))
1592 return perf_config_u64(&c->queue_size, var, value);
1593
1594 return 0;
1595 }
1596
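/*
 * Entry point of the conversion: read the perf.data session from
 * 'input', set up the CTF writer for 'path', process all events and
 * flush the resulting streams.
 */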
1597 int bt_convert__perf2ctf(const char *input, const char *path,
1598 struct perf_data_convert_opts *opts)
1599 {
1600 struct perf_session *session;
1601 struct perf_data data = {
1602 .path = input,
1603 .mode = PERF_DATA_MODE_READ,
1604 .force = opts->force,
1605 };
1606 struct convert c = {
1607 .tool = {
1608 .sample = process_sample_event,
1609 .mmap = perf_event__process_mmap,
1610 .mmap2 = perf_event__process_mmap2,
1611 .comm = perf_event__process_comm,
1612 .exit = perf_event__process_exit,
1613 .fork = perf_event__process_fork,
1614 .lost = perf_event__process_lost,
1615 .tracing_data = perf_event__process_tracing_data,
1616 .build_id = perf_event__process_build_id,
1617 .namespaces = perf_event__process_namespaces,
1618 .ordered_events = true,
1619 .ordering_requires_timestamps = true,
1620 },
1621 };
1622 struct ctf_writer *cw = &c.writer;
1623 int err;
1624
1625 if (opts->all) {
1626 c.tool.comm = process_comm_event;
1627 c.tool.exit = process_exit_event;
1628 c.tool.fork = process_fork_event;
1629 c.tool.mmap = process_mmap_event;
1630 c.tool.mmap2 = process_mmap2_event;
1631 }
1632
1633 err = perf_config(convert__config, &c);
1634 if (err)
1635 return err;
1636
1637 err = -1;
1638 /* perf.data session */
1639 session = perf_session__new(&data, &c.tool);
1640 if (IS_ERR(session))
1641 return PTR_ERR(session);
1642
1643 /* CTF writer */
1644 if (ctf_writer__init(cw, path, session, opts->tod))
1645 goto free_session;
1646
1647 if (c.queue_size) {
1648 ordered_events__set_alloc_size(&session->ordered_events,
1649 c.queue_size);
1650 }
1651
1652 /* CTF writer env/clock setup */
1653 if (ctf_writer__setup_env(cw, session))
1654 goto free_writer;
1655
1656 /* CTF events setup */
1657 if (setup_events(cw, session))
1658 goto free_writer;
1659
1660 if (opts->all && setup_non_sample_events(cw, session))
1661 goto free_writer;
1662
1663 if (setup_streams(cw, session))
1664 goto free_writer;
1665
1666 err = perf_session__process_events(session);
1667 if (!err)
1668 err = ctf_writer__flush_streams(cw);
1669 else
1670 pr_err("Error during conversion.\n");
1671
1672 fprintf(stderr,
1673 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1674 data.path, path);
1675
1676 fprintf(stderr,
1677 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1678 (double) c.events_size / 1024.0 / 1024.0,
1679 c.events_count);
1680
1681 if (!c.non_sample_count)
1682 fprintf(stderr, ") ]\n");
1683 else
1684 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1685
1686 cleanup_events(session);
1687 perf_session__delete(session);
1688 ctf_writer__cleanup(cw);
1689
1690 return err;
1691
1692 free_writer:
1693 ctf_writer__cleanup(cw);
1694 free_session:
1695 perf_session__delete(session);
1696 pr_err("Error during conversion setup.\n");
1697 return err;
1698 }
1699