/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/cpumap.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
        PERF_AUXTRACE_ERROR_ITRACE = 1,
        PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
        PERF_AUXTRACE_UNKNOWN,
        PERF_AUXTRACE_INTEL_PT,
        PERF_AUXTRACE_INTEL_BTS,
        PERF_AUXTRACE_CS_ETM,
        PERF_AUXTRACE_ARM_SPE,
        PERF_AUXTRACE_S390_CPUMSF,
        PERF_AUXTRACE_HISI_PTT,
};

enum itrace_period_type {
        PERF_ITRACE_PERIOD_INSTRUCTIONS,
        PERF_ITRACE_PERIOD_TICKS,
        PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW       (1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST      (1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS  (1 << ('a' - 'a'))
#define AUXTRACE_LOG_FLG_ON_ERROR       (1 << ('e' - 'a'))
#define AUXTRACE_LOG_FLG_USE_STDOUT     (1 << ('o' - 'a'))
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @intr_events: whether to synthesize interrupt events
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @add_last_branch: add branch context to existing event records
 * @approx_ipc: approximate IPC
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 * @log_on_error_size: size of log to keep for outputting log only on errors
 */
struct itrace_synth_opts {
        bool set;
        bool default_no_sample;
        bool inject;
        bool instructions;
        bool branches;
        bool transactions;
        bool ptwrites;
        bool pwr_events;
        bool other_events;
        bool intr_events;
        bool errors;
        bool dont_decode;
        bool log;
        bool calls;
        bool returns;
        bool callchain;
        bool add_callchain;
        bool thread_stack;
        bool last_branch;
        bool add_last_branch;
        bool approx_ipc;
        bool flc;
        bool llc;
        bool tlb;
        bool remote_access;
        bool mem;
        bool timeless_decoding;
        bool vm_time_correlation;
        bool vm_tm_corr_dry_run;
        char *vm_tm_corr_args;
        unsigned int callchain_sz;
        unsigned int last_branch_sz;
        unsigned long long period;
        enum itrace_period_type period_type;
        unsigned long initial_skip;
        unsigned long *cpu_bitmap;
        struct perf_time_interval *ptime_range;
        int range_num;
        unsigned int error_plus_flags;
        unsigned int error_minus_flags;
        unsigned int log_plus_flags;
        unsigned int log_minus_flags;
        unsigned int quick;
        unsigned int log_on_error_size;
};

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
        u64 file_offset;
        u64 sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
        struct list_head list;
        size_t nr;
        struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: whether the given evsel is an AUX area tracing event for
 *                     this session
 */
struct auxtrace {
        int (*process_event)(struct perf_session *session,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct perf_tool *tool);
        int (*process_auxtrace_event)(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_tool *tool);
        int (*queue_data)(struct perf_session *session,
                          struct perf_sample *sample, union perf_event *event,
                          u64 data_offset);
        void (*dump_auxtrace_sample)(struct perf_session *session,
                                     struct perf_sample *sample);
        int (*flush_events)(struct perf_session *session,
                            struct perf_tool *tool);
        void (*free_events)(struct perf_session *session);
        void (*free)(struct perf_session *session);
        bool (*evsel_is_auxtrace)(struct perf_session *session,
                                  struct evsel *evsel);
};
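
/*
 * Illustrative sketch only (not compiled): a decoder typically embeds
 * struct auxtrace at the start of its own state and fills in the callbacks.
 * The my_decoder type and my_process_event() below are hypothetical
 * stand-ins, not declarations from this header:
 *
 *      struct my_decoder {
 *              struct auxtrace auxtrace;
 *              // decoder-specific state ...
 *      };
 *
 *      static int my_process_event(struct perf_session *session,
 *                                  union perf_event *event,
 *                                  struct perf_sample *sample,
 *                                  struct perf_tool *tool);
 *
 *      dec->auxtrace.process_event = my_process_event;
 *      // ... fill in the remaining callbacks ...
 *      session->auxtrace = &dec->auxtrace;
 *
 * The callbacks then recover the decoder with
 * container_of(session->auxtrace, struct my_decoder, auxtrace).
 */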

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
        struct list_head list;
        size_t size;
        pid_t pid;
        pid_t tid;
        struct perf_cpu cpu;
        void *data;
        off_t data_offset;
        void *mmap_addr;
        size_t mmap_size;
        bool data_needs_freeing;
        bool consecutive;
        u64 offset;
        u64 reference;
        u64 buffer_nr;
        size_t use_size;
        void *use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
        struct list_head head;
        pid_t tid;
        int cpu;
        bool set;
        void *priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
        struct auxtrace_queue *queue_array;
        unsigned int nr_queues;
        bool new_data;
        bool populated;
        u64 next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
        unsigned int queue_nr;
        u64 ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
        struct auxtrace_heap_item *heap_array;
        unsigned int heap_cnt;
        unsigned int heap_sz;
};

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
        void *base;
        void *userpg;
        size_t mask;
        size_t len;
        u64 prev;
        int idx;
        pid_t tid;
        int cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @mmap_needed: set to %false for non-auxtrace events. This is needed because
 *               auxtrace mmapping is done in the same code path as non-auxtrace
 *               mmapping but not every evsel that needs non-auxtrace mmapping
 *               also needs auxtrace mmapping.
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
        size_t mask;
        off_t offset;
        size_t len;
        int prot;
        int idx;
        pid_t tid;
        bool mmap_needed;
        struct perf_cpu cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
        int (*recording_options)(struct auxtrace_record *itr,
                                 struct evlist *evlist,
                                 struct record_opts *opts);
        size_t (*info_priv_size)(struct auxtrace_record *itr,
                                 struct evlist *evlist);
        int (*info_fill)(struct auxtrace_record *itr,
                         struct perf_session *session,
                         struct perf_record_auxtrace_info *auxtrace_info,
                         size_t priv_size);
        void (*free)(struct auxtrace_record *itr);
        int (*snapshot_start)(struct auxtrace_record *itr);
        int (*snapshot_finish)(struct auxtrace_record *itr);
        int (*find_snapshot)(struct auxtrace_record *itr, int idx,
                             struct auxtrace_mmap *mm, unsigned char *data,
                             u64 *head, u64 *old);
        int (*parse_snapshot_options)(struct auxtrace_record *itr,
                                      struct record_opts *opts,
                                      const char *str);
        u64 (*reference)(struct auxtrace_record *itr);
        int (*read_finish)(struct auxtrace_record *itr, int idx);
        unsigned int alignment;
        unsigned int default_aux_sample_size;
        struct perf_pmu *pmu;
        struct evlist *evlist;
};
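
/*
 * Illustrative sketch only (not compiled): an architecture's implementation
 * of auxtrace_record__init() commonly allocates a wrapper around
 * struct auxtrace_record, fills in the callbacks it supports and returns the
 * embedded structure. The my_record type and my_* callbacks below are
 * hypothetical:
 *
 *      struct my_record {
 *              struct auxtrace_record itr;
 *              struct perf_pmu *pmu;
 *      };
 *
 *      rec->itr.recording_options = my_recording_options;
 *      rec->itr.info_priv_size    = my_info_priv_size;
 *      rec->itr.info_fill         = my_info_fill;
 *      rec->itr.free              = my_free;
 *      rec->itr.alignment         = 0;
 *      return &rec->itr;
 */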

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
        struct list_head list;
        bool range;
        bool start;
        const char *action;
        const char *sym_from;
        const char *sym_to;
        int sym_from_idx;
        int sym_to_idx;
        u64 addr;
        u64 size;
        const char *filename;
        char *str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
        struct list_head head;
        int cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
                                           int kernel_is_64_bit __maybe_unused)
{
        struct perf_event_mmap_page *pc = mm->userpg;
        u64 head;

#if BITS_PER_LONG == 32
        if (kernel_is_64_bit)
                return compat_auxtrace_mmap__read_head(mm);
#endif
        head = READ_ONCE(pc->aux_head);

        /* Ensure all reads are done after we read the head */
        smp_rmb();
        return head;
}

static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
                                            int kernel_is_64_bit __maybe_unused)
{
        struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
        if (kernel_is_64_bit)
                return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
        /* Ensure all reads are done before we write the tail out */
        smp_mb();
        WRITE_ONCE(pc->aux_tail, tail);
        return 0;
}
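
/*
 * Illustrative sketch only (not compiled) of the head/tail protocol the two
 * helpers above implement: the consumer reads aux_head, copies out the data
 * between the previously seen head and the new one, then publishes the new
 * tail so the kernel can reuse that space. Wrapping via mm->mask and error
 * handling are omitted, and kernel_is_64_bit is assumed to have been
 * determined elsewhere:
 *
 *      u64 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
 *      u64 old = mm->prev;
 *
 *      if (head != old) {
 *              // copy [old, head) out of the AUX area at mm->base ...
 *              mm->prev = head;
 *              auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
 *      }
 */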

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
                        struct auxtrace_mmap_params *mp,
                        void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
                                off_t auxtrace_offset,
                                unsigned int auxtrace_pages,
                                bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct evlist *evlist,
                                   struct evsel *evsel, int idx);
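
/*
 * Illustrative sketch only (not compiled): setting up the AUX area mmap for
 * one ring buffer. The userpg and fd values are assumed to come from the
 * corresponding perf event mmap, and mm is a caller-owned struct
 * auxtrace_mmap:
 *
 *      struct auxtrace_mmap_params mp;
 *
 *      auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *                                 auxtrace_overwrite);
 *      auxtrace_mmap_params__set_idx(&mp, evlist, evsel, idx);
 *      if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *              return -1;
 *      // ... record ...
 *      auxtrace_mmap__munmap(&mm);
 */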

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
                                  struct mmap *map,
                                  union perf_event *event, void *data1,
                                  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
                        struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
                                 struct auxtrace_record *itr,
                                 struct perf_tool *tool, process_auxtrace_t fn,
                                 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
                               struct perf_session *session,
                               union perf_event *event, off_t data_offset,
                               struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
                              struct perf_sample *sample,
                              struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
                                struct perf_session *session,
                                struct perf_sample *sample, u64 data_offset,
                                u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
                                   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
                        bool events);
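
/*
 * Illustrative sketch only (not compiled): typical queue setup when
 * processing a perf.data file. PERF_RECORD_AUXTRACE events are added as they
 * are seen, or in one go from the auxtrace index, before per-queue decoding
 * walks the buffers. The data_offset value is assumed to come from the file
 * position of the event:
 *
 *      struct auxtrace_queues queues;
 *      int err;
 *
 *      err = auxtrace_queues__init(&queues);
 *      if (err)
 *              return err;
 *      // for each PERF_RECORD_AUXTRACE event:
 *      err = auxtrace_queues__add_event(&queues, session, event,
 *                                       data_offset, NULL);
 *      // or, when the file carries an auxtrace index:
 *      err = auxtrace_queues__process_index(&queues, session);
 *      // ... decode ...
 *      auxtrace_queues__free(&queues);
 */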
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
                                              struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
        return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
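
/*
 * Illustrative sketch only (not compiled): walking the buffers of one queue.
 * auxtrace_buffer__next() returns the buffer after @buffer, or the first
 * buffer when @buffer is NULL; the data is mapped or read on demand. The fd
 * is assumed to be the perf.data file descriptor:
 *
 *      struct auxtrace_buffer *buffer = NULL;
 *
 *      while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *              void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *              if (!data)
 *                      return -ENOMEM;
 *              // decode buffer->size bytes at data ...
 *              auxtrace_buffer__put_data(buffer);
 *      }
 */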

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
                       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);
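
/*
 * Illustrative sketch only (not compiled): the heap keeps queues ordered by
 * timestamp so decoding can proceed in time order across CPUs. Each queue is
 * added with the timestamp of its next data, the lowest ordinal is popped and
 * decoded, and the queue is re-added with its next timestamp. The heap is
 * assumed to be zero-initialised:
 *
 *      err = auxtrace_heap__add(&heap, queue_nr, timestamp);
 *      // ...
 *      while (heap.heap_cnt) {
 *              unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *              auxtrace_heap__pop(&heap);
 *              // decode that queue up to some later timestamp ...
 *              err = auxtrace_heap__add(&heap, queue_nr, next_timestamp);
 *      }
 */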

struct auxtrace_cache_entry {
        struct hlist_node hash;
        u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
                                           unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
                        struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
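
/*
 * Illustrative sketch only (not compiled): the cache maps a 32-bit key to an
 * implementation-defined entry that begins with struct auxtrace_cache_entry.
 * The my_entry type below is hypothetical, and the bits / limit_percent
 * values are arbitrary examples:
 *
 *      struct my_entry {
 *              struct auxtrace_cache_entry entry;
 *              u64 payload;
 *      };
 *
 *      struct auxtrace_cache *c;
 *      struct my_entry *e;
 *
 *      c = auxtrace_cache__new(10, sizeof(struct my_entry), 50);
 *      e = auxtrace_cache__alloc_entry(c);
 *      e->payload = 0;
 *      auxtrace_cache__add(c, key, &e->entry);
 *      // ...
 *      e = auxtrace_cache__lookup(c, key);
 */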

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
                                              int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
                                    struct record_opts *opts,
                                    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
                                  struct evlist *evlist,
                                  struct record_opts *opts, const char *str);
void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
                             struct evlist *evlist,
                             struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
                                       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
                               struct perf_session *session,
                               struct perf_record_auxtrace_info *auxtrace_info,
                               size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
                                   struct auxtrace_mmap *mm,
                                   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
                                   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
                            bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
                                int code, int cpu, pid_t pid, pid_t tid, u64 ip,
                                const char *msg, u64 timestamp,
                                pid_t machine_pid, int vcpu);
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
                          int code, int cpu, pid_t pid, pid_t tid, u64 ip,
                          const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
                                      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
                                 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
                                       union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
                               const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
                            int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
                                    bool no_sample);
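
/*
 * Illustrative sketch only (not compiled): tools usually either parse a user
 * supplied --itrace string or fall back to the defaults. The accepted flags
 * are documented by ITRACE_HELP further below; itrace_str here stands for the
 * option value:
 *
 *      struct itrace_synth_opts opts = { .set = false };
 *      int err = 0;
 *
 *      if (itrace_str)
 *              err = itrace_do_parse_synth_opts(&opts, itrace_str, 0);
 *      else
 *              itrace_synth_opts__set_default(&opts, false);
 */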

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
                                      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
                                    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);
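
/*
 * Illustrative sketch only (not compiled): parsing a bare address filter
 * string into a list of struct addr_filter. The filter string shown is just
 * an example of the 'filter <symbol> @ <file>' form:
 *
 *      struct addr_filters filts;
 *      struct addr_filter *filt;
 *      int err;
 *
 *      addr_filters__init(&filts);
 *      err = addr_filters__parse_bare_filter(&filts, "filter main @ /bin/ls");
 *      list_for_each_entry(filt, &filts.head, list) {
 *              // resolve filt->sym_from / filt->addr ...
 *      }
 *      addr_filters__exit(&filts);
 */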

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
                            struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
                                    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
                                 struct evsel *evsel);

#define ITRACE_HELP \
" i[period]: synthesize instructions events\n" \
" b: synthesize branches events (branch misses for Arm SPE)\n" \
" c: synthesize branches events (calls only)\n" \
" r: synthesize branches events (returns only)\n" \
" x: synthesize transactions events\n" \
" w: synthesize ptwrite events\n" \
" p: synthesize power events\n" \
" o: synthesize other events recorded due to the use\n" \
"    of aux-output (refer to perf record)\n" \
" I: synthesize interrupt or similar (asynchronous) events\n" \
"    (e.g. Intel PT Event Trace)\n" \
" e[flags]: synthesize error events\n" \
"    each flag must be preceded by + or -\n" \
"    error flags are: o (overflow)\n" \
"                     l (data lost)\n" \
" d[flags]: create a debug log\n" \
"    each flag must be preceded by + or -\n" \
"    log flags are: a (all perf events)\n" \
"                   o (output to stdout)\n" \
" f: synthesize first level cache events\n" \
" m: synthesize last level cache events\n" \
" t: synthesize TLB events\n" \
" a: synthesize remote access events\n" \
" g[len]: synthesize a call chain (use with i or x)\n" \
" G[len]: synthesize a call chain on existing event records\n" \
" l[len]: synthesize last branch entries (use with i or x)\n" \
" L[len]: synthesize last branch entries on existing event records\n" \
" sNUMBER: skip initial number of events\n" \
" q: quicker (less detailed) decoding\n" \
" A: approximate IPC\n" \
" Z: prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
" concatenate multiple options. Default is ibxwpe or cewp\n"

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
                                       struct perf_time_interval *ptime_range,
                                       int range_num)
{
        opts->ptime_range = ptime_range;
        opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
        opts->ptime_range = NULL;
        opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
                      int *err)
{
        *err = 0;
        return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
                             struct evlist *evlist __maybe_unused,
                             struct record_opts *opts __maybe_unused)
{
        return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
                                      union perf_event *event __maybe_unused)
{
        return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
                                 union perf_event *event __maybe_unused)
{
        return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
                                       union perf_event *event __maybe_unused)
{
        return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session __maybe_unused,
                                      union perf_event *event __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats __maybe_unused)
{
}

static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
                               const char *str __maybe_unused, int unset __maybe_unused)
{
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
                            const char *str __maybe_unused,
                            int unset __maybe_unused)
{
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
                                    struct record_opts *opts __maybe_unused,
                                    const char *str)
{
        if (!str)
                return 0;
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
                                  struct evlist *evlist __maybe_unused,
                                  struct record_opts *opts __maybe_unused,
                                  const char *str)
{
        if (!str)
                return 0;
        pr_err("AUX area tracing not supported\n");
        return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
                            union perf_event *event __maybe_unused,
                            struct perf_sample *sample __maybe_unused,
                            struct perf_tool *tool __maybe_unused)
{
        return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
                                    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
                           struct perf_tool *tool __maybe_unused)
{
        return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
                          struct list_head *head __maybe_unused)
{
        return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
                            u64 size __maybe_unused,
                            struct perf_session *session __maybe_unused,
                            bool needs_swap __maybe_unused)
{
        return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
                                 struct evsel *evsel __maybe_unused)
{
        return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
        return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
                        struct auxtrace_mmap_params *mp,
                        void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
                                off_t auxtrace_offset,
                                unsigned int auxtrace_pages,
                                bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct evlist *evlist,
                                   struct evsel *evsel, int idx);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts __maybe_unused,
                                       struct perf_time_interval *ptime_range __maybe_unused,
                                       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts __maybe_unused)
{
}

#endif

#endif