/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
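
/*
 * Worked example of the data record encoding described above (illustration
 * only; use ring_buffer_event_length() and ring_buffer_event_data() rather
 * than decoding events by hand): a record with a 12-byte payload has
 * type_len = 12 >> 2 = 3, its data in array[0..2], and a total size of
 * 4 + 12 = 16 bytes. When type_len is zero, the length lives in array[0]
 * and the data starts at array[1] instead.
 */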

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);
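
/*
 * Example (a minimal sketch, not taken from in-tree code): given an event
 * returned by ring_buffer_peek() or ring_buffer_consume() below, access the
 * payload only through these helpers rather than through the struct fields:
 *
 *	void *data = ring_buffer_event_data(event);
 *	unsigned len = ring_buffer_event_length(event);
 */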

/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if no other event has been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * The size is in bytes for each per-CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
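
/*
 * Example (a minimal sketch, not from in-tree code): allocate a ring buffer
 * with one megabyte per CPU that overwrites old events when full, and free
 * it when done. The flag comes from enum ring_buffer_flags below.
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */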

int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table);


#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer,
			      struct ring_buffer_event *event);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
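
/*
 * Example write path (a minimal sketch, not from in-tree code): reserve
 * space, fill in the payload through ring_buffer_event_data(), then either
 * commit the event or discard it as described above
 * ring_buffer_discard_commit(). "struct my_entry" is a hypothetical
 * payload type:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = value;
 *	if (some_condition)
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */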

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
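
/*
 * Example non-consuming read (a minimal sketch, not from in-tree code):
 * set up an iterator for one CPU, walk the events currently in the buffer,
 * then tear the iterator down. process() is a hypothetical consumer:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process(event);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */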

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
			  size_t len, int cpu, int full);
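
/*
 * Example of copying a page of events out of the buffer (a minimal sketch,
 * not from in-tree code, assuming ring_buffer_read_page() returns a
 * negative value when nothing was read). A reader page is allocated once,
 * handed to ring_buffer_read_page() to be filled or swapped with buffer
 * data, and freed when no longer needed. consume_page() is a hypothetical
 * consumer:
 *
 *	void *page = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (IS_ERR_OR_NULL(page))
 *		return;
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		consume_page(page);
 *	ring_buffer_free_read_page(buffer, cpu, page);
 */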

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

#endif /* _LINUX_RING_BUFFER_H */