#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2

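/* The record header's 'len' field reserves its two top bits for
 * BPF_RINGBUF_BUSY_BIT and BPF_RINGBUF_DISCARD_BIT, hence record sizes
 * are capped at UINT_MAX/4.
 */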
#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and
 * take into account a few extra pages for consumer/producer pages and the
 * non-mmap()'able parts. This leaves roughly 2^24 data pages, i.e. a 64GB
 * limit with 4KB pages, which seems plenty for a single ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)

struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
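	/* data area size minus one; the size is a power of 2, so this acts
	 * as a wrap-around mask for producer/consumer positions
	 */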
	u64 mask;
	struct page **pages;
	int nr_pages;
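	/* serializes producers; reservations from NMI context only trylock it */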
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to
	 * allow mapping the consumer page as r/w, while restricting the
	 * producer page to r/o. This prevents a user-space application from
	 * modifying the producer position and ruining in-kernel position
	 * tracking.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};

struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
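	/* record payload length; the two top bits carry BPF_RINGBUF_BUSY_BIT
	 * and BPF_RINGBUF_DISCARD_BIT
	 */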
	u32 len;
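	/* page offset from this record back to struct bpf_ringbuf, see
	 * bpf_ringbuf_restore_from_rec()
	 */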
	u32 pg_off;
};

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL |
			    __GFP_NOWARN | __GFP_ZERO;
	int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * continuous read of samples wrapping around the end of ring
	 * buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, no need to worry about special handling of wrapped-around
	 * data due to double-mapped data pages. This works both in kernel and
	 * when mmap()'ed in user-space, simplifying both kernel and
	 * user-space implementations significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	pages = bpf_map_area_alloc(array_size, numa_node);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

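	/* VM_USERMAP is required so that remap_vmalloc_range() can later map
	 * this area into user-space from ringbuf_map_mmap()
	 */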
	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		kmemleak_not_leak(pages);
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	kvfree(pages);
	return NULL;
}

static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return NULL;

	spin_lock_init(&rb->spinlock);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

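	/* data_sz is a page-aligned power of 2 (checked in ringbuf_map_alloc()),
	 * so data_sz - 1 is a valid wrap-around mask
	 */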
	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

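	/* max_entries is the size of the data area in bytes; it must be a
	 * page-aligned power of 2
	 */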
	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pg_off */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = kzalloc(sizeof(*rb_map), GFP_USER | __GFP_ACCOUNT);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (!rb_map->rb) {
		kfree(rb_map);
		return ERR_PTR(-ENOMEM);
	}

	return &rb_map->map;
}

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy pages pointer and nr_pages to local variables, as we are going
	 * to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	kvfree(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	kfree(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				   u64 flags)
{
	return -ENOTSUPP;
}

static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* allow writable mapping for the consumer_pos only */
		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EPERM;
	} else {
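		/* prevent the read-only mapping from being mprotect()'ed to
		 * writable later on
		 */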
		vma->vm_flags &= ~VM_MAYWRITE;
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

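	/* positions are logical, monotonically increasing byte offsets (they
	 * are masked only when accessing the data area), so their difference
	 * is the amount of unconsumed data
	 */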
	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
				 struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

BTF_ID_LIST_SINGLE(ringbuf_map_btf_ids, struct, bpf_ringbuf_map)
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap,
	.map_poll = ringbuf_map_poll,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_btf_id = &ringbuf_map_btf_ids[0],
};

/* Given a pointer to ring buffer record metadata and struct bpf_ringbuf itself,
 * calculate the offset from the record metadata to the ring buffer in pages,
 * rounded down. This page offset is stored as part of the record metadata and
 * allows restoring struct bpf_ringbuf * from a record pointer. The page offset
 * is stored at offset 4 of the record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given pointer to ring buffer record header, restore pointer to struct
 * bpf_ringbuf itself by using page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void *)((addr & PAGE_MASK) - off);
}

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	if (len > rb->mask + 1)
		return NULL;

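	/* pairs with the consumer's smp_store_release() of consumer_pos */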
	cons_pos = smp_load_acquire(&rb->consumer_pos);

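	/* an NMI can fire while this CPU already holds rb->spinlock, so
	 * spinning on it would deadlock; only trylock in NMI context
	 */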
	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
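	/* mark the record busy until it is submitted or discarded via
	 * bpf_ringbuf_commit()
	 */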
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func = bpf_ringbuf_reserve,
	.ret_type = RET_PTR_TO_ALLOC_MEM_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
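	/* xchg() implies a full barrier, so the record's payload writes are
	 * visible before the busy bit is cleared for the consumer
	 */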
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func = bpf_ringbuf_submit,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_ALLOC_MEM | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func = bpf_ringbuf_discard,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_ALLOC_MEM | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func = bpf_ringbuf_output,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return rb->mask + 1;
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func = bpf_ringbuf_query,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
	   struct bpf_dynptr_kern *, ptr)
{
	struct bpf_ringbuf_map *rb_map;
	void *sample;
	int err;

	if (unlikely(flags)) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	err = bpf_dynptr_check_size(size);
	if (err) {
		bpf_dynptr_set_null(ptr);
		return err;
	}

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	sample = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!sample) {
		bpf_dynptr_set_null(ptr);
		return -EINVAL;
	}

	bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
	.func = bpf_ringbuf_reserve_dynptr,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
};

BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, false /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
	.func = bpf_ringbuf_submit_dynptr,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
	if (!ptr->data)
		return 0;

	bpf_ringbuf_commit(ptr->data, flags, true /* discard */);

	bpf_dynptr_set_null(ptr);

	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
	.func = bpf_ringbuf_discard_dynptr,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
	.arg2_type = ARG_ANYTHING,
};