// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

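/* Free the per-CPU storage of every element; used on the allocation
 * failure path and when a per-CPU array map is torn down.
 */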
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

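/* Allocate an 8-byte aligned, zeroed per-CPU region for every element;
 * on failure, unwind whatever was already allocated and return -ENOMEM.
 */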
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

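/* Return a pointer to the element at @index in a plain (non-per-CPU) array;
 * the caller is responsible for bounds checking.
 */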
static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

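/* Report the constant address of the single element so the verifier can
 * rewrite map value accesses into direct loads/stores; @off is only
 * validated against value_size. Only supported for single-element arrays
 * such as global data maps.
 */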
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

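/* Inverse of array_map_direct_value_addr(): given an address @imm inside the
 * single element, recover its offset from the start of the value.
 */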
static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

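/* Release special fields embedded in a map value: cancel and free a pending
 * bpf_timer and drop any referenced kptrs the value may hold.
 */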
static void check_and_free_fields(struct bpf_array *arr, void *val)
{
	if (map_value_has_timer(&arr->map))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
	if (map_value_has_kptrs(&arr->map))
		bpf_map_free_kptrs(&arr->map, val);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_fields(array, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* User space provides round_up(value_size, 8) bytes that are copied
	 * into the per-cpu area; bpf programs can only access value_size of
	 * it. During lookup the same extra bytes are returned, or zeros that
	 * were zero-filled by percpu_alloc, so no kernel data can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

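/* For BPF_F_MMAPABLE arrays the struct bpf_array header is placed so that
 * array->value starts on a page boundary; round back down to recover the
 * start of the area returned by bpf_map_area_mmapable_alloc().
 */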
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free kptr on uref dropping to zero. */
	if (!map_value_has_timer(map))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (map_value_has_kptrs(map)) {
		for (i = 0; i < array->map.max_entries; i++)
			bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
		bpf_map_free_kptr_off_tab(map);
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

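/* mmap() handler for BPF_F_MMAPABLE arrays: map the page-aligned value area
 * into user space after validating that the request stays within it.
 */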
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

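/* Inner-map compatibility check: with BPF_F_INNER_MAP the inner maps may
 * differ in max_entries (lookups are then not inlined), otherwise
 * max_entries must match exactly.
 */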
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for the iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

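/* Backend of the bpf_for_each_map_elem() helper for array maps: invoke
 * @callback_fn on every element until it returns non-zero, and return the
 * number of elements visited. migrate_disable() keeps this_cpu_ptr() stable
 * for per-CPU arrays.
 */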
static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also, programs reaching a refcount of zero while patching
			 *    is in progress are okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish executing the program
				 * so that it will not be possible to expose them
				 * to an invalid nop, stack unwind, nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_id = &array_map_btf_ids[0],
};

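/* Wrap a perf event file in a bpf_event_entry so the array element also
 * remembers which map file inserted it (needed by map_release).
 */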
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

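/* Array-of-maps: element values are pointers to inner maps. A template
 * (inner_map_meta) is captured from attr->inner_map_fd at creation time so
 * the verifier can type-check lookups into the inner maps.
 */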
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};