Lines Matching refs:map
22 struct bpf_map map; member
29 static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) in map_to_storage() argument
31 return container_of(map, struct bpf_cgroup_storage_map, map); in map_to_storage()
34 static bool attach_type_isolated(const struct bpf_map *map) in attach_type_isolated() argument
36 return map->key_size == sizeof(struct bpf_cgroup_storage_key); in attach_type_isolated()
39 static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map, in bpf_cgroup_storage_key_cmp() argument
42 if (attach_type_isolated(&map->map)) { in bpf_cgroup_storage_key_cmp()
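For reference, attach_type_isolated() keys off the size of the UAPI key type below (include/uapi/linux/bpf.h): a full struct bpf_cgroup_storage_key gives per-(cgroup, attach_type) storage, while a bare __u64 cgroup id (the other key size the map accepts) shares one storage across all attach types.

struct bpf_cgroup_storage_key {
        __u64   cgroup_inode_id;        /* cgroup inode id */
        __u32   attach_type;            /* program attach type */
};
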
67 cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, in cgroup_storage_lookup() argument
70 struct rb_root *root = &map->root; in cgroup_storage_lookup()
74 spin_lock_bh(&map->lock); in cgroup_storage_lookup()
82 switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) { in cgroup_storage_lookup()
91 spin_unlock_bh(&map->lock); in cgroup_storage_lookup()
97 spin_unlock_bh(&map->lock); in cgroup_storage_lookup()
102 static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, in cgroup_storage_insert() argument
105 struct rb_root *root = &map->root; in cgroup_storage_insert()
114 switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) { in cgroup_storage_insert()
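Both cgroup_storage_lookup() and cgroup_storage_insert() walk the rb-tree at map->root, steering on the three-way result of bpf_cgroup_storage_key_cmp(). A condensed sketch of that descent, written as it would sit inside local_storage.c (it leans on the file-local struct bpf_cgroup_storage_map and helpers shown above; the real lookup also takes map->lock with spin_lock_bh() unless the caller already holds it):

/* Condensed form of the descent cgroup_storage_lookup() performs;
 * insert mirrors it and finishes with rb_link_node()/rb_insert_color().
 */
static struct bpf_cgroup_storage *
cgroup_storage_lookup_sketch(struct bpf_cgroup_storage_map *map, void *key)
{
        struct rb_node *node = map->root.rb_node;

        while (node) {
                struct bpf_cgroup_storage *storage;

                storage = container_of(node, struct bpf_cgroup_storage, node);
                switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
                case -1:
                        node = node->rb_left;
                        break;
                case 1:
                        node = node->rb_right;
                        break;
                default:
                        return storage;
                }
        }

        return NULL;
}
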
134 struct bpf_cgroup_storage_map *map = map_to_storage(_map); in cgroup_storage_lookup_elem() local
137 storage = cgroup_storage_lookup(map, key, false); in cgroup_storage_lookup_elem()
144 static long cgroup_storage_update_elem(struct bpf_map *map, void *key, in cgroup_storage_update_elem() argument
154 !btf_record_has_field(map->record, BPF_SPIN_LOCK))) in cgroup_storage_update_elem()
157 storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, in cgroup_storage_update_elem()
163 copy_map_value_locked(map, storage->buf->data, value, false); in cgroup_storage_update_elem()
167 new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size), in cgroup_storage_update_elem()
169 map->numa_node); in cgroup_storage_update_elem()
173 memcpy(&new->data[0], value, map->value_size); in cgroup_storage_update_elem()
174 check_and_init_map_value(map, new->data); in cgroup_storage_update_elem()
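cgroup_storage_update_elem() copies value_size bytes into an existing storage buffer (via copy_map_value_locked() when BPF_F_LOCK is used on a value containing a bpf_spin_lock); it does not create entries, so an update only succeeds for a cgroup whose storage was already allocated and linked by attaching a program. A hedged user-space sketch using libbpf, with map_fd and cgroup_id assumed to come from the caller and a u64 value assumed:

#include <stdint.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Hypothetical helper: overwrite the value stored for one
 * (cgroup, attach_type) pair of an attach-type-isolated
 * BPF_MAP_TYPE_CGROUP_STORAGE map with a u64 value. Expect -ENOENT
 * unless a program is attached to that cgroup, since the attach is
 * what creates and links the storage.
 */
static int set_cgroup_value(int map_fd, uint64_t cgroup_id, uint64_t val)
{
        struct bpf_cgroup_storage_key key = {
                .cgroup_inode_id = cgroup_id,
                .attach_type     = BPF_CGROUP_INET_EGRESS,
        };

        return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
}
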
185 struct bpf_cgroup_storage_map *map = map_to_storage(_map); in bpf_percpu_cgroup_storage_copy() local
191 storage = cgroup_storage_lookup(map, key, false); in bpf_percpu_cgroup_storage_copy()
214 struct bpf_cgroup_storage_map *map = map_to_storage(_map); in bpf_percpu_cgroup_storage_update() local
223 storage = cgroup_storage_lookup(map, key, false); in bpf_percpu_cgroup_storage_update()
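For the per-CPU variant (BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE), bpf_percpu_cgroup_storage_copy() hands user space one value slot per possible CPU, each slot padded to 8 bytes. A hedged reader sketch, assuming a u64 value so each slot is exactly one uint64_t:

#include <stdint.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>

/* Hypothetical reader: lookup on a percpu cgroup storage map fills one
 * slot per possible CPU, so the buffer is sized with
 * libbpf_num_possible_cpus(); summing the slots gives the total.
 */
static int read_percpu_total(int map_fd,
                             const struct bpf_cgroup_storage_key *key,
                             uint64_t *total)
{
        int ncpus = libbpf_num_possible_cpus();
        uint64_t *vals;
        int err, cpu;

        if (ncpus < 0)
                return ncpus;

        vals = calloc(ncpus, sizeof(*vals));
        if (!vals)
                return -1;

        err = bpf_map_lookup_elem(map_fd, key, vals);
        if (!err) {
                *total = 0;
                for (cpu = 0; cpu < ncpus; cpu++)
                        *total += vals[cpu];
        }

        free(vals);
        return err;
}
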
248 struct bpf_cgroup_storage_map *map = map_to_storage(_map); in cgroup_storage_get_next_key() local
251 spin_lock_bh(&map->lock); in cgroup_storage_get_next_key()
253 if (list_empty(&map->list)) in cgroup_storage_get_next_key()
257 storage = cgroup_storage_lookup(map, key, true); in cgroup_storage_get_next_key()
265 storage = list_first_entry(&map->list, in cgroup_storage_get_next_key()
269 spin_unlock_bh(&map->lock); in cgroup_storage_get_next_key()
271 if (attach_type_isolated(&map->map)) { in cgroup_storage_get_next_key()
281 spin_unlock_bh(&map->lock); in cgroup_storage_get_next_key()
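cgroup_storage_get_next_key() serves the standard iteration protocol over map->list: a NULL key yields the head of the list, a found key yields its successor, and the walk ends with -ENOENT. A user-space sketch, assuming the attach-type-isolated key format:

#include <stdio.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Walk every key of an attach-type-isolated cgroup storage map:
 * start from a NULL key, then feed each returned key back in until
 * the call fails.
 */
static void dump_keys(int map_fd)
{
        struct bpf_cgroup_storage_key cur, next;
        void *key = NULL;

        while (!bpf_map_get_next_key(map_fd, key, &next)) {
                printf("cgroup %llu attach_type %u\n",
                       (unsigned long long)next.cgroup_inode_id,
                       next.attach_type);
                cur = next;
                key = &cur;
        }
}
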
289 struct bpf_cgroup_storage_map *map; in cgroup_storage_map_alloc() local
316 map = bpf_map_area_alloc(sizeof(struct bpf_cgroup_storage_map), numa_node); in cgroup_storage_map_alloc()
317 if (!map) in cgroup_storage_map_alloc()
321 bpf_map_init_from_attr(&map->map, attr); in cgroup_storage_map_alloc()
323 spin_lock_init(&map->lock); in cgroup_storage_map_alloc()
324 map->root = RB_ROOT; in cgroup_storage_map_alloc()
325 INIT_LIST_HEAD(&map->list); in cgroup_storage_map_alloc()
327 return &map->map; in cgroup_storage_map_alloc()
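cgroup_storage_map_alloc() only sizes and initializes the map skeleton (lock, rb root, list); user space usually gets such a map from a loaded object file, but it can also be created directly. A sketch using libbpf's bpf_map_create() (assuming libbpf >= 0.7); max_entries stays 0 because elements come from program attachment, not from user-space updates:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Create an attach-type-isolated cgroup storage map directly.
 * The map name and u64 value size are illustrative; the key could
 * equally be a bare __u64 cgroup id for the shared mode.
 */
static int create_cg_storage(void)
{
        return bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, "cg_storage",
                              sizeof(struct bpf_cgroup_storage_key),
                              sizeof(__u64), 0, NULL);
}
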
332 struct bpf_cgroup_storage_map *map = map_to_storage(_map); in cgroup_storage_map_free() local
333 struct list_head *storages = &map->list; in cgroup_storage_map_free()
345 WARN_ON(!RB_EMPTY_ROOT(&map->root)); in cgroup_storage_map_free()
346 WARN_ON(!list_empty(&map->list)); in cgroup_storage_map_free()
348 bpf_map_area_free(map); in cgroup_storage_map_free()
351 static long cgroup_storage_delete_elem(struct bpf_map *map, void *key) in cgroup_storage_delete_elem() argument
356 static int cgroup_storage_check_btf(const struct bpf_map *map, in cgroup_storage_check_btf() argument
361 if (attach_type_isolated(map)) { in cgroup_storage_check_btf()
414 static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key, in cgroup_storage_seq_show_elem() argument
422 storage = cgroup_storage_lookup(map_to_storage(map), key, false); in cgroup_storage_seq_show_elem()
428 btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); in cgroup_storage_seq_show_elem()
429 stype = cgroup_storage_type(map); in cgroup_storage_seq_show_elem()
432 btf_type_seq_show(map->btf, map->btf_value_type_id, in cgroup_storage_seq_show_elem()
439 btf_type_seq_show(map->btf, map->btf_value_type_id, in cgroup_storage_seq_show_elem()
449 static u64 cgroup_storage_map_usage(const struct bpf_map *map) in cgroup_storage_map_usage() argument
482 static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) in bpf_cgroup_storage_calculate_size() argument
486 if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) { in bpf_cgroup_storage_calculate_size()
487 size = sizeof(struct bpf_storage_buffer) + map->value_size; in bpf_cgroup_storage_calculate_size()
491 size = map->value_size; in bpf_cgroup_storage_calculate_size()
504 struct bpf_map *map; in bpf_cgroup_storage_alloc() local
508 map = prog->aux->cgroup_storage[stype]; in bpf_cgroup_storage_alloc()
509 if (!map) in bpf_cgroup_storage_alloc()
512 size = bpf_cgroup_storage_calculate_size(map, &pages); in bpf_cgroup_storage_alloc()
514 storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage), in bpf_cgroup_storage_alloc()
515 gfp, map->numa_node); in bpf_cgroup_storage_alloc()
520 storage->buf = bpf_map_kmalloc_node(map, size, gfp, in bpf_cgroup_storage_alloc()
521 map->numa_node); in bpf_cgroup_storage_alloc()
524 check_and_init_map_value(map, storage->buf->data); in bpf_cgroup_storage_alloc()
526 storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp); in bpf_cgroup_storage_alloc()
531 storage->map = (struct bpf_cgroup_storage_map *)map; in bpf_cgroup_storage_alloc()
561 struct bpf_map *map; in bpf_cgroup_storage_free() local
566 map = &storage->map->map; in bpf_cgroup_storage_free()
567 stype = cgroup_storage_type(map); in bpf_cgroup_storage_free()
578 struct bpf_cgroup_storage_map *map; in bpf_cgroup_storage_link() local
586 map = storage->map; in bpf_cgroup_storage_link()
588 spin_lock_bh(&map->lock); in bpf_cgroup_storage_link()
589 WARN_ON(cgroup_storage_insert(map, storage)); in bpf_cgroup_storage_link()
590 list_add(&storage->list_map, &map->list); in bpf_cgroup_storage_link()
592 spin_unlock_bh(&map->lock); in bpf_cgroup_storage_link()
597 struct bpf_cgroup_storage_map *map; in bpf_cgroup_storage_unlink() local
603 map = storage->map; in bpf_cgroup_storage_unlink()
605 spin_lock_bh(&map->lock); in bpf_cgroup_storage_unlink()
606 root = &map->root; in bpf_cgroup_storage_unlink()
611 spin_unlock_bh(&map->lock); in bpf_cgroup_storage_unlink()
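bpf_cgroup_storage_link()/unlink() are what make a storage reachable: link() inserts it into the map's rb-tree and list under map->lock when a program is attached to a cgroup, and unlink() removes it again. On the program side, the buffer installed this way is returned by the bpf_get_local_storage() helper. A self-contained BPF C sketch (built with clang -target bpf; map name and value type are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u64);
} cg_storage SEC(".maps");

/* Count egress bytes per cgroup. bpf_get_local_storage() returns the
 * buffer that bpf_cgroup_storage_alloc() created and
 * bpf_cgroup_storage_link() inserted for the current cgroup; the
 * atomic add is used because the non-percpu storage is shared across
 * CPUs (a bpf_spin_lock in the value would be the alternative).
 */
SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
        __u64 *bytes = bpf_get_local_storage(&cg_storage, 0);

        __sync_fetch_and_add(bytes, skb->len);
        return 1;
}

char _license[] SEC("license") = "GPL";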