// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

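/* Each sk-storage map is assigned a slot (cache_idx) in a small per-socket
 * cache (see bpf_local_storage_cache_idx_get()), so a lookup on a frequently
 * used map can hit sk_storage->cache[smap->cache_idx] directly instead of
 * walking sk_storage->list.
 */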
DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), true);

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(
			sk_storage, selem, true, false);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}
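
/* Illustrative sketch (not part of this file): from userspace, a map of this
 * type can be created with libbpf roughly as follows.  The map name and the
 * __u64 value type are arbitrary; sk-storage maps require key_size ==
 * sizeof(int), max_entries == 0 and BPF_F_NO_PREALLOC (see
 * bpf_local_storage_map_alloc_check()):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_stg_map",
 *				    sizeof(int), sizeof(__u64), 0, &opts);
 */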

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
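
/* Illustrative sketch only: for the three syscall-side ops above, the map
 * "key" is a socket file descriptor in the calling process, e.g. (assuming
 * map_fd refers to a BPF_MAP_TYPE_SK_STORAGE map with a __u64 value):
 *
 *	__u64 val = 0;
 *	int sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &sock_fd, &val);
 *	bpf_map_delete_elem(map_fd, &sock_fd);
 */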

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding a new element
		 * here can race with cleanup in bpf_local_storage_map_free().
		 * Try to grab the map refcnt to make sure that it's still
		 * alive and to prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here; the
	 * caller is responsible for calling bpf_sk_storage_free().
	 */

	return ret;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     * destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}
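
/* Illustrative sketch (not part of this file) of how a BPF program would use
 * the get/delete helpers; the map and value type here are made up:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} sk_stg SEC(".maps");
 *
 *	SEC("sockops")
 *	int track(struct bpf_sock_ops *skops)
 *	{
 *		__u64 *val;
 *
 *		val = bpf_sk_storage_get(&sk_stg, skops->sk, NULL,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (val)
 *			(*val)++;
 *		return 1;
 *	}
 */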

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	int optmem_max = READ_ONCE(sysctl_optmem_max);
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

BTF_ID_LIST_SINGLE(sk_storage_map_btf_ids, struct, bpf_local_storage_map)
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &sk_storage_map_btf_ids[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func = bpf_sk_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function while also
	 * using the bpf_sk_storage_(get|delete) helpers.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no tracepoint */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		if (IS_ERR_OR_NULL(btf_vmlinux))
			return false;
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}
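
/* For example (illustrative): an fentry program attached to tcp_close()
 * passes the check above, while one attached to bpf_sk_storage_free() is
 * rejected, since running the storage helpers while tracing the storage
 * code itself could recurse into it.
 */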

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func = bpf_sk_storage_get_tracing,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
	.allowed = bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func = bpf_sk_storage_delete_tracing,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed = bpf_sk_storage_tracing_allowed,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
	       nla_total_size_64bit(value_size);
}
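
/* Rough worked example (assuming an arch with efficient unaligned access,
 * where nla_total_size_64bit() reserves no extra pad attribute): for an
 * 8-byte map value, this is 4 (empty nest hdr) + 8 (u32 attr) +
 * 12 (64bit attr) = 24 bytes of reply space.
 */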

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN,
	 * matching the restriction on the map_alloc_check() side.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed the max nlattr payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified. Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

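/* Illustrative sketch (not part of this file) of a BPF iterator program over
 * this target; assumes the map value type is __u64:
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u64 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "sk: %p val: %llu\n", sk, *val);
 *		return 0;
 *	}
 */
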
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start = bpf_sk_storage_map_seq_start,
	.next = bpf_sk_storage_map_seq_next,
	.stop = bpf_sk_storage_map_seq_stop,
	.show = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_sk_storage_map_seq_ops,
	.init_seq_private = bpf_iter_init_sk_storage_map,
	.fini_seq_private = bpf_iter_fini_sk_storage_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target = "bpf_sk_storage_map",
	.attach_target = bpf_iter_attach_map,
	.detach_target = bpf_iter_detach_map,
	.show_fdinfo = bpf_iter_map_show_fdinfo,
	.fill_link_info = bpf_iter_map_fill_link_info,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info = &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);