// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

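/* Map-side bucket selection: the selem pointer itself is hashed, so an
 * element maps to the same bucket for its whole lifetime.
 */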
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

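/* Memory charging is delegated to the map-type specific ops (if any),
 * e.g. sk storage accounts the memory against the owning socket.
 */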
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

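/* Return the address of the owner's storage pointer,
 * e.g. &sk->sk_bpf_storage for socket storage.
 */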
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

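/* A selem is linked in two directions: into its owner's local_storage
 * list (snode) and into its map's bucket list (map_node). The hlist
 * unhashed state tells whether each link is still in place.
 */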
static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

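/* Allocate a new element, optionally charging the owner for it.
 * Any charge taken here is rolled back on allocation failure.
 */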
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				gfp_flags | __GFP_NOWARN);
	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

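/* Runs as an RCU-tasks-trace callback and then chains a regular RCU
 * grace period via kfree_rcu(): the storage is freed only after both
 * sleepable and non-sleepable bpf programs are done with it.
 */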
void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree_rcu(local_storage, rcu);
}

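/* Same two-step grace-period chaining as above, for a single element. */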
static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem, bool use_trace_rcu)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and will be released by the caller with
		 * raw_spin_unlock_irqrestore(&local_storage->lock, flags).
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the unlock.
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	if (use_trace_rcu)
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	else
		kfree_rcu(selem, rcu);

	return free_local_storage;
}

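/* Unlink a selem from its owner's storage, and free the storage itself
 * once its last element is gone.
 */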
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				       bool use_trace_rcu)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, use_trace_rcu);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage) {
		if (use_trace_rcu)
			call_rcu_tasks_trace(&local_storage->rcu,
					     bpf_local_storage_free_rcu);
		else
			kfree_rcu(local_storage, rcu);
	}
}

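/* Caller must own local_storage->lock, or the storage must not be
 * published to the owner yet (see bpf_local_storage_alloc()).
 */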
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

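/* Remove the map-side link of a selem under the bucket lock. The
 * storage-side link (snode) is untouched here.
 */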
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

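/* Publish a selem into its map's bucket list under the bucket lock. */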
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem, use_trace_rcu);
}

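/* Find the sdata of @smap in @local_storage. With @cacheit_lockit set,
 * a slow-path hit is promoted into the per-storage cache so that the
 * next lookup of this map takes the fast path.
 */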
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

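/* Validate BPF_EXIST/BPF_NOEXIST against whether an old element exists. */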
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

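/* Allocate and publish a bpf_local_storage for @owner, seeded with
 * @first_selem. Publishing uses cmpxchg, so a racing allocator loses
 * cleanly with -EAGAIN.
 */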
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  gfp_flags | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of the
	 * running context (bh, irq, etc.).
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
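/* A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	sdata = bpf_local_storage_update(sk, smap, &val, BPF_ANY,
 *					 GFP_ATOMIC);
 *	if (IS_ERR(sdata))
 *		return PTR_ERR(sdata);
 */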
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held. Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later. Hence, instead of charging the new selem now
		 * and uncharging the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false, true);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		kfree(selem);
	}
	return ERR_PTR(err);
}

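/* Pick the least-used cache slot for a new map. A zero usage count
 * means the slot is free, so the scan can stop early.
 */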
u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, false);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}

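/* Sanity-check map_create() attributes: BPF_F_NO_PREALLOC is mandatory,
 * max_entries is unused, the key is a 4-byte int, and BTF must describe
 * both key and value.
 */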
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

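/* Allocate the map and size its hash table to roughly one bucket per
 * possible CPU (minimum two).
 */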
struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap->buckets) {
		bpf_map_area_free(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}

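/* The BTF key must be a plain 32-bit integer with no bitfield offset,
 * matching the sizeof(int) key size enforced at map creation.
 */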
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}