/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint = 4,
	.key_len = sizeof(struct net_device *),
	.key_offset = offsetof(struct bpf_offload_netdev, netdev),
	.head_offset = offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking = true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

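/* Reject a netdev that cannot host offloads: a NULL pointer (the ifindex
 * did not resolve) or a device whose ops lack the ->ndo_bpf() callback.
 */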
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

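/* Look up the bpf_offload_netdev entry for @netdev. Must be called with
 * bpf_devs_lock held; returns NULL if the hashtable was never initialized
 * or the device was not registered for offload.
 */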
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

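/* Bind a program being loaded to the target netdev named by
 * attr->prog_ifindex. Only SCHED_CLS and XDP programs can be offloaded,
 * and no load-time flags are accepted for device-bound programs.
 */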
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

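/* Ask the driver to prepare for verification of a device-bound program.
 * dev_state records whether the driver now holds state which must later
 * be torn down via ->destroy().
 */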
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

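/* Per-instruction verifier callback, forwarded to the driver so it can
 * perform device-specific checks as the verifier walks the program.
 */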
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

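/* Called when verification completes successfully. The ->finalize()
 * callback is optional; drivers without one implicitly succeed.
 */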
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

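/* The next two hooks mirror the verifier's instruction rewriting and dead
 * code elimination on the device side. Both callbacks are optional; once a
 * driver fails or lacks one, opt_failed is set and all further
 * optimizations are skipped for this program.
 */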
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

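/* Tear down the offload state of a program. Caller must hold bpf_devs_lock
 * for writing; ->destroy() is only invoked if the driver created state in
 * ->prepare() (tracked by dev_state).
 */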
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

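/* "JIT" a device-bound program: poison bpf_func with a stub that WARNs if
 * the host ever tries to run the program, then hand it to the driver for
 * translation to device code.
 */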
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

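/* Fill the offload-specific fields of bpf_prog_info: the netns dev/inode
 * pair identifying the namespace the target device lives in, its ifindex,
 * and (optionally) a copy of the device's translated ("JITed") image.
 */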
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog = prog,
		.info = info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

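/* Issue a map offload command to the device via ->ndo_bpf(). Caller must
 * hold RTNL and guarantee offmap->netdev is still valid.
 */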
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

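/* Allocate a device-bound map. Only array and hash maps can be offloaded,
 * and creation requires CAP_SYS_ADMIN. The target device is resolved from
 * attr->map_ifindex under RTNL, and the driver allocates its side via
 * BPF_OFFLOAD_MAP_ALLOC before the map is linked to the netdev.
 */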
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

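/* The four wrappers below route map operations to the device. Each takes
 * bpf_devs_lock for reading and fails with -ENODEV once the map has been
 * orphaned (offmap->netdev is cleared on device unregister).
 */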
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap = map_to_offmap(map),
		.info = info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

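/* A program and a netdev "match" if the program is bound to that netdev,
 * or to another netdev registered against the same bpf_offload_dev (e.g.
 * another port of the same device). Caller must hold bpf_devs_lock.
 */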
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

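/* Check that a program may use a map: maps that are not device-bound must
 * be offload-neutral; device-bound maps must live on a device matching the
 * program's.
 */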
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

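/* Register @netdev as an offload-capable port of @offdev. Called by
 * drivers, typically at probe or port instantiation time.
 */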
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

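/* Unregister a netdev from offload. If the parent device has another
 * registered netdev, orphaned programs and maps are migrated to it;
 * otherwise they are destroyed. Requires RTNL.
 */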
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

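/* Create an offload device handle for a driver. The rhashtable backing
 * netdev lookups is initialized lazily on first use.
 *
 * A minimal sketch of the expected driver-side lifecycle, assuming a
 * hypothetical driver with ops "my_bpf_dev_ops" and private state "priv"
 * (these names are illustrative, not part of this API):
 *
 *	bdev = bpf_offload_dev_create(&my_bpf_dev_ops, priv);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	err = bpf_offload_dev_netdev_register(bdev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);
 */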
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);