// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/rcupdate_trace.h>

struct bpf_iter_target_info {
	struct list_head list;
	const struct bpf_iter_reg *reg_info;
	u32 btf_id; /* cached value */
};

struct bpf_iter_link {
	struct bpf_link link;
	struct bpf_iter_aux_info aux;
	struct bpf_iter_target_info *tinfo;
};

struct bpf_iter_priv_data {
	struct bpf_iter_target_info *tinfo;
	const struct bpf_iter_seq_info *seq_info;
	struct bpf_prog *prog;
	u64 session_id;
	u64 seq_num;
	bool done_stop;
	u8 target_private[] __aligned(8);
};

static struct list_head targets = LIST_HEAD_INIT(targets);
static DEFINE_MUTEX(targets_mutex);

/* protect bpf_iter_link changes */
static DEFINE_MUTEX(link_mutex);

/* incremented on every opened seq_file */
static atomic64_t session_id;

static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
			    const struct bpf_iter_seq_info *seq_info);

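/* The seq_file private area starts with a struct bpf_iter_priv_data header
 * followed by the target's own private data (target_private). The helpers
 * below recover the header from seq->private and adjust the bookkeeping
 * that is later exposed to the bpf program via bpf_iter_meta.
 */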
static void bpf_iter_inc_seq_num(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	iter_priv->seq_num++;
}

static void bpf_iter_dec_seq_num(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	iter_priv->seq_num--;
}

static void bpf_iter_done_stop(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	iter_priv->done_stop = true;
}

static bool bpf_iter_support_resched(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	return iter_priv->tinfo->reg_info->feature & BPF_ITER_RESCHED;
}

/* maximum visited objects before bailing out */
#define MAX_ITER_OBJECTS 1000000

/* bpf_seq_read, a customized and simpler version for bpf iterator.
 * no_llseek is assumed for this file.
 * The following are differences from seq_read():
 *  . fixed buffer size (PAGE_SIZE << 3)
 *  . assuming no_llseek
 *  . stop() may call bpf program, handling potential overflow there
 */
static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
			    loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	size_t n, offs, copied = 0;
	int err = 0, num_objs = 0;
	bool can_resched;
	void *p;

	mutex_lock(&seq->lock);

	if (!seq->buf) {
		seq->size = PAGE_SIZE << 3;
		seq->buf = kvmalloc(seq->size, GFP_KERNEL);
		if (!seq->buf) {
			err = -ENOMEM;
			goto done;
		}
	}

	if (seq->count) {
		n = min(seq->count, size);
		err = copy_to_user(buf, seq->buf + seq->from, n);
		if (err) {
			err = -EFAULT;
			goto done;
		}
		seq->count -= n;
		seq->from += n;
		copied = n;
		goto done;
	}

	seq->from = 0;
	p = seq->op->start(seq, &seq->index);
	if (!p)
		goto stop;
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		seq->op->stop(seq, p);
		seq->count = 0;
		goto done;
	}

	err = seq->op->show(seq, p);
	if (err > 0) {
		/* object is skipped, decrease seq_num, so next
		 * valid object can reuse the same seq_num.
		 */
		bpf_iter_dec_seq_num(seq);
		seq->count = 0;
	} else if (err < 0 || seq_has_overflowed(seq)) {
		if (!err)
			err = -E2BIG;
		seq->op->stop(seq, p);
		seq->count = 0;
		goto done;
	}

	can_resched = bpf_iter_support_resched(seq);
	while (1) {
		loff_t pos = seq->index;

		num_objs++;
		offs = seq->count;
		p = seq->op->next(seq, p, &seq->index);
		if (pos == seq->index) {
			pr_info_ratelimited("buggy seq_file .next function %ps "
					    "did not update position index\n",
					    seq->op->next);
			seq->index++;
		}

		if (IS_ERR_OR_NULL(p))
			break;

		/* got a valid next object, increase seq_num */
		bpf_iter_inc_seq_num(seq);

		if (seq->count >= size)
			break;

		if (num_objs >= MAX_ITER_OBJECTS) {
			if (offs == 0) {
				err = -EAGAIN;
				seq->op->stop(seq, p);
				goto done;
			}
			break;
		}

		err = seq->op->show(seq, p);
		if (err > 0) {
			bpf_iter_dec_seq_num(seq);
			seq->count = offs;
		} else if (err < 0 || seq_has_overflowed(seq)) {
			seq->count = offs;
			if (offs == 0) {
				if (!err)
					err = -E2BIG;
				seq->op->stop(seq, p);
				goto done;
			}
			break;
		}

		if (can_resched)
			cond_resched();
	}
stop:
	offs = seq->count;
	/* bpf program called if !p */
	seq->op->stop(seq, p);
	if (!p) {
		if (!seq_has_overflowed(seq)) {
			bpf_iter_done_stop(seq);
		} else {
			seq->count = offs;
			if (offs == 0) {
				err = -E2BIG;
				goto done;
			}
		}
	}

	n = min(seq->count, size);
	err = copy_to_user(buf, seq->buf, n);
	if (err) {
		err = -EFAULT;
		goto done;
	}
	copied = n;
	seq->count -= n;
	seq->from = n;
done:
	if (!copied)
		copied = err;
	else
		*ppos += copied;
	mutex_unlock(&seq->lock);
	return copied;
}

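/* Prefer the seq_info supplied by the attached map's ops (used by map
 * element iterators); otherwise fall back to the seq_info provided at
 * target registration time.
 */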
static const struct bpf_iter_seq_info *
__get_seq_info(struct bpf_iter_link *link)
{
	const struct bpf_iter_seq_info *seq_info;

	if (link->aux.map) {
		seq_info = link->aux.map->ops->iter_seq_info;
		if (seq_info)
			return seq_info;
	}

	return link->tinfo->reg_info->seq_info;
}

static int iter_open(struct inode *inode, struct file *file)
{
	struct bpf_iter_link *link = inode->i_private;

	return prepare_seq_file(file, link, __get_seq_info(link));
}

static int iter_release(struct inode *inode, struct file *file)
{
	struct bpf_iter_priv_data *iter_priv;
	struct seq_file *seq;

	seq = file->private_data;
	if (!seq)
		return 0;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);

	if (iter_priv->seq_info->fini_seq_private)
		iter_priv->seq_info->fini_seq_private(seq->private);

	bpf_prog_put(iter_priv->prog);
	seq->private = iter_priv;

	return seq_release_private(inode, file);
}

const struct file_operations bpf_iter_fops = {
	.open = iter_open,
	.llseek = no_llseek,
	.read = bpf_seq_read,
	.release = iter_release,
};

/* The argument reg_info will be cached in bpf_iter_target_info.
 * The common practice is to declare the target reg_info as
 * a static const variable and pass it as an argument to
 * bpf_iter_reg_target().
 */
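/* For illustration, a minimal registration sketch; the "foo" target and
 * every foo_* name below are hypothetical, not part of this file:
 *
 *	static const struct bpf_iter_seq_info foo_seq_info = {
 *		.seq_ops		= &bpf_iter_foo_seq_ops,
 *		.init_seq_private	= NULL,
 *		.fini_seq_private	= NULL,
 *		.seq_priv_size		= sizeof(struct foo_iter_priv),
 *	};
 *
 *	static const struct bpf_iter_reg foo_reg_info = {
 *		.target			= "foo",
 *		.feature		= BPF_ITER_RESCHED,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__foo, entry),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &foo_seq_info,
 *	};
 *
 *	static int __init foo_iter_init(void)
 *	{
 *		return bpf_iter_reg_target(&foo_reg_info);
 *	}
 *	late_initcall(foo_iter_init);
 */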
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
{
	struct bpf_iter_target_info *tinfo;

	tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
	if (!tinfo)
		return -ENOMEM;

	tinfo->reg_info = reg_info;
	INIT_LIST_HEAD(&tinfo->list);

	mutex_lock(&targets_mutex);
	list_add(&tinfo->list, &targets);
	mutex_unlock(&targets_mutex);

	return 0;
}

void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info)
{
	struct bpf_iter_target_info *tinfo;
	bool found = false;

	mutex_lock(&targets_mutex);
	list_for_each_entry(tinfo, &targets, list) {
		if (reg_info == tinfo->reg_info) {
			list_del(&tinfo->list);
			kfree(tinfo);
			found = true;
			break;
		}
	}
	mutex_unlock(&targets_mutex);

	WARN_ON(found == false);
}

static void cache_btf_id(struct bpf_iter_target_info *tinfo,
			 struct bpf_prog *prog)
{
	tinfo->btf_id = prog->aux->attach_btf_id;
}

bool bpf_iter_prog_supported(struct bpf_prog *prog)
{
	const char *attach_fname = prog->aux->attach_func_name;
	struct bpf_iter_target_info *tinfo = NULL, *iter;
	u32 prog_btf_id = prog->aux->attach_btf_id;
	const char *prefix = BPF_ITER_FUNC_PREFIX;
	int prefix_len = strlen(prefix);

	if (strncmp(attach_fname, prefix, prefix_len))
		return false;

	mutex_lock(&targets_mutex);
	list_for_each_entry(iter, &targets, list) {
		if (iter->btf_id && iter->btf_id == prog_btf_id) {
			tinfo = iter;
			break;
		}
		if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {
			cache_btf_id(iter, prog);
			tinfo = iter;
			break;
		}
	}
	mutex_unlock(&targets_mutex);

	if (tinfo) {
		prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
		prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
	}

	return tinfo != NULL;
}

const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_iter_target_info *tinfo;
	const struct bpf_func_proto *fn = NULL;

	mutex_lock(&targets_mutex);
	list_for_each_entry(tinfo, &targets, list) {
		if (tinfo->btf_id == prog->aux->attach_btf_id) {
			const struct bpf_iter_reg *reg_info;

			reg_info = tinfo->reg_info;
			if (reg_info->get_func_proto)
				fn = reg_info->get_func_proto(func_id, prog);
			break;
		}
	}
	mutex_unlock(&targets_mutex);

	return fn;
}

static void bpf_iter_link_release(struct bpf_link *link)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);

	if (iter_link->tinfo->reg_info->detach_target)
		iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
}

static void bpf_iter_link_dealloc(struct bpf_link *link)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);

	kfree(iter_link);
}

static int bpf_iter_link_replace(struct bpf_link *link,
				 struct bpf_prog *new_prog,
				 struct bpf_prog *old_prog)
{
	int ret = 0;

	mutex_lock(&link_mutex);
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (link->prog->type != new_prog->type ||
	    link->prog->expected_attach_type != new_prog->expected_attach_type ||
	    link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	mutex_unlock(&link_mutex);
	return ret;
}

static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
				      struct seq_file *seq)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);
	bpf_iter_show_fdinfo_t show_fdinfo;

	seq_printf(seq,
		   "target_name:\t%s\n",
		   iter_link->tinfo->reg_info->target);

	show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
	if (show_fdinfo)
		show_fdinfo(&iter_link->aux, seq);
}

static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
					struct bpf_link_info *info)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);
	char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
	bpf_iter_fill_link_info_t fill_link_info;
	u32 ulen = info->iter.target_name_len;
	const char *target_name;
	u32 target_len;

	if (!ulen ^ !ubuf)
		return -EINVAL;

	target_name = iter_link->tinfo->reg_info->target;
	target_len = strlen(target_name);
	info->iter.target_name_len = target_len + 1;

	if (ubuf) {
		if (ulen >= target_len + 1) {
			if (copy_to_user(ubuf, target_name, target_len + 1))
				return -EFAULT;
		} else {
			char zero = '\0';

			if (copy_to_user(ubuf, target_name, ulen - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + ulen - 1))
				return -EFAULT;
			return -ENOSPC;
		}
	}

	fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
	if (fill_link_info)
		return fill_link_info(&iter_link->aux, info);

	return 0;
}

static const struct bpf_link_ops bpf_iter_link_lops = {
	.release = bpf_iter_link_release,
	.dealloc = bpf_iter_link_dealloc,
	.update_prog = bpf_iter_link_replace,
	.show_fdinfo = bpf_iter_link_show_fdinfo,
	.fill_link_info = bpf_iter_link_fill_link_info,
};

bool bpf_link_is_iter(struct bpf_link *link)
{
	return link->ops == &bpf_iter_link_lops;
}

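/* Create a BPF_LINK_TYPE_ITER link for an iterator program. The target is
 * looked up by the program's attach btf_id; any target-specific parameters
 * (e.g. a map fd) are passed in via bpf_iter_link_info and handed to the
 * target's attach_target() callback.
 */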
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
			 struct bpf_prog *prog)
{
	struct bpf_iter_target_info *tinfo = NULL, *iter;
	struct bpf_link_primer link_primer;
	union bpf_iter_link_info linfo;
	struct bpf_iter_link *link;
	u32 prog_btf_id, linfo_len;
	bpfptr_t ulinfo;
	int err;

	if (attr->link_create.target_fd || attr->link_create.flags)
		return -EINVAL;

	memset(&linfo, 0, sizeof(union bpf_iter_link_info));

	ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
	linfo_len = attr->link_create.iter_info_len;
	if (bpfptr_is_null(ulinfo) ^ !linfo_len)
		return -EINVAL;

	if (!bpfptr_is_null(ulinfo)) {
		err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
					       linfo_len);
		if (err)
			return err;
		linfo_len = min_t(u32, linfo_len, sizeof(linfo));
		if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
			return -EFAULT;
	}

	prog_btf_id = prog->aux->attach_btf_id;
	mutex_lock(&targets_mutex);
	list_for_each_entry(iter, &targets, list) {
		if (iter->btf_id == prog_btf_id) {
			tinfo = iter;
			break;
		}
	}
	mutex_unlock(&targets_mutex);
	if (!tinfo)
		return -ENOENT;

	link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
	if (!link)
		return -ENOMEM;

	bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog);
	link->tinfo = tinfo;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		return err;
	}

	if (tinfo->reg_info->attach_target) {
		err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
		if (err) {
			bpf_link_cleanup(&link_primer);
			return err;
		}
	}

	return bpf_link_settle(&link_primer);
}

static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
			  struct bpf_iter_target_info *tinfo,
			  const struct bpf_iter_seq_info *seq_info,
			  struct bpf_prog *prog)
{
	priv_data->tinfo = tinfo;
	priv_data->seq_info = seq_info;
	priv_data->prog = prog;
	priv_data->session_id = atomic64_inc_return(&session_id);
	priv_data->seq_num = 0;
	priv_data->done_stop = false;
}

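/* Set up the seq_file for one iteration session: take a reference on the
 * attached program, allocate the combined bpf_iter_priv_data + target
 * private area, let the target initialize its private data, and point
 * seq->private at the target's portion so target callbacks only see their
 * own data.
 */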
static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
			    const struct bpf_iter_seq_info *seq_info)
{
	struct bpf_iter_priv_data *priv_data;
	struct bpf_iter_target_info *tinfo;
	struct bpf_prog *prog;
	u32 total_priv_dsize;
	struct seq_file *seq;
	int err = 0;

	mutex_lock(&link_mutex);
	prog = link->link.prog;
	bpf_prog_inc(prog);
	mutex_unlock(&link_mutex);

	tinfo = link->tinfo;
	total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
			   seq_info->seq_priv_size;
	priv_data = __seq_open_private(file, seq_info->seq_ops,
				       total_priv_dsize);
	if (!priv_data) {
		err = -ENOMEM;
		goto release_prog;
	}

	if (seq_info->init_seq_private) {
		err = seq_info->init_seq_private(priv_data->target_private, &link->aux);
		if (err)
			goto release_seq_file;
	}

	init_seq_meta(priv_data, tinfo, seq_info, prog);
	seq = file->private_data;
	seq->private = priv_data->target_private;

	return 0;

release_seq_file:
	seq_release_private(file->f_inode, file);
	file->private_data = NULL;
release_prog:
	bpf_prog_put(prog);
	return err;
}

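/* Create a new anonymous, read-only file descriptor backed by bpf_iter_fops
 * for the given iterator link. Reading the fd drives bpf_seq_read() and
 * thus the attached bpf program.
 */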
int bpf_iter_new_fd(struct bpf_link *link)
{
	struct bpf_iter_link *iter_link;
	struct file *file;
	unsigned int flags;
	int err, fd;

	if (link->ops != &bpf_iter_link_lops)
		return -EINVAL;

	flags = O_RDONLY | O_CLOEXEC;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto free_fd;
	}

	iter_link = container_of(link, struct bpf_iter_link, link);
	err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link));
	if (err)
		goto free_file;

	fd_install(fd, file);
	return fd;

free_file:
	fput(file);
free_fd:
	put_unused_fd(fd);
	return err;
}

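/* Called from a target's seq_ops->show()/stop() to fill bpf_iter_meta and
 * fetch the attached program. Returns NULL if the seq_file does not belong
 * to a bpf iterator, or if stop() already invoked the program for this
 * session (done_stop).
 */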
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
{
	struct bpf_iter_priv_data *iter_priv;
	struct seq_file *seq;
	void *seq_priv;

	seq = meta->seq;
	if (seq->file->f_op != &bpf_iter_fops)
		return NULL;

	seq_priv = seq->private;
	iter_priv = container_of(seq_priv, struct bpf_iter_priv_data,
				 target_private);

	if (in_stop && iter_priv->done_stop)
		return NULL;

	meta->session_id = iter_priv->session_id;
	meta->seq_num = iter_priv->seq_num;

	return iter_priv->prog;
}

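/* Run the iterator program for one object. Sleepable programs are protected
 * by rcu_read_lock_trace() and may fault; non-sleepable programs run under
 * plain RCU. Migration is disabled in both cases while the program runs.
 */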
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
{
	int ret;

	if (prog->aux->sleepable) {
		rcu_read_lock_trace();
		migrate_disable();
		might_fault();
		ret = bpf_prog_run(prog, ctx);
		migrate_enable();
		rcu_read_unlock_trace();
	} else {
		rcu_read_lock();
		migrate_disable();
		ret = bpf_prog_run(prog, ctx);
		migrate_enable();
		rcu_read_unlock();
	}

	/* bpf program can only return 0 or 1:
	 *  0 : okay
	 *  1 : retry the same object
	 * The bpf_iter_run_prog() return value
	 * becomes the seq_ops->show() return value.
	 */
	return ret == 0 ? 0 : -EAGAIN;
}
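/* For illustration of the return convention above, a minimal bpf-side
 * iterator program sketch; the bpf_iter__task context and "iter/task"
 * section follow the task target, while some_transient_condition() is
 * hypothetical:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!task)
 *			return 0;
 *		if (some_transient_condition(task))
 *			return 1;	// ask the kernel to retry this object
 *		BPF_SEQ_PRINTF(seq, "%d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */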

BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
	   void *, callback_ctx, u64, flags)
{
	return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
}

const struct bpf_func_proto bpf_for_each_map_elem_proto = {
	.func = bpf_for_each_map_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_FUNC,
	.arg3_type = ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
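/* A bpf-side usage sketch for the helper above; the map and callback names
 * are hypothetical, the callback signature follows the documented
 * bpf_for_each_map_elem() contract:
 *
 *	static __u64 sum_values(struct bpf_map *map, __u32 *key, __u64 *val,
 *				__u64 *total)
 *	{
 *		*total += *val;
 *		return 0;	// 0 - continue, 1 - stop iterating
 *	}
 *
 *	__u64 total = 0;
 *	long n = bpf_for_each_map_elem(&my_array_map, sum_values, &total, 0);
 *	// n is the number of elements visited, or a negative error
 */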

/* maximum number of loops */
#define MAX_LOOPS BIT(23)

BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
	   u64, flags)
{
	bpf_callback_t callback = (bpf_callback_t)callback_fn;
	u64 ret;
	u32 i;

	if (flags)
		return -EINVAL;
	if (nr_loops > MAX_LOOPS)
		return -E2BIG;

	for (i = 0; i < nr_loops; i++) {
		ret = callback((u64)i, (u64)(long)callback_ctx, 0, 0, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			return i + 1;
	}

	return i;
}

const struct bpf_func_proto bpf_loop_proto = {
	.func = bpf_loop,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_FUNC,
	.arg3_type = ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
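/* A bpf-side usage sketch for bpf_loop(); the callback and context names
 * are hypothetical:
 *
 *	struct loop_ctx { int target; int found_at; };
 *
 *	static long find_target(__u32 index, struct loop_ctx *ctx)
 *	{
 *		if ((int)index == ctx->target) {
 *			ctx->found_at = index;
 *			return 1;	// stop the loop
 *		}
 *		return 0;		// keep iterating
 *	}
 *
 *	struct loop_ctx lc = { .target = 42, .found_at = -1 };
 *	long nr = bpf_loop(100, find_target, &lc, 0);
 *	// nr is the number of iterations performed, -EINVAL for bad flags,
 *	// -E2BIG if nr_loops exceeds the limit
 */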