// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

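/* A prog runs through a trampoline when it attaches to the entry/exit of
 * a kernel function (fentry/fexit/fmod_ret) or to an LSM hook.
 */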
bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

	return (ptype == BPF_PROG_TYPE_TRACING &&
		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
		 eatype == BPF_MODIFY_RETURN)) ||
	       (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

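/* Allocate one executable page for a JIT image. The caller frees it with
 * bpf_jit_free_exec().
 */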
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

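/* Register/unregister the image with kallsyms and emit perf KSYMBOL events,
 * so the trampoline can be symbolized in stack traces and profiles.
 */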
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

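/* Look up the trampoline for @key, allocating one on first use. The
 * returned trampoline has its refcount elevated; NULL on allocation failure.
 */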
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

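/* If the attach address lives in a module, take a reference on that module
 * so it cannot be unloaded while the trampoline patches its text.
 */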
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

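/* An attach address tracked by ftrace must go through the ftrace
 * direct-call API; otherwise the nop at the function entry is patched
 * directly via bpf_arch_text_poke().
 */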
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr)
		tr->func.ftrace_managed = true;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

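/* Collect the links currently attached to @tr into a kcalloc'd array of
 * bpf_tramp_links, one slot per kind (fentry/fmod_ret/fexit), and report
 * the total count and whether any prog needs the IP argument.
 */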
static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	struct bpf_tramp_link *link;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link **links;
	int kind;

	*total = 0;
	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tlinks[kind].nr_links = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		links = tlinks[kind].links;

		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= link->link.prog->call_get_func_ip;
			*links++ = link;
		}
	}
	return tlinks;
}

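/* Final teardown step, deferred to a workqueue so that freeing the image
 * and its bookkeeping runs in process context rather than from the RCU
 * callback itself.
 */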
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(PAGE_SIZE);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are a few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPTION=y use call_rcu_tasks() to wait on the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPTION case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

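/* Allocate and publish a fresh trampoline image: charge JIT memory,
 * grab an executable page, arm the percpu_ref used for teardown and
 * register the image with kallsyms.
 */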
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

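/* Regenerate the image for the current set of attached progs and switch
 * the function entry over to it, or tear the fentry down when the last
 * prog goes away.
 */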
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_links *tlinks;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tlinks))
		return PTR_ERR(tlinks);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tlinks);
	return err;
}

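/* Map a prog's attach type to the trampoline stage it runs in. */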
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

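/* Attach @link to @tr under tr->mutex and regenerate the trampoline.
 * Extension progs (freplace) are mutually exclusive with fentry/fexit.
 */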
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;
	int err = 0;
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = link->link.prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 link->link.prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&link->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

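/* Typical caller flow (sketch):
 *
 *	tr = bpf_trampoline_get(key, &tgt_info);
 *	err = bpf_trampoline_link_prog(link, tr);
 *	...
 *	err = bpf_trampoline_unlink_prog(link, tr);
 *	bpf_trampoline_put(tr);
 *
 * bpf_trampoline_get() returns the trampoline for @key with the target's
 * function model and address filled in on first use.
 */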
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

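/* Drop a reference; the trampoline is freed once the last user is gone
 * and no progs remain attached.
 */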
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

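/* A start time of NO_START_TIME means "run the prog but don't record
 * stats"; anything larger is a raw sched_clock() timestamp. The value 0
 * is reserved by __bpf_prog_enter() for "skip the prog".
 */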
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned long flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

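/* Sleepable progs are protected by rcu_read_lock_trace() instead of
 * rcu_read_lock(), so they may fault and sleep while attached.
 */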
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

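/* Called from the trampoline's prologue/epilogue: the percpu_ref pins the
 * image while the original function (and fexit progs) may still run.
 */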
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

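/* Weak stub for architectures without trampoline support; arch code is
 * expected to override this and emit the actual assembly.
 */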
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);