1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37
38 #include <asm/barrier.h>
39 #include <asm/unaligned.h>
40
41 /* Registers */
42 #define BPF_R0 regs[BPF_REG_0]
43 #define BPF_R1 regs[BPF_REG_1]
44 #define BPF_R2 regs[BPF_REG_2]
45 #define BPF_R3 regs[BPF_REG_3]
46 #define BPF_R4 regs[BPF_REG_4]
47 #define BPF_R5 regs[BPF_REG_5]
48 #define BPF_R6 regs[BPF_REG_6]
49 #define BPF_R7 regs[BPF_REG_7]
50 #define BPF_R8 regs[BPF_REG_8]
51 #define BPF_R9 regs[BPF_REG_9]
52 #define BPF_R10 regs[BPF_REG_10]
53
54 /* Named registers */
55 #define DST regs[insn->dst_reg]
56 #define SRC regs[insn->src_reg]
57 #define FP regs[BPF_REG_FP]
58 #define AX regs[BPF_REG_AX]
59 #define ARG1 regs[BPF_REG_ARG1]
60 #define CTX regs[BPF_REG_CTX]
61 #define IMM insn->imm
62
63 /* No hurry in this branch
64 *
65 * Exported for the bpf jit load helper.
66 */
67 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
68 {
69 u8 *ptr = NULL;
70
71 if (k >= SKF_NET_OFF) {
72 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
73 } else if (k >= SKF_LL_OFF) {
74 if (unlikely(!skb_mac_header_was_set(skb)))
75 return NULL;
76 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
77 }
78 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
79 return ptr;
80
81 return NULL;
82 }
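/* Example (hypothetical offset): k = SKF_NET_OFF + 12 returns a pointer
 * 12 bytes into the network header (the IPv4 source address), provided
 * the access stays within [skb->head, skb_tail_pointer(skb)).
 */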
83
84 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
85 {
86 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
87 struct bpf_prog_aux *aux;
88 struct bpf_prog *fp;
89
90 size = round_up(size, PAGE_SIZE);
91 fp = __vmalloc(size, gfp_flags);
92 if (fp == NULL)
93 return NULL;
94
95 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
96 if (aux == NULL) {
97 vfree(fp);
98 return NULL;
99 }
100 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
101 if (!fp->active) {
102 vfree(fp);
103 kfree(aux);
104 return NULL;
105 }
106
107 fp->pages = size / PAGE_SIZE;
108 fp->aux = aux;
109 fp->aux->prog = fp;
110 fp->jit_requested = ebpf_jit_enabled();
111 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
112
113 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
114 mutex_init(&fp->aux->used_maps_mutex);
115 mutex_init(&fp->aux->dst_mutex);
116
117 return fp;
118 }
119
120 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
121 {
122 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
123 struct bpf_prog *prog;
124 int cpu;
125
126 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
127 if (!prog)
128 return NULL;
129
130 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
131 if (!prog->stats) {
132 free_percpu(prog->active);
133 kfree(prog->aux);
134 vfree(prog);
135 return NULL;
136 }
137
138 for_each_possible_cpu(cpu) {
139 struct bpf_prog_stats *pstats;
140
141 pstats = per_cpu_ptr(prog->stats, cpu);
142 u64_stats_init(&pstats->syncp);
143 }
144 return prog;
145 }
146 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
147
148 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
149 {
150 if (!prog->aux->nr_linfo || !prog->jit_requested)
151 return 0;
152
153 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
154 sizeof(*prog->aux->jited_linfo),
155 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
156 if (!prog->aux->jited_linfo)
157 return -ENOMEM;
158
159 return 0;
160 }
161
162 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
163 {
164 if (prog->aux->jited_linfo &&
165 (!prog->jited || !prog->aux->jited_linfo[0])) {
166 kvfree(prog->aux->jited_linfo);
167 prog->aux->jited_linfo = NULL;
168 }
169
170 kfree(prog->aux->kfunc_tab);
171 prog->aux->kfunc_tab = NULL;
172 }
173
174 /* The JIT engine is responsible for providing an array
175 * for the insn_off to jited_off mapping (insn_to_jit_off).
176 *
177 * The idx to this array is the insn_off. Hence, the insn_off
178 * here is relative to the prog itself instead of the main prog.
179 * This array has one entry for each xlated bpf insn.
180 *
181 * jited_off is the byte offset to the last byte of the jited insn.
182 *
183 * Hence, with
184 * insn_start:
185 * The first bpf insn off of the prog. The insn off
186 * here is relative to the main prog.
187 * e.g. if prog is a subprog, insn_start > 0
188 * linfo_idx:
189 * The prog's idx to prog->aux->linfo and jited_linfo
190 *
191 * jited_linfo[linfo_idx] = prog->bpf_func
192 *
193 * For i > linfo_idx,
194 *
195 * jited_linfo[i] = prog->bpf_func +
196 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
197 */
198 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
199 const u32 *insn_to_jit_off)
200 {
201 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
202 const struct bpf_line_info *linfo;
203 void **jited_linfo;
204
205 if (!prog->aux->jited_linfo)
206 /* Userspace did not provide linfo */
207 return;
208
209 linfo_idx = prog->aux->linfo_idx;
210 linfo = &prog->aux->linfo[linfo_idx];
211 insn_start = linfo[0].insn_off;
212 insn_end = insn_start + prog->len;
213
214 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
215 jited_linfo[0] = prog->bpf_func;
216
217 nr_linfo = prog->aux->nr_linfo - linfo_idx;
218
219 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
220 /* The verifier ensures that linfo[i].insn_off is
221 * strictly increasing
222 */
223 jited_linfo[i] = prog->bpf_func +
224 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
225 }
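/* Worked example (hypothetical numbers): for a subprog with
 * insn_start = 10 and linfo[i].insn_off = 14, the address recorded is
 *
 *	jited_linfo[i] = prog->bpf_func + insn_to_jit_off[14 - 10 - 1]
 *
 * i.e. bpf_func plus the end offset of the jited code for the insn just
 * before insn_off 14, which is where insn 14's jited code begins.
 */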
226
227 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
228 gfp_t gfp_extra_flags)
229 {
230 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
231 struct bpf_prog *fp;
232 u32 pages;
233
234 size = round_up(size, PAGE_SIZE);
235 pages = size / PAGE_SIZE;
236 if (pages <= fp_old->pages)
237 return fp_old;
238
239 fp = __vmalloc(size, gfp_flags);
240 if (fp) {
241 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
242 fp->pages = pages;
243 fp->aux->prog = fp;
244
245 /* We keep fp->aux from fp_old around in the new
246 * reallocated structure.
247 */
248 fp_old->aux = NULL;
249 fp_old->stats = NULL;
250 fp_old->active = NULL;
251 __bpf_prog_free(fp_old);
252 }
253
254 return fp;
255 }
256
257 void __bpf_prog_free(struct bpf_prog *fp)
258 {
259 if (fp->aux) {
260 mutex_destroy(&fp->aux->used_maps_mutex);
261 mutex_destroy(&fp->aux->dst_mutex);
262 kfree(fp->aux->poke_tab);
263 kfree(fp->aux);
264 }
265 free_percpu(fp->stats);
266 free_percpu(fp->active);
267 vfree(fp);
268 }
269
270 int bpf_prog_calc_tag(struct bpf_prog *fp)
271 {
272 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
273 u32 raw_size = bpf_prog_tag_scratch_size(fp);
274 u32 digest[SHA1_DIGEST_WORDS];
275 u32 ws[SHA1_WORKSPACE_WORDS];
276 u32 i, bsize, psize, blocks;
277 struct bpf_insn *dst;
278 bool was_ld_map;
279 u8 *raw, *todo;
280 __be32 *result;
281 __be64 *bits;
282
283 raw = vmalloc(raw_size);
284 if (!raw)
285 return -ENOMEM;
286
287 sha1_init(digest);
288 memset(ws, 0, sizeof(ws));
289
290 /* We need to zero out the map fds for the digest calculation
291 * since they are unstable as seen from the user-space side.
292 */
293 dst = (void *)raw;
294 for (i = 0, was_ld_map = false; i < fp->len; i++) {
295 dst[i] = fp->insnsi[i];
296 if (!was_ld_map &&
297 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
298 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
299 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
300 was_ld_map = true;
301 dst[i].imm = 0;
302 } else if (was_ld_map &&
303 dst[i].code == 0 &&
304 dst[i].dst_reg == 0 &&
305 dst[i].src_reg == 0 &&
306 dst[i].off == 0) {
307 was_ld_map = false;
308 dst[i].imm = 0;
309 } else {
310 was_ld_map = false;
311 }
312 }
313
314 psize = bpf_prog_insn_size(fp);
315 memset(&raw[psize], 0, raw_size - psize);
316 raw[psize++] = 0x80;
317
318 bsize = round_up(psize, SHA1_BLOCK_SIZE);
319 blocks = bsize / SHA1_BLOCK_SIZE;
320 todo = raw;
321 if (bsize - psize >= sizeof(__be64)) {
322 bits = (__be64 *)(todo + bsize - sizeof(__be64));
323 } else {
324 bits = (__be64 *)(todo + bsize + bits_offset);
325 blocks++;
326 }
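	/* Standard SHA-1 padding: the 0x80 byte appended above, zero fill,
	 * then the message length in bits stored big-endian in the last
	 * 64 bits of the final block; (psize - 1) excludes the 0x80 byte
	 * that was already counted into psize.
	 */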
327 *bits = cpu_to_be64((psize - 1) << 3);
328
329 while (blocks--) {
330 sha1_transform(digest, todo, ws);
331 todo += SHA1_BLOCK_SIZE;
332 }
333
334 result = (__force __be32 *)digest;
335 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
336 result[i] = cpu_to_be32(digest[i]);
337 memcpy(fp->tag, result, sizeof(fp->tag));
338
339 vfree(raw);
340 return 0;
341 }
342
343 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
344 s32 end_new, s32 curr, const bool probe_pass)
345 {
346 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
347 s32 delta = end_new - end_old;
348 s64 imm = insn->imm;
349
350 if (curr < pos && curr + imm + 1 >= end_old)
351 imm += delta;
352 else if (curr >= end_new && curr + imm + 1 < end_new)
353 imm -= delta;
354 if (imm < imm_min || imm > imm_max)
355 return -ERANGE;
356 if (!probe_pass)
357 insn->imm = imm;
358 return 0;
359 }
360
361 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
362 s32 end_new, s32 curr, const bool probe_pass)
363 {
364 const s32 off_min = S16_MIN, off_max = S16_MAX;
365 s32 delta = end_new - end_old;
366 s32 off = insn->off;
367
368 if (curr < pos && curr + off + 1 >= end_old)
369 off += delta;
370 else if (curr >= end_new && curr + off + 1 < end_new)
371 off -= delta;
372 if (off < off_min || off > off_max)
373 return -ERANGE;
374 if (!probe_pass)
375 insn->off = off;
376 return 0;
377 }
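/* Worked example (hypothetical numbers): a jump at insn 2 with off = 5
 * targets insn 2 + 5 + 1 = 8. Patching three insns in at position 4
 * (end_old = 5, end_new = 7, delta = 2) shifts that target to insn 10,
 * so off is bumped to 7 to keep the jump aimed at the same insn.
 */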
378
379 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
380 s32 end_new, const bool probe_pass)
381 {
382 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
383 struct bpf_insn *insn = prog->insnsi;
384 int ret = 0;
385
386 for (i = 0; i < insn_cnt; i++, insn++) {
387 u8 code;
388
389 /* In the probing pass we still operate on the original,
390 * unpatched image in order to check overflows before we
391 * do any other adjustments. Therefore skip the patchlet.
392 */
393 if (probe_pass && i == pos) {
394 i = end_new;
395 insn = prog->insnsi + end_old;
396 }
397 if (bpf_pseudo_func(insn)) {
398 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
399 end_new, i, probe_pass);
400 if (ret)
401 return ret;
402 continue;
403 }
404 code = insn->code;
405 if ((BPF_CLASS(code) != BPF_JMP &&
406 BPF_CLASS(code) != BPF_JMP32) ||
407 BPF_OP(code) == BPF_EXIT)
408 continue;
409 /* Adjust offset of jmps if we cross patch boundaries. */
410 if (BPF_OP(code) == BPF_CALL) {
411 if (insn->src_reg != BPF_PSEUDO_CALL)
412 continue;
413 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
414 end_new, i, probe_pass);
415 } else {
416 ret = bpf_adj_delta_to_off(insn, pos, end_old,
417 end_new, i, probe_pass);
418 }
419 if (ret)
420 break;
421 }
422
423 return ret;
424 }
425
426 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
427 {
428 struct bpf_line_info *linfo;
429 u32 i, nr_linfo;
430
431 nr_linfo = prog->aux->nr_linfo;
432 if (!nr_linfo || !delta)
433 return;
434
435 linfo = prog->aux->linfo;
436
437 for (i = 0; i < nr_linfo; i++)
438 if (off < linfo[i].insn_off)
439 break;
440
441 /* Push all off < linfo[i].insn_off by delta */
442 for (; i < nr_linfo; i++)
443 linfo[i].insn_off += delta;
444 }
445
446 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
447 const struct bpf_insn *patch, u32 len)
448 {
449 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
450 const u32 cnt_max = S16_MAX;
451 struct bpf_prog *prog_adj;
452 int err;
453
454 /* Since our patchlet doesn't expand the image, we're done. */
455 if (insn_delta == 0) {
456 memcpy(prog->insnsi + off, patch, sizeof(*patch));
457 return prog;
458 }
459
460 insn_adj_cnt = prog->len + insn_delta;
461
462 /* Reject anything that would potentially let the insn->off
463 * target overflow when we have excessive program expansions.
464 * We need to probe here before we do any reallocation where
465 * we afterwards may not fail anymore.
466 */
467 if (insn_adj_cnt > cnt_max &&
468 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
469 return ERR_PTR(err);
470
471 /* Several new instructions need to be inserted. Make room
472 * for them. Likely, there's no need for a new allocation as
473 * the last page could have large enough tailroom.
474 */
475 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
476 GFP_USER);
477 if (!prog_adj)
478 return ERR_PTR(-ENOMEM);
479
480 prog_adj->len = insn_adj_cnt;
481
482 /* Patching happens in 3 steps:
483 *
484 * 1) Move over tail of insnsi from next instruction onwards,
485 * so we can patch the single target insn with one or more
486 * new ones (patching is always from 1 to n insns, n > 0).
487 * 2) Inject new instructions at the target location.
488 * 3) Adjust branch offsets if necessary.
489 */
490 insn_rest = insn_adj_cnt - off - len;
491
492 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
493 sizeof(*patch) * insn_rest);
494 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
495
496 /* We are guaranteed not to fail at this point, otherwise
497 * the ship has sailed and we cannot revert to the original
498 * state. An overflow cannot happen at this point.
499 */
500 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
501
502 bpf_adj_linfo(prog_adj, off, insn_delta);
503
504 return prog_adj;
505 }
506
507 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
508 {
509 /* Branch offsets can't overflow when program is shrinking, no need
510 * to call bpf_adj_branches(..., true) here
511 */
512 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
513 sizeof(struct bpf_insn) * (prog->len - off - cnt));
514 prog->len -= cnt;
515
516 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
517 }
518
519 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
520 {
521 int i;
522
523 for (i = 0; i < fp->aux->func_cnt; i++)
524 bpf_prog_kallsyms_del(fp->aux->func[i]);
525 }
526
527 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
528 {
529 bpf_prog_kallsyms_del_subprogs(fp);
530 bpf_prog_kallsyms_del(fp);
531 }
532
533 #ifdef CONFIG_BPF_JIT
534 /* All BPF JIT sysctl knobs here. */
535 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
536 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
537 int bpf_jit_harden __read_mostly;
538 long bpf_jit_limit __read_mostly;
539 long bpf_jit_limit_max __read_mostly;
540
541 static void
542 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
543 {
544 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
545
546 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
547 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
548 }
549
550 static void
551 bpf_prog_ksym_set_name(struct bpf_prog *prog)
552 {
553 char *sym = prog->aux->ksym.name;
554 const char *end = sym + KSYM_NAME_LEN;
555 const struct btf_type *type;
556 const char *func_name;
557
558 BUILD_BUG_ON(sizeof("bpf_prog_") +
559 sizeof(prog->tag) * 2 +
560 /* name has been null terminated.
561 * We would need +1 for the '_' preceding
562 * the name. However, the null character
563 * is double counted between the name and the
564 * sizeof("bpf_prog_") above, so we omit
565 * the +1 here.
566 */
567 sizeof(prog->aux->name) > KSYM_NAME_LEN);
568
569 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
570 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
571
572 /* prog->aux->name will be ignored if full btf name is available */
573 if (prog->aux->func_info_cnt) {
574 type = btf_type_by_id(prog->aux->btf,
575 prog->aux->func_info[prog->aux->func_idx].type_id);
576 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
577 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
578 return;
579 }
580
581 if (prog->aux->name[0])
582 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
583 else
584 *sym = 0;
585 }
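/* The resulting symbol looks like, e.g. (hypothetical tag and name):
 *
 *	bpf_prog_8937c9e909b57207_my_prog
 *
 * i.e. "bpf_prog_" followed by the hex-encoded 8-byte tag and an
 * optional "_<name>" suffix taken from BTF or prog->aux->name.
 */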
586
587 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
588 {
589 return container_of(n, struct bpf_ksym, tnode)->start;
590 }
591
592 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
593 struct latch_tree_node *b)
594 {
595 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
596 }
597
598 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
599 {
600 unsigned long val = (unsigned long)key;
601 const struct bpf_ksym *ksym;
602
603 ksym = container_of(n, struct bpf_ksym, tnode);
604
605 if (val < ksym->start)
606 return -1;
607 if (val >= ksym->end)
608 return 1;
609
610 return 0;
611 }
612
613 static const struct latch_tree_ops bpf_tree_ops = {
614 .less = bpf_tree_less,
615 .comp = bpf_tree_comp,
616 };
617
618 static DEFINE_SPINLOCK(bpf_lock);
619 static LIST_HEAD(bpf_kallsyms);
620 static struct latch_tree_root bpf_tree __cacheline_aligned;
621
622 void bpf_ksym_add(struct bpf_ksym *ksym)
623 {
624 spin_lock_bh(&bpf_lock);
625 WARN_ON_ONCE(!list_empty(&ksym->lnode));
626 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
627 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
628 spin_unlock_bh(&bpf_lock);
629 }
630
631 static void __bpf_ksym_del(struct bpf_ksym *ksym)
632 {
633 if (list_empty(&ksym->lnode))
634 return;
635
636 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
637 list_del_rcu(&ksym->lnode);
638 }
639
640 void bpf_ksym_del(struct bpf_ksym *ksym)
641 {
642 spin_lock_bh(&bpf_lock);
643 __bpf_ksym_del(ksym);
644 spin_unlock_bh(&bpf_lock);
645 }
646
647 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
648 {
649 return fp->jited && !bpf_prog_was_classic(fp);
650 }
651
652 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
653 {
654 if (!bpf_prog_kallsyms_candidate(fp) ||
655 !bpf_capable())
656 return;
657
658 bpf_prog_ksym_set_addr(fp);
659 bpf_prog_ksym_set_name(fp);
660 fp->aux->ksym.prog = true;
661
662 bpf_ksym_add(&fp->aux->ksym);
663 }
664
665 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
666 {
667 if (!bpf_prog_kallsyms_candidate(fp))
668 return;
669
670 bpf_ksym_del(&fp->aux->ksym);
671 }
672
673 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
674 {
675 struct latch_tree_node *n;
676
677 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
678 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
679 }
680
681 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
682 unsigned long *off, char *sym)
683 {
684 struct bpf_ksym *ksym;
685 char *ret = NULL;
686
687 rcu_read_lock();
688 ksym = bpf_ksym_find(addr);
689 if (ksym) {
690 unsigned long symbol_start = ksym->start;
691 unsigned long symbol_end = ksym->end;
692
693 strncpy(sym, ksym->name, KSYM_NAME_LEN);
694
695 ret = sym;
696 if (size)
697 *size = symbol_end - symbol_start;
698 if (off)
699 *off = addr - symbol_start;
700 }
701 rcu_read_unlock();
702
703 return ret;
704 }
705
706 bool is_bpf_text_address(unsigned long addr)
707 {
708 bool ret;
709
710 rcu_read_lock();
711 ret = bpf_ksym_find(addr) != NULL;
712 rcu_read_unlock();
713
714 return ret;
715 }
716
717 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
718 {
719 struct bpf_ksym *ksym = bpf_ksym_find(addr);
720
721 return ksym && ksym->prog ?
722 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
723 NULL;
724 }
725
726 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
727 {
728 const struct exception_table_entry *e = NULL;
729 struct bpf_prog *prog;
730
731 rcu_read_lock();
732 prog = bpf_prog_ksym_find(addr);
733 if (!prog)
734 goto out;
735 if (!prog->aux->num_exentries)
736 goto out;
737
738 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
739 out:
740 rcu_read_unlock();
741 return e;
742 }
743
744 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
745 char *sym)
746 {
747 struct bpf_ksym *ksym;
748 unsigned int it = 0;
749 int ret = -ERANGE;
750
751 if (!bpf_jit_kallsyms_enabled())
752 return ret;
753
754 rcu_read_lock();
755 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
756 if (it++ != symnum)
757 continue;
758
759 strncpy(sym, ksym->name, KSYM_NAME_LEN);
760
761 *value = ksym->start;
762 *type = BPF_SYM_ELF_TYPE;
763
764 ret = 0;
765 break;
766 }
767 rcu_read_unlock();
768
769 return ret;
770 }
771
772 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
773 struct bpf_jit_poke_descriptor *poke)
774 {
775 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
776 static const u32 poke_tab_max = 1024;
777 u32 slot = prog->aux->size_poke_tab;
778 u32 size = slot + 1;
779
780 if (size > poke_tab_max)
781 return -ENOSPC;
782 if (poke->tailcall_target || poke->tailcall_target_stable ||
783 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
784 return -EINVAL;
785
786 switch (poke->reason) {
787 case BPF_POKE_REASON_TAIL_CALL:
788 if (!poke->tail_call.map)
789 return -EINVAL;
790 break;
791 default:
792 return -EINVAL;
793 }
794
795 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
796 if (!tab)
797 return -ENOMEM;
798
799 memcpy(&tab[slot], poke, sizeof(*poke));
800 prog->aux->size_poke_tab = size;
801 prog->aux->poke_tab = tab;
802
803 return slot;
804 }
805
806 /*
807 * BPF program pack allocator.
808 *
809 * Most BPF programs are pretty small. Allocating a whole page for each
810 * program is sometimes a waste. Many small BPF programs also add pressure
811 * to the instruction TLB. To solve this issue, we introduce a BPF program pack
812 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
813 * to host BPF programs.
814 */
815 #define BPF_PROG_CHUNK_SHIFT 6
816 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
817 #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
818
819 struct bpf_prog_pack {
820 struct list_head list;
821 void *ptr;
822 unsigned long bitmap[];
823 };
824
825 #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
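/* Example (hypothetical size): a 100-byte program occupies
 * BPF_PROG_SIZE_TO_NBITS(100) = round_up(100, 64) / 64 = 2 chunks,
 * i.e. two consecutive bits in a pack's bitmap.
 */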
826
827 static size_t bpf_prog_pack_size = -1;
828 static size_t bpf_prog_pack_mask = -1;
829
830 static int bpf_prog_chunk_count(void)
831 {
832 WARN_ON_ONCE(bpf_prog_pack_size == -1);
833 return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
834 }
835
836 static DEFINE_MUTEX(pack_mutex);
837 static LIST_HEAD(pack_list);
838
839 /* PMD_SIZE is not available in some special configs, e.g. ARCH=arm with
840 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
841 */
842 #ifdef PMD_SIZE
843 #define BPF_HPAGE_SIZE PMD_SIZE
844 #define BPF_HPAGE_MASK PMD_MASK
845 #else
846 #define BPF_HPAGE_SIZE PAGE_SIZE
847 #define BPF_HPAGE_MASK PAGE_MASK
848 #endif
849
850 static size_t select_bpf_prog_pack_size(void)
851 {
852 size_t size;
853 void *ptr;
854
855 size = BPF_HPAGE_SIZE * num_online_nodes();
856 ptr = module_alloc(size);
857
858 /* Test whether we can get huge pages. If not, just use PAGE_SIZE
859 * packs.
860 */
861 if (!ptr || !is_vm_area_hugepages(ptr)) {
862 size = PAGE_SIZE;
863 bpf_prog_pack_mask = PAGE_MASK;
864 } else {
865 bpf_prog_pack_mask = BPF_HPAGE_MASK;
866 }
867
868 vfree(ptr);
869 return size;
870 }
871
872 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
873 {
874 struct bpf_prog_pack *pack;
875
876 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
877 GFP_KERNEL);
878 if (!pack)
879 return NULL;
880 pack->ptr = module_alloc(bpf_prog_pack_size);
881 if (!pack->ptr) {
882 kfree(pack);
883 return NULL;
884 }
885 bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
886 bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
887 list_add_tail(&pack->list, &pack_list);
888
889 set_vm_flush_reset_perms(pack->ptr);
890 set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
891 set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
892 return pack;
893 }
894
895 static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
896 {
897 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
898 struct bpf_prog_pack *pack;
899 unsigned long pos;
900 void *ptr = NULL;
901
902 mutex_lock(&pack_mutex);
903 if (bpf_prog_pack_size == -1)
904 bpf_prog_pack_size = select_bpf_prog_pack_size();
905
906 if (size > bpf_prog_pack_size) {
907 size = round_up(size, PAGE_SIZE);
908 ptr = module_alloc(size);
909 if (ptr) {
910 bpf_fill_ill_insns(ptr, size);
911 set_vm_flush_reset_perms(ptr);
912 set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
913 set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
914 }
915 goto out;
916 }
917 list_for_each_entry(pack, &pack_list, list) {
918 pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
919 nbits, 0);
920 if (pos < bpf_prog_chunk_count())
921 goto found_free_area;
922 }
923
924 pack = alloc_new_pack(bpf_fill_ill_insns);
925 if (!pack)
926 goto out;
927
928 pos = 0;
929
930 found_free_area:
931 bitmap_set(pack->bitmap, pos, nbits);
932 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
933
934 out:
935 mutex_unlock(&pack_mutex);
936 return ptr;
937 }
938
939 static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
940 {
941 struct bpf_prog_pack *pack = NULL, *tmp;
942 unsigned int nbits;
943 unsigned long pos;
944 void *pack_ptr;
945
946 mutex_lock(&pack_mutex);
947 if (hdr->size > bpf_prog_pack_size) {
948 module_memfree(hdr);
949 goto out;
950 }
951
952 pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask);
953
954 list_for_each_entry(tmp, &pack_list, list) {
955 if (tmp->ptr == pack_ptr) {
956 pack = tmp;
957 break;
958 }
959 }
960
961 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
962 goto out;
963
964 nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
965 pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
966
967 WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
968 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
969
970 bitmap_clear(pack->bitmap, pos, nbits);
971 if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
972 bpf_prog_chunk_count(), 0) == 0) {
973 list_del(&pack->list);
974 module_memfree(pack->ptr);
975 kfree(pack);
976 }
977 out:
978 mutex_unlock(&pack_mutex);
979 }
980
981 static atomic_long_t bpf_jit_current;
982
983 /* Can be overridden by an arch's JIT compiler if it has a custom,
984 * dedicated BPF backend memory area, or if neither of the two
985 * below apply.
986 */
987 u64 __weak bpf_jit_alloc_exec_limit(void)
988 {
989 #if defined(MODULES_VADDR)
990 return MODULES_END - MODULES_VADDR;
991 #else
992 return VMALLOC_END - VMALLOC_START;
993 #endif
994 }
995
996 static int __init bpf_jit_charge_init(void)
997 {
998 /* Only used as heuristic here to derive limit. */
999 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1000 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
1001 PAGE_SIZE), LONG_MAX);
1002 return 0;
1003 }
1004 pure_initcall(bpf_jit_charge_init);
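/* With a hypothetical 1 GiB exec area, the above yields a default
 * bpf_jit_limit of 256 MiB (a quarter of the area), rounded up to a
 * page boundary.
 */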
1005
1006 int bpf_jit_charge_modmem(u32 size)
1007 {
1008 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1009 if (!bpf_capable()) {
1010 atomic_long_sub(size, &bpf_jit_current);
1011 return -EPERM;
1012 }
1013 }
1014
1015 return 0;
1016 }
1017
1018 void bpf_jit_uncharge_modmem(u32 size)
1019 {
1020 atomic_long_sub(size, &bpf_jit_current);
1021 }
1022
1023 void *__weak bpf_jit_alloc_exec(unsigned long size)
1024 {
1025 return module_alloc(size);
1026 }
1027
1028 void __weak bpf_jit_free_exec(void *addr)
1029 {
1030 module_memfree(addr);
1031 }
1032
1033 struct bpf_binary_header *
1034 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1035 unsigned int alignment,
1036 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1037 {
1038 struct bpf_binary_header *hdr;
1039 u32 size, hole, start;
1040
1041 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1042 alignment > BPF_IMAGE_ALIGNMENT);
1043
1044 /* Most BPF filters are really small, but if some of them
1045 * fill a page, allow at least 128 extra bytes to insert a
1046 * random section of illegal instructions.
1047 */
1048 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1049
1050 if (bpf_jit_charge_modmem(size))
1051 return NULL;
1052 hdr = bpf_jit_alloc_exec(size);
1053 if (!hdr) {
1054 bpf_jit_uncharge_modmem(size);
1055 return NULL;
1056 }
1057
1058 /* Fill space with illegal/arch-dep instructions. */
1059 bpf_fill_ill_insns(hdr, size);
1060
1061 hdr->size = size;
1062 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1063 PAGE_SIZE - sizeof(*hdr));
1064 start = (get_random_int() % hole) & ~(alignment - 1);
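	/* E.g. with a hypothetical hole of 4096 bytes, alignment 16 and a
	 * random draw of 1000, the image starts at offset 1000 & ~15 = 992.
	 */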
1065
1066 /* Leave a random number of instructions before BPF code. */
1067 *image_ptr = &hdr->image[start];
1068
1069 return hdr;
1070 }
1071
1072 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1073 {
1074 u32 size = hdr->size;
1075
1076 bpf_jit_free_exec(hdr);
1077 bpf_jit_uncharge_modmem(size);
1078 }
1079
1080 /* Allocate jit binary from bpf_prog_pack allocator.
1081 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1082 * to the memory. To solve this problem, an RW buffer is also allocated
1083 * at the same time. The JIT engine should calculate offsets based on the
1084 * RO memory address, but write JITed program to the RW buffer. Once the
1085 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1086 * the JITed program to the RO memory.
1087 */
1088 struct bpf_binary_header *
1089 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1090 unsigned int alignment,
1091 struct bpf_binary_header **rw_header,
1092 u8 **rw_image,
1093 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1094 {
1095 struct bpf_binary_header *ro_header;
1096 u32 size, hole, start;
1097
1098 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1099 alignment > BPF_IMAGE_ALIGNMENT);
1100
1101 /* add 16 bytes for a random section of illegal instructions */
1102 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1103
1104 if (bpf_jit_charge_modmem(size))
1105 return NULL;
1106 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1107 if (!ro_header) {
1108 bpf_jit_uncharge_modmem(size);
1109 return NULL;
1110 }
1111
1112 *rw_header = kvmalloc(size, GFP_KERNEL);
1113 if (!*rw_header) {
1114 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1115 bpf_prog_pack_free(ro_header);
1116 bpf_jit_uncharge_modmem(size);
1117 return NULL;
1118 }
1119
1120 /* Fill space with illegal/arch-dep instructions. */
1121 bpf_fill_ill_insns(*rw_header, size);
1122 (*rw_header)->size = size;
1123
1124 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1125 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1126 start = (get_random_int() % hole) & ~(alignment - 1);
1127
1128 *image_ptr = &ro_header->image[start];
1129 *rw_image = &(*rw_header)->image[start];
1130
1131 return ro_header;
1132 }
1133
1134 /* Copy JITed text from rw_header to its final location, the ro_header. */
1135 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1136 struct bpf_binary_header *ro_header,
1137 struct bpf_binary_header *rw_header)
1138 {
1139 void *ptr;
1140
1141 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1142
1143 kvfree(rw_header);
1144
1145 if (IS_ERR(ptr)) {
1146 bpf_prog_pack_free(ro_header);
1147 return PTR_ERR(ptr);
1148 }
1149 return 0;
1150 }
1151
1152 /* bpf_jit_binary_pack_free is called in two different scenarios:
1153 * 1) when the program is freed after a successful JIT;
1154 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1155 * For case 2), we need to free both the RO memory and the RW buffer.
1156 *
1157 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1158 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1159 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1160 * bpf_arch_text_copy (when jit fails).
1161 */
1162 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1163 struct bpf_binary_header *rw_header)
1164 {
1165 u32 size = ro_header->size;
1166
1167 bpf_prog_pack_free(ro_header);
1168 kvfree(rw_header);
1169 bpf_jit_uncharge_modmem(size);
1170 }
1171
1172 struct bpf_binary_header *
1173 bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1174 {
1175 unsigned long real_start = (unsigned long)fp->bpf_func;
1176 unsigned long addr;
1177
1178 addr = real_start & BPF_PROG_CHUNK_MASK;
1179 return (void *)addr;
1180 }
1181
1182 static inline struct bpf_binary_header *
1183 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1184 {
1185 unsigned long real_start = (unsigned long)fp->bpf_func;
1186 unsigned long addr;
1187
1188 addr = real_start & PAGE_MASK;
1189 return (void *)addr;
1190 }
1191
1192 /* This symbol is only overridden by archs that have different
1193 * requirements than the usual eBPF JITs, f.e. when they only
1194 * implement cBPF JIT, do not set images read-only, etc.
1195 */
1196 void __weak bpf_jit_free(struct bpf_prog *fp)
1197 {
1198 if (fp->jited) {
1199 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1200
1201 bpf_jit_binary_free(hdr);
1202 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1203 }
1204
1205 bpf_prog_unlock_free(fp);
1206 }
1207
1208 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1209 const struct bpf_insn *insn, bool extra_pass,
1210 u64 *func_addr, bool *func_addr_fixed)
1211 {
1212 s16 off = insn->off;
1213 s32 imm = insn->imm;
1214 u8 *addr;
1215
1216 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1217 if (!*func_addr_fixed) {
1218 /* Place-holder address till the last pass has collected
1219 * all addresses for JITed subprograms in which case we
1220 * can pick them up from prog->aux.
1221 */
1222 if (!extra_pass)
1223 addr = NULL;
1224 else if (prog->aux->func &&
1225 off >= 0 && off < prog->aux->func_cnt)
1226 addr = (u8 *)prog->aux->func[off]->bpf_func;
1227 else
1228 return -EINVAL;
1229 } else {
1230 /* Address of a BPF helper call. Since part of the core
1231 * kernel, it's always at a fixed location. __bpf_call_base
1232 * and the helper with imm relative to it are both in core
1233 * kernel.
1234 */
1235 addr = (u8 *)__bpf_call_base + imm;
1236 }
1237
1238 *func_addr = (unsigned long)addr;
1239 return 0;
1240 }
1241
1242 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1243 const struct bpf_insn *aux,
1244 struct bpf_insn *to_buff,
1245 bool emit_zext)
1246 {
1247 struct bpf_insn *to = to_buff;
1248 u32 imm_rnd = get_random_int();
1249 s16 off;
1250
1251 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1252 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1253
1254 /* Constraints on AX register:
1255 *
1256 * AX register is inaccessible from user space. It is mapped in
1257 * all JITs, and used here for constant blinding rewrites. It is
1258 * typically "stateless" meaning its contents are only valid within
1259 * the executed instruction, but not across several instructions.
1260 * There are a few exceptions however which are further detailed
1261 * below.
1262 *
1263 * Constant blinding is only used by JITs, not in the interpreter.
1264 * The interpreter uses AX in some occasions as a local temporary
1265 * register e.g. in DIV or MOD instructions.
1266 *
1267 * In restricted circumstances, the verifier can also use the AX
1268 * register for rewrites as long as they do not interfere with
1269 * the above cases!
1270 */
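	/* Sketch of the rewrite for a constant-carrying insn, with a
	 * hypothetical imm_rnd:
	 *
	 *	r1 = 0x1234	becomes		AX = imm_rnd ^ 0x1234
	 *					AX ^= imm_rnd
	 *					r1 = AX
	 *
	 * so the user-chosen constant never appears literally in the
	 * jited image.
	 */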
1271 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1272 goto out;
1273
1274 if (from->imm == 0 &&
1275 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1276 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1277 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1278 goto out;
1279 }
1280
1281 switch (from->code) {
1282 case BPF_ALU | BPF_ADD | BPF_K:
1283 case BPF_ALU | BPF_SUB | BPF_K:
1284 case BPF_ALU | BPF_AND | BPF_K:
1285 case BPF_ALU | BPF_OR | BPF_K:
1286 case BPF_ALU | BPF_XOR | BPF_K:
1287 case BPF_ALU | BPF_MUL | BPF_K:
1288 case BPF_ALU | BPF_MOV | BPF_K:
1289 case BPF_ALU | BPF_DIV | BPF_K:
1290 case BPF_ALU | BPF_MOD | BPF_K:
1291 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1292 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1293 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1294 break;
1295
1296 case BPF_ALU64 | BPF_ADD | BPF_K:
1297 case BPF_ALU64 | BPF_SUB | BPF_K:
1298 case BPF_ALU64 | BPF_AND | BPF_K:
1299 case BPF_ALU64 | BPF_OR | BPF_K:
1300 case BPF_ALU64 | BPF_XOR | BPF_K:
1301 case BPF_ALU64 | BPF_MUL | BPF_K:
1302 case BPF_ALU64 | BPF_MOV | BPF_K:
1303 case BPF_ALU64 | BPF_DIV | BPF_K:
1304 case BPF_ALU64 | BPF_MOD | BPF_K:
1305 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1306 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1307 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1308 break;
1309
1310 case BPF_JMP | BPF_JEQ | BPF_K:
1311 case BPF_JMP | BPF_JNE | BPF_K:
1312 case BPF_JMP | BPF_JGT | BPF_K:
1313 case BPF_JMP | BPF_JLT | BPF_K:
1314 case BPF_JMP | BPF_JGE | BPF_K:
1315 case BPF_JMP | BPF_JLE | BPF_K:
1316 case BPF_JMP | BPF_JSGT | BPF_K:
1317 case BPF_JMP | BPF_JSLT | BPF_K:
1318 case BPF_JMP | BPF_JSGE | BPF_K:
1319 case BPF_JMP | BPF_JSLE | BPF_K:
1320 case BPF_JMP | BPF_JSET | BPF_K:
1321 /* Accommodate for extra offset in case of a backjump. */
1322 off = from->off;
1323 if (off < 0)
1324 off -= 2;
1325 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1326 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1327 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1328 break;
1329
1330 case BPF_JMP32 | BPF_JEQ | BPF_K:
1331 case BPF_JMP32 | BPF_JNE | BPF_K:
1332 case BPF_JMP32 | BPF_JGT | BPF_K:
1333 case BPF_JMP32 | BPF_JLT | BPF_K:
1334 case BPF_JMP32 | BPF_JGE | BPF_K:
1335 case BPF_JMP32 | BPF_JLE | BPF_K:
1336 case BPF_JMP32 | BPF_JSGT | BPF_K:
1337 case BPF_JMP32 | BPF_JSLT | BPF_K:
1338 case BPF_JMP32 | BPF_JSGE | BPF_K:
1339 case BPF_JMP32 | BPF_JSLE | BPF_K:
1340 case BPF_JMP32 | BPF_JSET | BPF_K:
1341 /* Accommodate for extra offset in case of a backjump. */
1342 off = from->off;
1343 if (off < 0)
1344 off -= 2;
1345 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1346 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1347 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1348 off);
1349 break;
1350
1351 case BPF_LD | BPF_IMM | BPF_DW:
1352 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1353 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1354 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1355 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1356 break;
1357 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1358 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1359 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1360 if (emit_zext)
1361 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1362 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1363 break;
1364
1365 case BPF_ST | BPF_MEM | BPF_DW:
1366 case BPF_ST | BPF_MEM | BPF_W:
1367 case BPF_ST | BPF_MEM | BPF_H:
1368 case BPF_ST | BPF_MEM | BPF_B:
1369 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1370 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1371 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1372 break;
1373 }
1374 out:
1375 return to - to_buff;
1376 }
1377
1378 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1379 gfp_t gfp_extra_flags)
1380 {
1381 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1382 struct bpf_prog *fp;
1383
1384 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1385 if (fp != NULL) {
1386 /* aux->prog still points to the fp_other one, so
1387 * when promoting the clone to the real program,
1388 * this still needs to be adapted.
1389 */
1390 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1391 }
1392
1393 return fp;
1394 }
1395
1396 static void bpf_prog_clone_free(struct bpf_prog *fp)
1397 {
1398 /* aux was stolen by the other clone, so we cannot free
1399 * it from this path! It will be freed eventually by the
1400 * other program on release.
1401 *
1402 * At this point, we don't need a deferred release since
1403 * clone is guaranteed to not be locked.
1404 */
1405 fp->aux = NULL;
1406 fp->stats = NULL;
1407 fp->active = NULL;
1408 __bpf_prog_free(fp);
1409 }
1410
1411 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1412 {
1413 /* We have to repoint aux->prog to self, as we don't
1414 * know whether fp here is the clone or the original.
1415 */
1416 fp->aux->prog = fp;
1417 bpf_prog_clone_free(fp_other);
1418 }
1419
1420 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1421 {
1422 struct bpf_insn insn_buff[16], aux[2];
1423 struct bpf_prog *clone, *tmp;
1424 int insn_delta, insn_cnt;
1425 struct bpf_insn *insn;
1426 int i, rewritten;
1427
1428 if (!prog->blinding_requested || prog->blinded)
1429 return prog;
1430
1431 clone = bpf_prog_clone_create(prog, GFP_USER);
1432 if (!clone)
1433 return ERR_PTR(-ENOMEM);
1434
1435 insn_cnt = clone->len;
1436 insn = clone->insnsi;
1437
1438 for (i = 0; i < insn_cnt; i++, insn++) {
1439 if (bpf_pseudo_func(insn)) {
1440 /* ld_imm64 with an address of bpf subprog is not
1441 * a user controlled constant. Don't randomize it,
1442 * since it will conflict with jit_subprogs() logic.
1443 */
1444 insn++;
1445 i++;
1446 continue;
1447 }
1448
1449 /* We temporarily need to hold the original ld64 insn
1450 * so that we can still access the first part in the
1451 * second blinding run.
1452 */
1453 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1454 insn[1].code == 0)
1455 memcpy(aux, insn, sizeof(aux));
1456
1457 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1458 clone->aux->verifier_zext);
1459 if (!rewritten)
1460 continue;
1461
1462 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1463 if (IS_ERR(tmp)) {
1464 /* Patching may have repointed aux->prog during
1465 * realloc from the original one, so we need to
1466 * fix it up here on error.
1467 */
1468 bpf_jit_prog_release_other(prog, clone);
1469 return tmp;
1470 }
1471
1472 clone = tmp;
1473 insn_delta = rewritten - 1;
1474
1475 /* Walk new program and skip insns we just inserted. */
1476 insn = clone->insnsi + i + insn_delta;
1477 insn_cnt += insn_delta;
1478 i += insn_delta;
1479 }
1480
1481 clone->blinded = 1;
1482 return clone;
1483 }
1484 #endif /* CONFIG_BPF_JIT */
1485
1486 /* Base function for offset calculation. Needs to go into .text section,
1487 * therefore keeping it non-static as well; will also be used by JITs
1488 * anyway later on, so do not let the compiler omit it. This also needs
1489 * to go into kallsyms for correlation from e.g. bpftool, so naming
1490 * must not change.
1491 */
1492 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1493 {
1494 return 0;
1495 }
1496 EXPORT_SYMBOL_GPL(__bpf_call_base);
1497
1498 /* All UAPI available opcodes. */
1499 #define BPF_INSN_MAP(INSN_2, INSN_3) \
1500 /* 32 bit ALU operations. */ \
1501 /* Register based. */ \
1502 INSN_3(ALU, ADD, X), \
1503 INSN_3(ALU, SUB, X), \
1504 INSN_3(ALU, AND, X), \
1505 INSN_3(ALU, OR, X), \
1506 INSN_3(ALU, LSH, X), \
1507 INSN_3(ALU, RSH, X), \
1508 INSN_3(ALU, XOR, X), \
1509 INSN_3(ALU, MUL, X), \
1510 INSN_3(ALU, MOV, X), \
1511 INSN_3(ALU, ARSH, X), \
1512 INSN_3(ALU, DIV, X), \
1513 INSN_3(ALU, MOD, X), \
1514 INSN_2(ALU, NEG), \
1515 INSN_3(ALU, END, TO_BE), \
1516 INSN_3(ALU, END, TO_LE), \
1517 /* Immediate based. */ \
1518 INSN_3(ALU, ADD, K), \
1519 INSN_3(ALU, SUB, K), \
1520 INSN_3(ALU, AND, K), \
1521 INSN_3(ALU, OR, K), \
1522 INSN_3(ALU, LSH, K), \
1523 INSN_3(ALU, RSH, K), \
1524 INSN_3(ALU, XOR, K), \
1525 INSN_3(ALU, MUL, K), \
1526 INSN_3(ALU, MOV, K), \
1527 INSN_3(ALU, ARSH, K), \
1528 INSN_3(ALU, DIV, K), \
1529 INSN_3(ALU, MOD, K), \
1530 /* 64 bit ALU operations. */ \
1531 /* Register based. */ \
1532 INSN_3(ALU64, ADD, X), \
1533 INSN_3(ALU64, SUB, X), \
1534 INSN_3(ALU64, AND, X), \
1535 INSN_3(ALU64, OR, X), \
1536 INSN_3(ALU64, LSH, X), \
1537 INSN_3(ALU64, RSH, X), \
1538 INSN_3(ALU64, XOR, X), \
1539 INSN_3(ALU64, MUL, X), \
1540 INSN_3(ALU64, MOV, X), \
1541 INSN_3(ALU64, ARSH, X), \
1542 INSN_3(ALU64, DIV, X), \
1543 INSN_3(ALU64, MOD, X), \
1544 INSN_2(ALU64, NEG), \
1545 /* Immediate based. */ \
1546 INSN_3(ALU64, ADD, K), \
1547 INSN_3(ALU64, SUB, K), \
1548 INSN_3(ALU64, AND, K), \
1549 INSN_3(ALU64, OR, K), \
1550 INSN_3(ALU64, LSH, K), \
1551 INSN_3(ALU64, RSH, K), \
1552 INSN_3(ALU64, XOR, K), \
1553 INSN_3(ALU64, MUL, K), \
1554 INSN_3(ALU64, MOV, K), \
1555 INSN_3(ALU64, ARSH, K), \
1556 INSN_3(ALU64, DIV, K), \
1557 INSN_3(ALU64, MOD, K), \
1558 /* Call instruction. */ \
1559 INSN_2(JMP, CALL), \
1560 /* Exit instruction. */ \
1561 INSN_2(JMP, EXIT), \
1562 /* 32-bit Jump instructions. */ \
1563 /* Register based. */ \
1564 INSN_3(JMP32, JEQ, X), \
1565 INSN_3(JMP32, JNE, X), \
1566 INSN_3(JMP32, JGT, X), \
1567 INSN_3(JMP32, JLT, X), \
1568 INSN_3(JMP32, JGE, X), \
1569 INSN_3(JMP32, JLE, X), \
1570 INSN_3(JMP32, JSGT, X), \
1571 INSN_3(JMP32, JSLT, X), \
1572 INSN_3(JMP32, JSGE, X), \
1573 INSN_3(JMP32, JSLE, X), \
1574 INSN_3(JMP32, JSET, X), \
1575 /* Immediate based. */ \
1576 INSN_3(JMP32, JEQ, K), \
1577 INSN_3(JMP32, JNE, K), \
1578 INSN_3(JMP32, JGT, K), \
1579 INSN_3(JMP32, JLT, K), \
1580 INSN_3(JMP32, JGE, K), \
1581 INSN_3(JMP32, JLE, K), \
1582 INSN_3(JMP32, JSGT, K), \
1583 INSN_3(JMP32, JSLT, K), \
1584 INSN_3(JMP32, JSGE, K), \
1585 INSN_3(JMP32, JSLE, K), \
1586 INSN_3(JMP32, JSET, K), \
1587 /* Jump instructions. */ \
1588 /* Register based. */ \
1589 INSN_3(JMP, JEQ, X), \
1590 INSN_3(JMP, JNE, X), \
1591 INSN_3(JMP, JGT, X), \
1592 INSN_3(JMP, JLT, X), \
1593 INSN_3(JMP, JGE, X), \
1594 INSN_3(JMP, JLE, X), \
1595 INSN_3(JMP, JSGT, X), \
1596 INSN_3(JMP, JSLT, X), \
1597 INSN_3(JMP, JSGE, X), \
1598 INSN_3(JMP, JSLE, X), \
1599 INSN_3(JMP, JSET, X), \
1600 /* Immediate based. */ \
1601 INSN_3(JMP, JEQ, K), \
1602 INSN_3(JMP, JNE, K), \
1603 INSN_3(JMP, JGT, K), \
1604 INSN_3(JMP, JLT, K), \
1605 INSN_3(JMP, JGE, K), \
1606 INSN_3(JMP, JLE, K), \
1607 INSN_3(JMP, JSGT, K), \
1608 INSN_3(JMP, JSLT, K), \
1609 INSN_3(JMP, JSGE, K), \
1610 INSN_3(JMP, JSLE, K), \
1611 INSN_3(JMP, JSET, K), \
1612 INSN_2(JMP, JA), \
1613 /* Store instructions. */ \
1614 /* Register based. */ \
1615 INSN_3(STX, MEM, B), \
1616 INSN_3(STX, MEM, H), \
1617 INSN_3(STX, MEM, W), \
1618 INSN_3(STX, MEM, DW), \
1619 INSN_3(STX, ATOMIC, W), \
1620 INSN_3(STX, ATOMIC, DW), \
1621 /* Immediate based. */ \
1622 INSN_3(ST, MEM, B), \
1623 INSN_3(ST, MEM, H), \
1624 INSN_3(ST, MEM, W), \
1625 INSN_3(ST, MEM, DW), \
1626 /* Load instructions. */ \
1627 /* Register based. */ \
1628 INSN_3(LDX, MEM, B), \
1629 INSN_3(LDX, MEM, H), \
1630 INSN_3(LDX, MEM, W), \
1631 INSN_3(LDX, MEM, DW), \
1632 /* Immediate based. */ \
1633 INSN_3(LD, IMM, DW)
1634
1635 bool bpf_opcode_in_insntable(u8 code)
1636 {
1637 #define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1638 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1639 static const bool public_insntable[256] = {
1640 [0 ... 255] = false,
1641 /* Now overwrite non-defaults ... */
1642 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1643 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1644 [BPF_LD | BPF_ABS | BPF_B] = true,
1645 [BPF_LD | BPF_ABS | BPF_H] = true,
1646 [BPF_LD | BPF_ABS | BPF_W] = true,
1647 [BPF_LD | BPF_IND | BPF_B] = true,
1648 [BPF_LD | BPF_IND | BPF_H] = true,
1649 [BPF_LD | BPF_IND | BPF_W] = true,
1650 };
1651 #undef BPF_INSN_3_TBL
1652 #undef BPF_INSN_2_TBL
1653 return public_insntable[code];
1654 }
1655
1656 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1657 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1658 {
1659 memset(dst, 0, size);
1660 return -EFAULT;
1661 }
1662
1663 /**
1664 * ___bpf_prog_run - run eBPF program on a given context
1665 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1666 * @insn: is the array of eBPF instructions
1667 *
1668 * Decode and execute eBPF instructions.
1669 *
1670 * Return: whatever value is in %BPF_R0 at program exit
1671 */
1672 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1673 {
1674 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1675 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1676 static const void * const jumptable[256] __annotate_jump_table = {
1677 [0 ... 255] = &&default_label,
1678 /* Now overwrite non-defaults ... */
1679 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1680 /* Non-UAPI available opcodes. */
1681 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1682 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1683 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1684 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1685 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1686 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1687 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1688 };
1689 #undef BPF_INSN_3_LBL
1690 #undef BPF_INSN_2_LBL
1691 u32 tail_call_cnt = 0;
1692
1693 #define CONT ({ insn++; goto select_insn; })
1694 #define CONT_JMP ({ insn++; goto select_insn; })
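/* The interpreter uses computed-goto dispatch: each opcode indexes
 * directly into the jumptable, and CONT jumps straight to the handler
 * of the next insn instead of going through a central switch.
 */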
1695
1696 select_insn:
1697 goto *jumptable[insn->code];
1698
1699 /* Explicitly mask the register-based shift amounts with 63 or 31
1700 * to avoid undefined behavior. Normally this won't affect the
1701 * generated code, for example, in case of native 64 bit archs such
1702 * as x86-64 or arm64, the compiler is optimizing the AND away for
1703 * the interpreter. In case of JITs, each of the JIT backends compiles
1704 * the BPF shift operations to machine instructions which produce
1705 * implementation-defined results in such a case; the resulting
1706 * contents of the register may be arbitrary, but program behaviour
1707 * as a whole remains defined. In other words, in case of JIT backends,
1708 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1709 */
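	/* E.g. a 64-bit LSH below computes DST << (SRC & 63), so a shift
	 * amount of 64 degenerates to DST << 0 instead of C-level
	 * undefined behavior.
	 */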
1710 /* ALU (shifts) */
1711 #define SHT(OPCODE, OP) \
1712 ALU64_##OPCODE##_X: \
1713 DST = DST OP (SRC & 63); \
1714 CONT; \
1715 ALU_##OPCODE##_X: \
1716 DST = (u32) DST OP ((u32) SRC & 31); \
1717 CONT; \
1718 ALU64_##OPCODE##_K: \
1719 DST = DST OP IMM; \
1720 CONT; \
1721 ALU_##OPCODE##_K: \
1722 DST = (u32) DST OP (u32) IMM; \
1723 CONT;
1724 /* ALU (rest) */
1725 #define ALU(OPCODE, OP) \
1726 ALU64_##OPCODE##_X: \
1727 DST = DST OP SRC; \
1728 CONT; \
1729 ALU_##OPCODE##_X: \
1730 DST = (u32) DST OP (u32) SRC; \
1731 CONT; \
1732 ALU64_##OPCODE##_K: \
1733 DST = DST OP IMM; \
1734 CONT; \
1735 ALU_##OPCODE##_K: \
1736 DST = (u32) DST OP (u32) IMM; \
1737 CONT;
1738 ALU(ADD, +)
1739 ALU(SUB, -)
1740 ALU(AND, &)
1741 ALU(OR, |)
1742 ALU(XOR, ^)
1743 ALU(MUL, *)
1744 SHT(LSH, <<)
1745 SHT(RSH, >>)
1746 #undef SHT
1747 #undef ALU
1748 ALU_NEG:
1749 DST = (u32) -DST;
1750 CONT;
1751 ALU64_NEG:
1752 DST = -DST;
1753 CONT;
1754 ALU_MOV_X:
1755 DST = (u32) SRC;
1756 CONT;
1757 ALU_MOV_K:
1758 DST = (u32) IMM;
1759 CONT;
1760 ALU64_MOV_X:
1761 DST = SRC;
1762 CONT;
1763 ALU64_MOV_K:
1764 DST = IMM;
1765 CONT;
1766 LD_IMM_DW:
1767 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1768 insn++;
1769 CONT;
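	/* A 64-bit immediate spans two insns: low 32 bits in insn[0].imm,
	 * high 32 bits in insn[1].imm, hence the extra insn++ above.
	 */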
1770 ALU_ARSH_X:
1771 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1772 CONT;
1773 ALU_ARSH_K:
1774 DST = (u64) (u32) (((s32) DST) >> IMM);
1775 CONT;
1776 ALU64_ARSH_X:
1777 (*(s64 *) &DST) >>= (SRC & 63);
1778 CONT;
1779 ALU64_ARSH_K:
1780 (*(s64 *) &DST) >>= IMM;
1781 CONT;
1782 ALU64_MOD_X:
1783 div64_u64_rem(DST, SRC, &AX);
1784 DST = AX;
1785 CONT;
1786 ALU_MOD_X:
1787 AX = (u32) DST;
1788 DST = do_div(AX, (u32) SRC);
1789 CONT;
1790 ALU64_MOD_K:
1791 div64_u64_rem(DST, IMM, &AX);
1792 DST = AX;
1793 CONT;
1794 ALU_MOD_K:
1795 AX = (u32) DST;
1796 DST = do_div(AX, (u32) IMM);
1797 CONT;
1798 ALU64_DIV_X:
1799 DST = div64_u64(DST, SRC);
1800 CONT;
1801 ALU_DIV_X:
1802 AX = (u32) DST;
1803 do_div(AX, (u32) SRC);
1804 DST = (u32) AX;
1805 CONT;
1806 ALU64_DIV_K:
1807 DST = div64_u64(DST, IMM);
1808 CONT;
1809 ALU_DIV_K:
1810 AX = (u32) DST;
1811 do_div(AX, (u32) IMM);
1812 DST = (u32) AX;
1813 CONT;
1814 ALU_END_TO_BE:
1815 switch (IMM) {
1816 case 16:
1817 DST = (__force u16) cpu_to_be16(DST);
1818 break;
1819 case 32:
1820 DST = (__force u32) cpu_to_be32(DST);
1821 break;
1822 case 64:
1823 DST = (__force u64) cpu_to_be64(DST);
1824 break;
1825 }
1826 CONT;
1827 ALU_END_TO_LE:
1828 switch (IMM) {
1829 case 16:
1830 DST = (__force u16) cpu_to_le16(DST);
1831 break;
1832 case 32:
1833 DST = (__force u32) cpu_to_le32(DST);
1834 break;
1835 case 64:
1836 DST = (__force u64) cpu_to_le64(DST);
1837 break;
1838 }
1839 CONT;
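	/* Illustrative note: BPF_TO_BE with IMM == 16 on a little-endian
	 * host truncates DST to 16 bits and byte-swaps it, e.g. a
	 * hypothetical DST of 0x1234abcd becomes 0x000000000000cdab;
	 * on a big-endian host the same insn merely zero-extends the
	 * low 16 bits.
	 */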

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
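	/* Illustrative note: the verifier rewrote insn->imm to be the
	 * helper's offset from __bpf_call_base, roughly (hypothetical
	 * verifier-side snippet, for exposition only):
	 *
	 *	insn->imm = helper_addr - __bpf_call_base;
	 *
	 * so adding the base back above recovers the function pointer.
	 */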

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u, JEQ, ==)
	COND_JMP(u, JNE, !=)
	COND_JMP(u, JGT, >)
	COND_JMP(u, JLT, <)
	COND_JMP(u, JGE, >=)
	COND_JMP(u, JLE, <=)
	COND_JMP(u, JSET, &)
	COND_JMP(s, JSGT, >)
	COND_JMP(s, JSLT, <)
	COND_JMP(s, JSGE, >=)
	COND_JMP(s, JSLE, <=)
#undef COND_JMP
	/* ST, STX and LDX */
	ST_NOSPEC:
		/* Speculation barrier for mitigating Speculative Store Bypass.
		 * In case of arm64, we rely on the firmware mitigation as
		 * controlled via the ssbd kernel parameter. Whenever the
		 * mitigation is enabled, it works for all of the kernel code
		 * with no need to provide any additional instructions here.
		 * In case of x86, we use 'lfence' insn for mitigation. We
		 * reuse preexisting logic from Spectre v1 mitigation that
		 * happens to produce the required code on x86 for v4 as well.
		 */
#ifdef CONFIG_X86
		barrier_nospec();
#endif
		CONT;
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;							\
	LDX_PROBE_MEM_##SIZEOP:						\
		bpf_probe_read_kernel(&DST, sizeof(SIZE),		\
				      (const void *)(long) (SRC + insn->off));	\
		DST = *((SIZE *)&DST);					\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
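	/* Illustrative sketch: unlike LDX_MEM, the PROBE_MEM variants
	 * above tolerate faulting kernel addresses. Conceptually
	 * (hypothetical snippet, for exposition only):
	 *
	 *	u32 val = 0;
	 *	// leaves val zeroed if addr faults, instead of oopsing
	 *	bpf_probe_read_kernel(&val, sizeof(val), addr);
	 *
	 * The JITs achieve the same effect via extable fixups on the
	 * load instruction.
	 */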

#define ATOMIC_ALU_OP(BOP, KOP)						\
		case BOP:						\
			if (BPF_SIZE(insn->code) == BPF_W)		\
				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
					     (DST + insn->off));	\
			else						\
				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
					       (DST + insn->off));	\
			break;						\
		case BOP | BPF_FETCH:					\
			if (BPF_SIZE(insn->code) == BPF_W)		\
				SRC = (u32) atomic_fetch_##KOP(		\
					(u32) SRC,			\
					(atomic_t *)(unsigned long) (DST + insn->off)); \
			else						\
				SRC = (u64) atomic64_fetch_##KOP(	\
					(u64) SRC,			\
					(atomic64_t *)(unsigned long) (DST + insn->off)); \
			break;

	STX_ATOMIC_DW:
	STX_ATOMIC_W:
		switch (IMM) {
		ATOMIC_ALU_OP(BPF_ADD, add)
		ATOMIC_ALU_OP(BPF_AND, and)
		ATOMIC_ALU_OP(BPF_OR, or)
		ATOMIC_ALU_OP(BPF_XOR, xor)
#undef ATOMIC_ALU_OP

		case BPF_XCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				SRC = (u32) atomic_xchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) SRC);
			else
				SRC = (u64) atomic64_xchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) SRC);
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				BPF_R0 = (u32) atomic_cmpxchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) BPF_R0, (u32) SRC);
			else
				BPF_R0 = (u64) atomic64_cmpxchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) BPF_R0, (u64) SRC);
			break;

		default:
			goto default_label;
		}
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
			insn->code, insn->imm);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn); \
}
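
/* Illustrative note: for a stack size of 512, DEFINE_BPF_PROG_RUN
 * expands (roughly) to the following, shown for exposition only:
 *
 *	static unsigned int __bpf_prog_run512(const void *ctx,
 *					      const struct bpf_insn *insn)
 *	{
 *		u64 stack[512 / sizeof(u64)];
 *		u64 regs[MAX_BPF_EXT_REG];
 *
 *		FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
 *		ARG1 = (u64) (unsigned long) ctx;
 *		return ___bpf_prog_run(regs, insn);
 *	}
 *
 * i.e. one interpreter entry point with a fixed-size stack frame.
 */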

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
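
/* Illustrative note: EVAL2(FN, X, Y) expands to FN(X) FN(Y), so each
 * EVALn line below stamps out n interpreter definitions, one per
 * 32-byte stack-size bucket from 32 up to 512.
 */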

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}
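
/* Illustrative note: the index math above maps a stack depth to its
 * 32-byte bucket, e.g. a hypothetical stack_depth of 96 gives
 * round_up(96, 32) / 32 - 1 == 2, selecting __bpf_prog_run_args96.
 */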

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_map_compatible(struct bpf_map *map,
			     const struct bpf_prog *fp)
{
	bool ret;

	if (fp->kprobe_override)
		return false;

	spin_lock(&map->owner.lock);
	if (!map->owner.type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		map->owner.type  = fp->type;
		map->owner.jited = fp->jited;
		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
		ret = true;
	} else {
		ret = map->owner.type  == fp->type &&
		      map->owner.jited == fp->jited &&
		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
	}
	spin_unlock(&map->owner.lock);

	return ret;
}
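
/* Illustrative note: the first program attached becomes the map's
 * owner; a hypothetical later attach only succeeds if it matches, e.g.
 *
 *	if (!bpf_prog_map_compatible(map, fp))
 *		return -EINVAL;
 *
 * so mixing JITed and non-JITed programs in one tail-call map is
 * rejected.
 */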

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i, ret = 0;

	mutex_lock(&aux->used_maps_mutex);
	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];

		if (!map_type_contains_progs(map))
			continue;

		if (!bpf_prog_map_compatible(map, fp)) {
			ret = -EINVAL;
			goto out;
		}
	}

out:
	mutex_unlock(&aux->used_maps_mutex);
	return ret;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}
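
/* Illustrative note: a stack_depth of 0 is clamped to 1 above, which
 * lands in the first bucket, i.e. fp->bpf_func == __bpf_prog_run32.
 */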

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with BPF program
 * @err: pointer to error variable
 *
 * Try to JIT the eBPF program; if the JIT is not available, use the
 * interpreter. The BPF program will be executed via bpf_prog_run().
 *
 * Return: the &fp argument along with &err set to 0 for success or
 * a negative errno code on failure
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regard to JITing, etc.
	 */
	bool jit_needed = false;

	if (fp->bpf_func)
		goto finalize;

	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
	    bpf_prog_has_kfunc_call(fp))
		jit_needed = true;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but fall back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		bpf_prog_jit_attempt_done(fp);
		if (!fp->jited && jit_needed) {
			*err = -ENOTSUPP;
			return fp;
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
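
/* Illustrative usage sketch (hypothetical caller, e.g. a program load
 * path):
 *
 *	int err;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;	// hypothetical cleanup label
 *
 * Note the returned fp must be used even on error, since the JIT may
 * have replaced the program during constant blinding.
 */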

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

struct bpf_empty_prog_array bpf_empty_prog_array = {
	.null_prog = NULL,
};
EXPORT_SYMBOL(bpf_empty_prog_array);

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &bpf_empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &bpf_empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index into the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs (by not counting them) when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}

/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              into the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs (by not counting them) when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}

int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing, *new;
	struct bpf_prog_array *array;
	bool found_exclude = false;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	new = array->items;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog ||
			    existing->prog == &dummy_bpf_prog.prog)
				continue;

			new->prog = existing->prog;
			new->bpf_cookie = existing->bpf_cookie;
			new++;
		}
	}
	if (include_prog) {
		new->prog = include_prog;
		new->bpf_cookie = bpf_cookie;
		new++;
	}
	new->prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct btf_mod_pair *btf_mod;
	u32 i;

	for (i = 0; i < len; i++) {
		btf_mod = &used_btfs[i];
		if (btf_mod->module)
			module_put(btf_mod->module);
		btf_put(btf_mod->btf);
	}
#endif
}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
	kfree(aux->used_btfs);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
#ifdef CONFIG_BPF_SYSCALL
	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
#endif
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->func_cnt; i++) {
		/* We can just unlink the subprog poke descriptor table as
		 * it was originally linked to the main program and is also
		 * released along with it.
		 */
		aux->func[i]->aux->poke_tab = NULL;
		bpf_jit_free(aux->func[i]);
	}
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, e.g. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}
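
/* Illustrative note: an arch JIT that tracks sub-register liveness
 * overrides this weak symbol in its own source file, e.g. (hypothetical
 * placement):
 *
 *	bool bpf_jit_needs_zext(void)
 *	{
 *		return true;
 *	}
 *
 * after which the verifier inserts zero-extension pseudo insns for it.
 */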

/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
bool __weak bpf_jit_supports_subprog_tailcalls(void)
{
	return false;
}

bool __weak bpf_jit_supports_kfunc_call(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	return ERR_PTR(-ENOTSUPP);
}

int __weak bpf_arch_text_invalidate(void *dst, size_t len)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);