Lines matching references to env (refs:env) in kernel/bpf/verifier.c

194 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
195 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
196 static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
197 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
198 static int ref_set_non_owning(struct bpf_verifier_env *env,
200 static void specialize_kfunc(struct bpf_verifier_env *env,
341 find_linfo(const struct bpf_verifier_env *env, u32 insn_off) in find_linfo() argument
347 prog = env->prog; in find_linfo()
363 struct bpf_verifier_env *env = private_data; in verbose() local
366 if (!bpf_verifier_log_needed(&env->log)) in verbose()
370 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
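The verbose() matches above show the logging pattern used throughout the file: the verifier environment arrives through a void *private_data pointer and the variadic arguments are forwarded to bpf_verifier_vlog() only when logging is enabled. A minimal standalone sketch of that callback shape, with a made-up log_ctx standing in for bpf_verifier_env (not kernel API):

    #include <stdarg.h>
    #include <stdio.h>

    struct log_ctx { int level; };   /* stand-in for the verifier log state */

    /* verbose()-style helper: recover the context from private_data, bail out
     * early when logging is off, otherwise forward the varargs to the sink. */
    static void my_verbose(void *private_data, const char *fmt, ...)
    {
        struct log_ctx *ctx = private_data;
        va_list args;

        if (!ctx->level)
            return;
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
    }

    int main(void)
    {
        struct log_ctx ctx = { .level = 1 };

        my_verbose(&ctx, "R%d invalid mem access '%s'\n", 3, "scalar");
        return 0;
    }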
382 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, in verbose_linfo() argument
388 if (!bpf_verifier_log_needed(&env->log)) in verbose_linfo()
391 linfo = find_linfo(env, insn_off); in verbose_linfo()
392 if (!linfo || linfo == env->prev_linfo) in verbose_linfo()
399 bpf_verifier_vlog(&env->log, prefix_fmt, args); in verbose_linfo()
403 verbose(env, "%s\n", in verbose_linfo()
404 ltrim(btf_name_by_offset(env->prog->aux->btf, in verbose_linfo()
407 env->prev_linfo = linfo; in verbose_linfo()
410 static void verbose_invalid_scalar(struct bpf_verifier_env *env, in verbose_invalid_scalar() argument
417 verbose(env, "At %s the register %s ", ctx, reg_name); in verbose_invalid_scalar()
420 verbose(env, "has value %s", tn_buf); in verbose_invalid_scalar()
422 verbose(env, "has unknown scalar value"); in verbose_invalid_scalar()
425 verbose(env, " should have been in %s\n", tn_buf); in verbose_invalid_scalar()
491 static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog) in subprog_is_global() argument
493 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; in subprog_is_global()
607 static const char *reg_type_str(struct bpf_verifier_env *env, in reg_type_str() argument
652 snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s", in reg_type_str()
654 return env->tmp_str_buf; in reg_type_str()
666 static void print_liveness(struct bpf_verifier_env *env, in print_liveness() argument
670 verbose(env, "_"); in print_liveness()
672 verbose(env, "r"); in print_liveness()
674 verbose(env, "w"); in print_liveness()
676 verbose(env, "D"); in print_liveness()
684 static struct bpf_func_state *func(struct bpf_verifier_env *env, in func() argument
687 struct bpf_verifier_state *cur = env->cur_state; in func()
706 static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in stack_slot_obj_get_spi() argument
712 verbose(env, "%s has to be at a constant offset\n", obj_kind); in stack_slot_obj_get_spi()
718 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); in stack_slot_obj_get_spi()
724 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); in stack_slot_obj_get_spi()
728 if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots)) in stack_slot_obj_get_spi()
733 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in dynptr_get_spi() argument
735 return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS); in dynptr_get_spi()
738 static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots) in iter_get_spi() argument
740 return stack_slot_obj_get_spi(env, reg, "iter", nr_slots); in iter_get_spi()
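dynptr_get_spi() and iter_get_spi() above are thin wrappers that reduce a register's constant, negative stack offset to a stack-slot index via stack_slot_obj_get_spi(). The conversion itself is not among the matched lines, so the formula below is an assumption for illustration only, with BPF_REG_SIZE taken as 8 bytes per slot:

    #include <stdio.h>

    #define BPF_REG_SIZE 8   /* one verifier stack slot covers 8 bytes */

    /* Hypothetical offset-to-slot conversion: fp-8 is slot 0, fp-16 is
     * slot 1, and so on down the stack frame. */
    static int get_spi(int off)
    {
        return (-off - 1) / BPF_REG_SIZE;
    }

    int main(void)
    {
        for (int off = -8; off >= -32; off -= 8)
            printf("off=%d -> spi=%d\n", off, get_spi(off));
        return 0;
    }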
791 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno) in mark_reg_scratched() argument
793 env->scratched_regs |= 1U << regno; in mark_reg_scratched()
796 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi) in mark_stack_slot_scratched() argument
798 env->scratched_stack_slots |= 1ULL << spi; in mark_stack_slot_scratched()
801 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno) in reg_scratched() argument
803 return (env->scratched_regs >> regno) & 1; in reg_scratched()
806 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno) in stack_slot_scratched() argument
808 return (env->scratched_stack_slots >> regno) & 1; in stack_slot_scratched()
811 static bool verifier_state_scratched(const struct bpf_verifier_env *env) in verifier_state_scratched() argument
813 return env->scratched_regs || env->scratched_stack_slots; in verifier_state_scratched()
816 static void mark_verifier_state_clean(struct bpf_verifier_env *env) in mark_verifier_state_clean() argument
818 env->scratched_regs = 0U; in mark_verifier_state_clean()
819 env->scratched_stack_slots = 0ULL; in mark_verifier_state_clean()
823 static void mark_verifier_state_scratched(struct bpf_verifier_env *env) in mark_verifier_state_scratched() argument
825 env->scratched_regs = ~0U; in mark_verifier_state_scratched()
826 env->scratched_stack_slots = ~0ULL; in mark_verifier_state_scratched()
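mark_reg_scratched() through mark_verifier_state_scratched() above keep per-instruction "scratched" bookkeeping as two bitmasks: a u32 with one bit per register and a u64 with one bit per stack slot. The bit operations below are lifted directly from those matches; the standalone wrapper around them is only a sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the two masks kept on bpf_verifier_env. */
    static uint32_t scratched_regs;
    static uint64_t scratched_stack_slots;

    static void mark_reg_scratched(uint32_t regno)      { scratched_regs |= 1U << regno; }
    static void mark_stack_slot_scratched(uint32_t spi) { scratched_stack_slots |= 1ULL << spi; }
    static int  reg_scratched(uint32_t regno)           { return (scratched_regs >> regno) & 1; }
    static void mark_state_clean(void)
    {
        scratched_regs = 0U;
        scratched_stack_slots = 0ULL;
    }

    int main(void)
    {
        mark_reg_scratched(1);
        mark_stack_slot_scratched(3);
        printf("r1 scratched? %d, r2 scratched? %d\n", reg_scratched(1), reg_scratched(2));
        mark_state_clean();
        printf("after clean, r1 scratched? %d\n", reg_scratched(1));
        return 0;
    }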
870 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
873 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env, in mark_dynptr_stack_regs() argument
878 int id = ++env->id_gen; in mark_dynptr_stack_regs()
884 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env, in mark_dynptr_cb_reg() argument
888 __mark_dynptr_reg(reg, type, true, ++env->id_gen); in mark_dynptr_cb_reg()
891 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
894 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in mark_stack_slots_dynptr() argument
897 struct bpf_func_state *state = func(env, reg); in mark_stack_slots_dynptr()
901 spi = dynptr_get_spi(env, reg); in mark_stack_slots_dynptr()
914 err = destroy_if_dynptr_stack_slot(env, state, spi); in mark_stack_slots_dynptr()
917 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); in mark_stack_slots_dynptr()
930 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, in mark_stack_slots_dynptr()
940 id = acquire_reference_state(env, insn_idx); in mark_stack_slots_dynptr()
955 static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi) in invalidate_dynptr() argument
964 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in invalidate_dynptr()
965 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in invalidate_dynptr()
992 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in unmark_stack_slots_dynptr() argument
994 struct bpf_func_state *state = func(env, reg); in unmark_stack_slots_dynptr()
997 spi = dynptr_get_spi(env, reg); in unmark_stack_slots_dynptr()
1002 invalidate_dynptr(env, state, spi); in unmark_stack_slots_dynptr()
1016 WARN_ON_ONCE(release_reference(env, ref_obj_id)); in unmark_stack_slots_dynptr()
1028 verbose(env, "verifier internal error: misconfigured ref_obj_id\n"); in unmark_stack_slots_dynptr()
1032 invalidate_dynptr(env, state, i); in unmark_stack_slots_dynptr()
1038 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1041 static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) in mark_reg_invalid() argument
1043 if (!env->allow_ptr_leaks) in mark_reg_invalid()
1044 __mark_reg_not_init(env, reg); in mark_reg_invalid()
1046 __mark_reg_unknown(env, reg); in mark_reg_invalid()
1049 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, in destroy_if_dynptr_stack_slot() argument
1069 verbose(env, "cannot overwrite referenced dynptr\n"); in destroy_if_dynptr_stack_slot()
1073 mark_stack_slot_scratched(env, spi); in destroy_if_dynptr_stack_slot()
1074 mark_stack_slot_scratched(env, spi - 1); in destroy_if_dynptr_stack_slot()
1084 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ in destroy_if_dynptr_stack_slot()
1089 mark_reg_invalid(env, dreg); in destroy_if_dynptr_stack_slot()
1095 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in destroy_if_dynptr_stack_slot()
1096 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in destroy_if_dynptr_stack_slot()
1105 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in is_dynptr_reg_valid_uninit() argument
1112 spi = dynptr_get_spi(env, reg); in is_dynptr_reg_valid_uninit()
1133 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in is_dynptr_reg_valid_init() argument
1135 struct bpf_func_state *state = func(env, reg); in is_dynptr_reg_valid_init()
1147 spi = dynptr_get_spi(env, reg); in is_dynptr_reg_valid_init()
1162 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in is_dynptr_type_expected() argument
1165 struct bpf_func_state *state = func(env, reg); in is_dynptr_type_expected()
1177 spi = dynptr_get_spi(env, reg); in is_dynptr_type_expected()
1186 static int mark_stack_slots_iter(struct bpf_verifier_env *env, in mark_stack_slots_iter() argument
1190 struct bpf_func_state *state = func(env, reg); in mark_stack_slots_iter()
1193 spi = iter_get_spi(env, reg, nr_slots); in mark_stack_slots_iter()
1197 id = acquire_reference_state(env, insn_idx); in mark_stack_slots_iter()
1217 mark_stack_slot_scratched(env, spi - i); in mark_stack_slots_iter()
1223 static int unmark_stack_slots_iter(struct bpf_verifier_env *env, in unmark_stack_slots_iter() argument
1226 struct bpf_func_state *state = func(env, reg); in unmark_stack_slots_iter()
1229 spi = iter_get_spi(env, reg, nr_slots); in unmark_stack_slots_iter()
1238 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); in unmark_stack_slots_iter()
1240 __mark_reg_not_init(env, st); in unmark_stack_slots_iter()
1248 mark_stack_slot_scratched(env, spi - i); in unmark_stack_slots_iter()
1254 static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env, in is_iter_reg_valid_uninit() argument
1257 struct bpf_func_state *state = func(env, reg); in is_iter_reg_valid_uninit()
1264 spi = iter_get_spi(env, reg, nr_slots); in is_iter_reg_valid_uninit()
1281 static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in is_iter_reg_valid_init() argument
1284 struct bpf_func_state *state = func(env, reg); in is_iter_reg_valid_init()
1287 spi = iter_get_spi(env, reg, nr_slots); in is_iter_reg_valid_init()
1355 static void print_verifier_state(struct bpf_verifier_env *env, in print_verifier_state() argument
1364 verbose(env, " frame%d:", state->frameno); in print_verifier_state()
1370 if (!print_all && !reg_scratched(env, i)) in print_verifier_state()
1372 verbose(env, " R%d", i); in print_verifier_state()
1373 print_liveness(env, reg->live); in print_verifier_state()
1374 verbose(env, "="); in print_verifier_state()
1376 verbose(env, "P"); in print_verifier_state()
1380 verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t)); in print_verifier_state()
1381 verbose(env, "%lld", reg->var_off.value + reg->off); in print_verifier_state()
1385 verbose(env, "%s", reg_type_str(env, t)); in print_verifier_state()
1387 verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id)); in print_verifier_state()
1388 verbose(env, "("); in print_verifier_state()
1393 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; }) in print_verifier_state()
1449 verbose(env, ")"); in print_verifier_state()
1465 if (!print_all && !stack_slot_scratched(env, i)) in print_verifier_state()
1472 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1473 print_liveness(env, reg->live); in print_verifier_state()
1474 verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t)); in print_verifier_state()
1476 verbose(env, "P"); in print_verifier_state()
1478 verbose(env, "%lld", reg->var_off.value + reg->off); in print_verifier_state()
1484 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1485 print_liveness(env, reg->live); in print_verifier_state()
1486 verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type)); in print_verifier_state()
1488 verbose(env, "(ref_id=%d)", reg->ref_obj_id); in print_verifier_state()
1496 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1497 print_liveness(env, reg->live); in print_verifier_state()
1498 verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)", in print_verifier_state()
1512 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1513 print_liveness(env, reg->live); in print_verifier_state()
1514 verbose(env, "=%s", types_buf); in print_verifier_state()
1519 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
1522 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
1525 verbose(env, " cb"); in print_verifier_state()
1527 verbose(env, " async_cb"); in print_verifier_state()
1528 verbose(env, "\n"); in print_verifier_state()
1530 mark_verifier_state_clean(env); in print_verifier_state()
1539 static void print_insn_state(struct bpf_verifier_env *env, in print_insn_state() argument
1542 if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) { in print_insn_state()
1544 bpf_vlog_reset(&env->log, env->prev_log_pos - 1); in print_insn_state()
1545 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' '); in print_insn_state()
1547 verbose(env, "%d:", env->insn_idx); in print_insn_state()
1549 print_verifier_state(env, state, false); in print_insn_state()
1649 static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size) in grow_stack_state() argument
1663 if (env->subprog_info[state->subprogno].stack_depth < size) in grow_stack_state()
1664 env->subprog_info[state->subprogno].stack_depth = size; in grow_stack_state()
1674 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) in acquire_reference_state() argument
1676 struct bpf_func_state *state = cur_func(env); in acquire_reference_state()
1683 id = ++env->id_gen; in acquire_reference_state()
1803 static u32 state_htab_size(struct bpf_verifier_env *env) in state_htab_size() argument
1805 return env->prog->len; in state_htab_size()
1808 static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx) in explored_state() argument
1810 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
1813 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
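state_htab_size() and explored_state() above show how previously explored verifier states are bucketed: the hash table has one slot per program instruction and the key is the instruction index XORed with the callsite of the current frame, so the same instruction reached through different call chains hashes to different buckets. A standalone illustration of that bucket selection (the numbers in main are made up):

    #include <stdio.h>

    static unsigned int bucket(unsigned int insn_idx, unsigned int callsite,
                               unsigned int prog_len)
    {
        /* same key construction as explored_state(): (idx ^ callsite) % size */
        return (insn_idx ^ callsite) % prog_len;
    }

    int main(void)
    {
        unsigned int prog_len = 64;               /* hypothetical program length */

        printf("%u\n", bucket(10, 0, prog_len));  /* insn 10 in the main program */
        printf("%u\n", bucket(10, 25, prog_len)); /* same insn reached via a call at insn 25 */
        return 0;
    }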
1987 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) in update_branch_counts() argument
2012 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, in pop_stack() argument
2015 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
2016 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
2019 if (env->head == NULL) in pop_stack()
2028 bpf_vlog_reset(&env->log, head->log_pos); in pop_stack()
2036 env->head = elem; in pop_stack()
2037 env->stack_size--; in pop_stack()
2041 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, in push_stack() argument
2045 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
2055 elem->next = env->head; in push_stack()
2056 elem->log_pos = env->log.end_pos; in push_stack()
2057 env->head = elem; in push_stack()
2058 env->stack_size++; in push_stack()
2063 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
2064 verbose(env, "The sequence of %d jumps is too complex.\n", in push_stack()
2065 env->stack_size); in push_stack()
2082 free_verifier_state(env->cur_state, true); in push_stack()
2083 env->cur_state = NULL; in push_stack()
2085 while (!pop_stack(env, NULL, NULL, false)); in push_stack()
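push_stack() and pop_stack() above keep the branches still to be explored on a singly linked list headed at env->head, count them in env->stack_size, and reject programs whose pending-jump sequence exceeds BPF_COMPLEXITY_LIMIT_JMP_SEQ. A simplified sketch of that push/pop discipline, with a placeholder payload instead of a full verifier state and a stand-in limit value:

    #include <stdio.h>
    #include <stdlib.h>

    #define COMPLEXITY_LIMIT 8192   /* stand-in for BPF_COMPLEXITY_LIMIT_JMP_SEQ */

    struct elem {
        int insn_idx;                /* placeholder for the saved verifier state */
        struct elem *next;
    };

    static struct elem *head;
    static int stack_size;

    /* Push a pending branch; report failure when the jump sequence is too complex. */
    static struct elem *push_state(int insn_idx)
    {
        struct elem *e = calloc(1, sizeof(*e));

        if (!e)
            return NULL;
        e->insn_idx = insn_idx;
        e->next = head;
        head = e;
        if (++stack_size > COMPLEXITY_LIMIT) {
            fprintf(stderr, "the sequence of %d jumps is too complex\n", stack_size);
            return NULL;
        }
        return e;
    }

    /* Pop the most recently pushed branch; return -1 when none are left. */
    static int pop_state(int *insn_idx)
    {
        struct elem *e = head;

        if (!e)
            return -1;
        *insn_idx = e->insn_idx;
        head = e->next;
        free(e);
        stack_size--;
        return 0;
    }

    int main(void)
    {
        int idx;

        push_state(5);
        push_state(9);
        while (!pop_state(&idx))
            printf("explore branch at insn %d\n", idx);
        return 0;
    }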
2145 static void mark_reg_known_zero(struct bpf_verifier_env *env, in mark_reg_known_zero() argument
2149 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); in mark_reg_known_zero()
2152 __mark_reg_not_init(env, regs + regno); in mark_reg_known_zero()
2478 static void __mark_reg_unknown(const struct bpf_verifier_env *env, in __mark_reg_unknown() argument
2491 reg->precise = !env->bpf_capable; in __mark_reg_unknown()
2495 static void mark_reg_unknown(struct bpf_verifier_env *env, in mark_reg_unknown() argument
2499 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); in mark_reg_unknown()
2502 __mark_reg_not_init(env, regs + regno); in mark_reg_unknown()
2505 __mark_reg_unknown(env, regs + regno); in mark_reg_unknown()
2508 static void __mark_reg_not_init(const struct bpf_verifier_env *env, in __mark_reg_not_init() argument
2511 __mark_reg_unknown(env, reg); in __mark_reg_not_init()
2515 static void mark_reg_not_init(struct bpf_verifier_env *env, in mark_reg_not_init() argument
2519 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); in mark_reg_not_init()
2522 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
2525 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
2528 static void mark_btf_ld_reg(struct bpf_verifier_env *env, in mark_btf_ld_reg() argument
2535 mark_reg_unknown(env, regs, regno); in mark_btf_ld_reg()
2538 mark_reg_known_zero(env, regs, regno); in mark_btf_ld_reg()
2545 static void init_reg_state(struct bpf_verifier_env *env, in init_reg_state() argument
2552 mark_reg_not_init(env, regs, i); in init_reg_state()
2560 mark_reg_known_zero(env, regs, BPF_REG_FP); in init_reg_state()
2565 static void init_func_state(struct bpf_verifier_env *env, in init_func_state() argument
2573 init_reg_state(env, state); in init_func_state()
2574 mark_verifier_state_scratched(env); in init_func_state()
2578 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, in push_async_cb() argument
2591 elem->next = env->head; in push_async_cb()
2592 elem->log_pos = env->log.end_pos; in push_async_cb()
2593 env->head = elem; in push_async_cb()
2594 env->stack_size++; in push_async_cb()
2595 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_async_cb()
2596 verbose(env, in push_async_cb()
2598 env->stack_size); in push_async_cb()
2610 init_func_state(env, frame, in push_async_cb()
2617 free_verifier_state(env->cur_state, true); in push_async_cb()
2618 env->cur_state = NULL; in push_async_cb()
2620 while (!pop_stack(env, NULL, NULL, false)); in push_async_cb()
2637 static int find_subprog(struct bpf_verifier_env *env, int off) in find_subprog() argument
2641 p = bsearch(&off, env->subprog_info, env->subprog_cnt, in find_subprog()
2642 sizeof(env->subprog_info[0]), cmp_subprogs); in find_subprog()
2645 return p - env->subprog_info; in find_subprog()
2649 static int add_subprog(struct bpf_verifier_env *env, int off) in add_subprog() argument
2651 int insn_cnt = env->prog->len; in add_subprog()
2655 verbose(env, "call to invalid destination\n"); in add_subprog()
2658 ret = find_subprog(env, off); in add_subprog()
2661 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
2662 verbose(env, "too many subprograms\n"); in add_subprog()
2666 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
2667 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
2668 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
2669 return env->subprog_cnt - 1; in add_subprog()
2748 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, in __find_kfunc_desc_btf() argument
2758 tab = env->prog->aux->kfunc_btf_tab; in __find_kfunc_desc_btf()
2763 verbose(env, "too many different module BTFs\n"); in __find_kfunc_desc_btf()
2767 if (bpfptr_is_null(env->fd_array)) { in __find_kfunc_desc_btf()
2768 verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); in __find_kfunc_desc_btf()
2772 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, in __find_kfunc_desc_btf()
2779 verbose(env, "invalid module BTF fd specified\n"); in __find_kfunc_desc_btf()
2784 verbose(env, "BTF fd for kfunc is not a module BTF\n"); in __find_kfunc_desc_btf()
2818 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) in find_kfunc_desc_btf() argument
2825 verbose(env, "negative offset disallowed for kernel module function call\n"); in find_kfunc_desc_btf()
2829 return __find_kfunc_desc_btf(env, offset); in find_kfunc_desc_btf()
2834 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) in add_kfunc_call() argument
2847 prog_aux = env->prog->aux; in add_kfunc_call()
2852 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); in add_kfunc_call()
2856 if (!env->prog->jit_requested) { in add_kfunc_call()
2857 verbose(env, "JIT is required for calling kernel function\n"); in add_kfunc_call()
2862 verbose(env, "JIT does not support calling kernel function\n"); in add_kfunc_call()
2866 if (!env->prog->gpl_compatible) { in add_kfunc_call()
2867 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); in add_kfunc_call()
2893 desc_btf = find_kfunc_desc_btf(env, offset); in add_kfunc_call()
2895 verbose(env, "failed to find BTF for kernel function\n"); in add_kfunc_call()
2899 if (find_kfunc_desc(env->prog, func_id, offset)) in add_kfunc_call()
2903 verbose(env, "too many different kernel function calls\n"); in add_kfunc_call()
2909 verbose(env, "kernel btf_id %u is not a function\n", in add_kfunc_call()
2915 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", in add_kfunc_call()
2923 verbose(env, "cannot find address for kernel function %s\n", in add_kfunc_call()
2927 specialize_kfunc(env, func_id, offset, &addr); in add_kfunc_call()
2935 verbose(env, "address of kernel function %s is out of range\n", in add_kfunc_call()
2942 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); in add_kfunc_call()
2952 err = btf_distill_func_proto(&env->log, desc_btf, in add_kfunc_call()
3008 static int add_subprog_and_kfunc(struct bpf_verifier_env *env) in add_subprog_and_kfunc() argument
3010 struct bpf_subprog_info *subprog = env->subprog_info; in add_subprog_and_kfunc()
3011 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc()
3012 int i, ret, insn_cnt = env->prog->len; in add_subprog_and_kfunc()
3015 ret = add_subprog(env, 0); in add_subprog_and_kfunc()
3024 if (!env->bpf_capable) { in add_subprog_and_kfunc()
3025 …verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_AD… in add_subprog_and_kfunc()
3030 ret = add_subprog(env, i + insn->imm + 1); in add_subprog_and_kfunc()
3032 ret = add_kfunc_call(env, insn->imm, insn->off); in add_subprog_and_kfunc()
3041 subprog[env->subprog_cnt].start = insn_cnt; in add_subprog_and_kfunc()
3043 if (env->log.level & BPF_LOG_LEVEL2) in add_subprog_and_kfunc()
3044 for (i = 0; i < env->subprog_cnt; i++) in add_subprog_and_kfunc()
3045 verbose(env, "func#%d @%d\n", i, subprog[i].start); in add_subprog_and_kfunc()
3050 static int check_subprogs(struct bpf_verifier_env *env) in check_subprogs() argument
3053 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
3054 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
3055 int insn_cnt = env->prog->len; in check_subprogs()
3079 verbose(env, "jump out of range from insn %d to %d\n", i, off); in check_subprogs()
3091 verbose(env, "last insn is not an exit or jmp\n"); in check_subprogs()
3096 if (cur_subprog < env->subprog_cnt) in check_subprogs()
3106 static int mark_reg_read(struct bpf_verifier_env *env, in mark_reg_read() argument
3118 verbose(env, "verifier BUG type %s var_off %lld off %d\n", in mark_reg_read()
3119 reg_type_str(env, parent->type), in mark_reg_read()
3149 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
3150 env->longest_mark_read_walk = cnt; in mark_reg_read()
3154 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in mark_dynptr_read() argument
3156 struct bpf_func_state *state = func(env, reg); in mark_dynptr_read()
3165 spi = dynptr_get_spi(env, reg); in mark_dynptr_read()
3172 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, in mark_dynptr_read()
3176 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, in mark_dynptr_read()
3180 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in mark_iter_read() argument
3183 struct bpf_func_state *state = func(env, reg); in mark_iter_read()
3189 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); in mark_iter_read()
3193 mark_stack_slot_scratched(env, spi - i); in mark_iter_read()
3203 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, in is_reg64() argument
3313 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) in insn_has_def32() argument
3320 return !is_reg64(env, insn, dst_reg, NULL, DST_OP); in insn_has_def32()
3323 static void mark_insn_zext(struct bpf_verifier_env *env, in mark_insn_zext() argument
3331 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
3336 static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, in __check_reg_arg() argument
3339 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in __check_reg_arg()
3344 verbose(env, "R%d is invalid\n", regno); in __check_reg_arg()
3348 mark_reg_scratched(env, regno); in __check_reg_arg()
3351 rw64 = is_reg64(env, insn, regno, reg, t); in __check_reg_arg()
3355 verbose(env, "R%d !read_ok\n", regno); in __check_reg_arg()
3363 mark_insn_zext(env, reg); in __check_reg_arg()
3365 return mark_reg_read(env, reg, reg->parent, in __check_reg_arg()
3370 verbose(env, "frame pointer is read only\n"); in __check_reg_arg()
3374 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in __check_reg_arg()
3376 mark_reg_unknown(env, regs, regno); in __check_reg_arg()
3381 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, in check_reg_arg() argument
3384 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
3387 return __check_reg_arg(env, state->regs, regno, t); in check_reg_arg()
3390 static void mark_jmp_point(struct bpf_verifier_env *env, int idx) in mark_jmp_point() argument
3392 env->insn_aux_data[idx].jmp_point = true; in mark_jmp_point()
3395 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) in is_jmp_point() argument
3397 return env->insn_aux_data[insn_idx].jmp_point; in is_jmp_point()
3401 static int push_jmp_history(struct bpf_verifier_env *env, in push_jmp_history() argument
3408 if (!is_jmp_point(env, env->insn_idx)) in push_jmp_history()
3416 p[cnt - 1].idx = env->insn_idx; in push_jmp_history()
3417 p[cnt - 1].prev_idx = env->prev_insn_idx; in push_jmp_history()
3480 struct bpf_verifier_env *env = bt->env; in bt_reset() local
3483 bt->env = env; in bt_reset()
3500 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); in bt_subprog_enter()
3511 verbose(bt->env, "BUG subprog exit from frame 0\n"); in bt_subprog_exit()
3628 static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
3639 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, in backtrack_insn() argument
3645 .private_data = env, in backtrack_insn()
3647 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
3657 if (env->log.level & BPF_LOG_LEVEL2) { in backtrack_insn()
3658 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); in backtrack_insn()
3659 verbose(env, "mark_precise: frame%d: regs=%s ", in backtrack_insn()
3660 bt->frame, env->tmp_str_buf); in backtrack_insn()
3661 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); in backtrack_insn()
3662 verbose(env, "stack=%s before ", env->tmp_str_buf); in backtrack_insn()
3663 verbose(env, "%d: ", idx); in backtrack_insn()
3664 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
3723 verbose(env, "BUG spi %d\n", spi); in backtrack_insn()
3740 verbose(env, "BUG spi %d\n", spi); in backtrack_insn()
3754 subprog = find_subprog(env, subprog_insn_idx); in backtrack_insn()
3758 if (subprog_is_global(env, subprog)) { in backtrack_insn()
3771 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); in backtrack_insn()
3786 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); in backtrack_insn()
3814 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); in backtrack_insn()
3839 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); in backtrack_insn()
3854 if (subseq_idx >= 0 && calls_callback(env, subseq_idx)) in backtrack_insn()
3858 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); in backtrack_insn()
3872 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && in backtrack_insn()
3969 static void mark_all_scalars_precise(struct bpf_verifier_env *env, in mark_all_scalars_precise() argument
3976 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
3977 verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n", in mark_all_scalars_precise()
3995 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
3996 verbose(env, "force_precise: frame%d: forcing r%d to be precise\n", in mark_all_scalars_precise()
4007 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4008 verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n", in mark_all_scalars_precise()
4016 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) in mark_all_scalars_imprecise() argument
4068 static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st) in mark_precise_scalar_ids() argument
4070 struct bpf_idset *precise_ids = &env->idset_scratch; in mark_precise_scalar_ids()
4071 struct backtrack_state *bt = &env->bt; in mark_precise_scalar_ids()
4218 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) in __mark_chain_precision() argument
4220 struct backtrack_state *bt = &env->bt; in __mark_chain_precision()
4221 struct bpf_verifier_state *st = env->cur_state; in __mark_chain_precision()
4223 int last_idx = env->insn_idx; in __mark_chain_precision()
4230 if (!env->bpf_capable) in __mark_chain_precision()
4234 bt_init(bt, env->cur_state->curframe); in __mark_chain_precision()
4257 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4258 verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", in __mark_chain_precision()
4284 if (mark_precise_scalar_ids(env, st)) in __mark_chain_precision()
4308 verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n", in __mark_chain_precision()
4319 err = backtrack_insn(env, i, subseq_idx, bt); in __mark_chain_precision()
4322 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4338 if (i >= env->prog->len) { in __mark_chain_precision()
4345 verbose(env, "BUG backtracking idx %d\n", i); in __mark_chain_precision()
4385 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4400 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4401 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4403 verbose(env, "mark_precise: frame%d: parent state regs=%s ", in __mark_chain_precision()
4404 fr, env->tmp_str_buf); in __mark_chain_precision()
4405 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4407 verbose(env, "stack=%s: ", env->tmp_str_buf); in __mark_chain_precision()
4408 print_verifier_state(env, func, true); in __mark_chain_precision()
4425 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4432 int mark_chain_precision(struct bpf_verifier_env *env, int regno) in mark_chain_precision() argument
4434 return __mark_chain_precision(env, regno); in mark_chain_precision()
4440 static int mark_chain_precision_batch(struct bpf_verifier_env *env) in mark_chain_precision_batch() argument
4442 return __mark_chain_precision(env, -1); in mark_chain_precision_batch()
4542 static int check_stack_write_fixed_off(struct bpf_verifier_env *env, in check_stack_write_fixed_off() argument
4550 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
4557 if (!env->allow_ptr_leaks && in check_stack_write_fixed_off()
4560 verbose(env, "attempt to corrupt spilled pointer on stack\n"); in check_stack_write_fixed_off()
4564 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_fixed_off()
4567 if (!env->bypass_spec_v4) { in check_stack_write_fixed_off()
4580 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; in check_stack_write_fixed_off()
4583 err = destroy_if_dynptr_stack_slot(env, state, spi); in check_stack_write_fixed_off()
4587 mark_stack_slot_scratched(env, spi); in check_stack_write_fixed_off()
4589 !register_is_null(reg) && env->bpf_capable) { in check_stack_write_fixed_off()
4597 err = mark_chain_precision(env, value_regno); in check_stack_write_fixed_off()
4606 insn->imm != 0 && env->bpf_capable) { in check_stack_write_fixed_off()
4615 verbose_linfo(env, insn_idx, "; "); in check_stack_write_fixed_off()
4616 verbose(env, "invalid size of register spill\n"); in check_stack_write_fixed_off()
4620 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); in check_stack_write_fixed_off()
4649 err = mark_chain_precision(env, value_regno); in check_stack_write_fixed_off()
4682 static int check_stack_write_var_off(struct bpf_verifier_env *env, in check_stack_write_var_off() argument
4692 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
4699 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_var_off()
4713 err = destroy_if_dynptr_stack_slot(env, state, spi); in check_stack_write_var_off()
4726 mark_stack_slot_scratched(env, spi); in check_stack_write_var_off()
4728 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { in check_stack_write_var_off()
4740 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", in check_stack_write_var_off()
4762 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { in check_stack_write_var_off()
4763 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", in check_stack_write_var_off()
4771 err = mark_chain_precision(env, value_regno); in check_stack_write_var_off()
4786 static void mark_reg_stack_read(struct bpf_verifier_env *env, in mark_reg_stack_read() argument
4791 struct bpf_verifier_state *vstate = env->cur_state; in mark_reg_stack_read()
4800 mark_stack_slot_scratched(env, spi); in mark_reg_stack_read()
4824 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
4838 static int check_stack_read_fixed_off(struct bpf_verifier_env *env, in check_stack_read_fixed_off() argument
4843 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read_fixed_off()
4852 mark_stack_slot_scratched(env, spi); in check_stack_read_fixed_off()
4862 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read_fixed_off()
4863 verbose(env, "invalid size of register fill\n"); in check_stack_read_fixed_off()
4867 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4886 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
4888 verbose(env, "invalid read from stack off %d+%d size %d\n", in check_stack_read_fixed_off()
4892 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
4906 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { in check_stack_read_fixed_off()
4913 verbose(env, "leaking pointer from stack off %d\n", in check_stack_read_fixed_off()
4917 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4925 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
4927 verbose(env, "invalid read from stack off %d+%d size %d\n", in check_stack_read_fixed_off()
4931 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
4933 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); in check_stack_read_fixed_off()
4943 static int check_stack_range_initialized(struct bpf_verifier_env *env,
4949 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) in reg_state() argument
4951 return cur_regs(env) + regno; in reg_state()
4967 static int check_stack_read_var_off(struct bpf_verifier_env *env, in check_stack_read_var_off() argument
4971 struct bpf_reg_state *reg = reg_state(env, ptr_regno); in check_stack_read_var_off()
4972 struct bpf_func_state *ptr_state = func(env, reg); in check_stack_read_var_off()
4978 err = check_stack_range_initialized(env, ptr_regno, off, size, in check_stack_read_var_off()
4985 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); in check_stack_read_var_off()
4998 static int check_stack_read(struct bpf_verifier_env *env, in check_stack_read() argument
5002 struct bpf_reg_state *reg = reg_state(env, ptr_regno); in check_stack_read()
5003 struct bpf_func_state *state = func(env, reg); in check_stack_read()
5016 …verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=… in check_stack_read()
5031 err = check_stack_read_fixed_off(env, state, off, size, in check_stack_read()
5038 err = check_stack_read_var_off(env, ptr_regno, off, size, in check_stack_read()
5055 static int check_stack_write(struct bpf_verifier_env *env, in check_stack_write() argument
5059 struct bpf_reg_state *reg = reg_state(env, ptr_regno); in check_stack_write()
5060 struct bpf_func_state *state = func(env, reg); in check_stack_write()
5065 err = check_stack_write_fixed_off(env, state, off, size, in check_stack_write()
5071 err = check_stack_write_var_off(env, state, in check_stack_write()
5078 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, in check_map_access_type() argument
5081 struct bpf_reg_state *regs = cur_regs(env); in check_map_access_type()
5086 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", in check_map_access_type()
5092 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", in check_map_access_type()
5101 static int __check_mem_access(struct bpf_verifier_env *env, int regno, in __check_mem_access() argument
5111 reg = &cur_regs(env)[regno]; in __check_mem_access()
5114 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", in __check_mem_access()
5118 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", in __check_mem_access()
5124 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", in __check_mem_access()
5129 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", in __check_mem_access()
5137 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, in check_mem_region_access() argument
5141 struct bpf_verifier_state *vstate = env->cur_state; in check_mem_region_access()
5160 …verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n… in check_mem_region_access()
5164 err = __check_mem_access(env, regno, reg->smin_value + off, size, in check_mem_region_access()
5167 verbose(env, "R%d min value is outside of the allowed memory range\n", in check_mem_region_access()
5177 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", in check_mem_region_access()
5181 err = __check_mem_access(env, regno, reg->umax_value + off, size, in check_mem_region_access()
5184 verbose(env, "R%d max value is outside of the allowed memory range\n", in check_mem_region_access()
5192 static int __check_ptr_off_reg(struct bpf_verifier_env *env, in __check_ptr_off_reg() argument
5201 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n", in __check_ptr_off_reg()
5202 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5207 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", in __check_ptr_off_reg()
5208 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5216 verbose(env, "variable %s access var_off=%s disallowed\n", in __check_ptr_off_reg()
5217 reg_type_str(env, reg->type), tn_buf); in __check_ptr_off_reg()
5224 int check_ptr_off_reg(struct bpf_verifier_env *env, in check_ptr_off_reg() argument
5227 return __check_ptr_off_reg(env, reg, regno, false); in check_ptr_off_reg()
5230 static int map_kptr_match_type(struct bpf_verifier_env *env, in map_kptr_match_type() argument
5260 if (__check_ptr_off_reg(env, reg, regno, true)) in map_kptr_match_type()
5287 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in map_kptr_match_type()
5293 verbose(env, "invalid kptr access, R%d type=%s%s ", regno, in map_kptr_match_type()
5294 reg_type_str(env, reg->type), reg_name); in map_kptr_match_type()
5295 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name); in map_kptr_match_type()
5297 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED), in map_kptr_match_type()
5300 verbose(env, "\n"); in map_kptr_match_type()
5307 static bool in_rcu_cs(struct bpf_verifier_env *env) in in_rcu_cs() argument
5309 return env->cur_state->active_rcu_lock || in in_rcu_cs()
5310 env->cur_state->active_lock.ptr || in in_rcu_cs()
5311 !env->prog->aux->sleepable; in in_rcu_cs()
5336 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, in check_map_kptr_access() argument
5340 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
5352 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n"); in check_map_kptr_access()
5360 verbose(env, "store to referenced kptr disallowed\n"); in check_map_kptr_access()
5365 val_reg = reg_state(env, value_regno); in check_map_kptr_access()
5369 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, in check_map_kptr_access()
5371 rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ? in check_map_kptr_access()
5375 val_reg->id = ++env->id_gen; in check_map_kptr_access()
5377 val_reg = reg_state(env, value_regno); in check_map_kptr_access()
5379 map_kptr_match_type(env, kptr_field, val_reg, value_regno)) in check_map_kptr_access()
5383 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n", in check_map_kptr_access()
5388 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n"); in check_map_kptr_access()
5395 static int check_map_access(struct bpf_verifier_env *env, u32 regno, in check_map_access() argument
5399 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
5406 err = check_mem_region_access(env, regno, off, size, map->value_size, in check_map_access()
5428 verbose(env, "kptr cannot be accessed indirectly by helper\n"); in check_map_access()
5432 verbose(env, "kptr access cannot have variable offset\n"); in check_map_access()
5436 verbose(env, "kptr access misaligned expected=%u off=%llu\n", in check_map_access()
5441 verbose(env, "kptr access size must be BPF_DW\n"); in check_map_access()
5446 verbose(env, "%s cannot be accessed directly by load/store\n", in check_map_access()
5457 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, in may_access_direct_pkt_data() argument
5461 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
5485 env->seen_direct_write = true; in may_access_direct_pkt_data()
5490 env->seen_direct_write = true; in may_access_direct_pkt_data()
5499 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, in check_packet_access() argument
5502 struct bpf_reg_state *regs = cur_regs(env); in check_packet_access()
5515 …verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n… in check_packet_access()
5521 __check_mem_access(env, regno, off, size, reg->range, in check_packet_access()
5524 verbose(env, "R%d offset is outside of the packet\n", regno); in check_packet_access()
5534 env->prog->aux->max_pkt_offset = in check_packet_access()
5535 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
5542 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, in check_ctx_access() argument
5548 .log = &env->log, in check_ctx_access()
5551 if (env->ops->is_valid_access && in check_ctx_access()
5552 env->ops->is_valid_access(off, size, t, env->prog, &info)) { in check_ctx_access()
5566 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
5569 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
5570 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
5574 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); in check_ctx_access()
5578 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, in check_flow_keys_access() argument
5583 verbose(env, "invalid access to flow keys off=%d size=%d\n", in check_flow_keys_access()
5590 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, in check_sock_access() argument
5594 struct bpf_reg_state *regs = cur_regs(env); in check_sock_access()
5600 …verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n… in check_sock_access()
5624 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
5629 verbose(env, "R%d invalid %s access off=%d size=%d\n", in check_sock_access()
5630 regno, reg_type_str(env, reg->type), off, size); in check_sock_access()
5635 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) in is_pointer_value() argument
5637 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
5640 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) in is_ctx_reg() argument
5642 const struct bpf_reg_state *reg = reg_state(env, regno); in is_ctx_reg()
5647 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) in is_sk_reg() argument
5649 const struct bpf_reg_state *reg = reg_state(env, regno); in is_sk_reg()
5654 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) in is_pkt_reg() argument
5656 const struct bpf_reg_state *reg = reg_state(env, regno); in is_pkt_reg()
5661 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) in is_flow_key_reg() argument
5663 const struct bpf_reg_state *reg = reg_state(env, regno); in is_flow_key_reg()
5711 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, in check_pkt_ptr_alignment() argument
5737 verbose(env, in check_pkt_ptr_alignment()
5746 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, in check_generic_ptr_alignment() argument
5762 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", in check_generic_ptr_alignment()
5770 static int check_ptr_alignment(struct bpf_verifier_env *env, in check_ptr_alignment() argument
5774 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
5783 return check_pkt_ptr_alignment(env, reg, off, size, strict); in check_ptr_alignment()
5819 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, in check_ptr_alignment()
5829 static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) in check_max_stack_depth_subprog() argument
5831 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth_subprog()
5832 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog()
5861 verbose(env, in check_max_stack_depth_subprog()
5871 verbose(env, "combined stack size of %d calls is %d. Too large\n", in check_max_stack_depth_subprog()
5888 sidx = find_subprog(env, next_insn); in check_max_stack_depth_subprog()
5896 verbose(env, "verifier bug. subprog has tail_call and async cb\n"); in check_max_stack_depth_subprog()
5911 verbose(env, "the call stack of %d frames is too deep !\n", in check_max_stack_depth_subprog()
5926 env->prog->aux->tail_call_reachable = true; in check_max_stack_depth_subprog()
5940 static int check_max_stack_depth(struct bpf_verifier_env *env) in check_max_stack_depth() argument
5942 struct bpf_subprog_info *si = env->subprog_info; in check_max_stack_depth()
5945 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
5947 ret = check_max_stack_depth_subprog(env, i); in check_max_stack_depth()
5957 static int get_callee_stack_depth(struct bpf_verifier_env *env, in get_callee_stack_depth() argument
5962 subprog = find_subprog(env, start); in get_callee_stack_depth()
5968 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
5972 static int __check_buffer_access(struct bpf_verifier_env *env, in __check_buffer_access() argument
5978 verbose(env, in __check_buffer_access()
5987 verbose(env, in __check_buffer_access()
5996 static int check_tp_buffer_access(struct bpf_verifier_env *env, in check_tp_buffer_access() argument
6002 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); in check_tp_buffer_access()
6006 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
6007 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
6012 static int check_buffer_access(struct bpf_verifier_env *env, in check_buffer_access() argument
6021 err = __check_buffer_access(env, buf_info, reg, regno, off, size); in check_buffer_access()
6331 static bool type_is_rcu(struct bpf_verifier_env *env, in type_is_rcu() argument
6339 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); in type_is_rcu()
6342 static bool type_is_rcu_or_null(struct bpf_verifier_env *env, in type_is_rcu_or_null() argument
6350 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); in type_is_rcu_or_null()
6353 static bool type_is_trusted(struct bpf_verifier_env *env, in type_is_trusted() argument
6364 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); in type_is_trusted()
6367 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, in check_ptr_to_btf_access() argument
6381 if (!env->allow_ptr_leaks) { in check_ptr_to_btf_access()
6382 verbose(env, in check_ptr_to_btf_access()
6387 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
6388 verbose(env, in check_ptr_to_btf_access()
6394 verbose(env, in check_ptr_to_btf_access()
6403 verbose(env, in check_ptr_to_btf_access()
6410 verbose(env, in check_ptr_to_btf_access()
6417 verbose(env, in check_ptr_to_btf_access()
6423 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { in check_ptr_to_btf_access()
6425 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); in check_ptr_to_btf_access()
6428 ret = env->ops->btf_struct_access(&env->log, reg, off, size); in check_ptr_to_btf_access()
6435 verbose(env, "only read is supported\n"); in check_ptr_to_btf_access()
6441 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); in check_ptr_to_btf_access()
6445 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); in check_ptr_to_btf_access()
6473 if (type_is_trusted(env, reg, field_name, btf_id)) { in check_ptr_to_btf_access()
6475 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { in check_ptr_to_btf_access()
6476 if (type_is_rcu(env, reg, field_name, btf_id)) { in check_ptr_to_btf_access()
6480 type_is_rcu_or_null(env, reg, field_name, btf_id)) { in check_ptr_to_btf_access()
6485 if (type_is_rcu_or_null(env, reg, field_name, btf_id) && in check_ptr_to_btf_access()
6510 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
6515 static int check_ptr_to_map_access(struct bpf_verifier_env *env, in check_ptr_to_map_access() argument
6531 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); in check_ptr_to_map_access()
6536 verbose(env, "map_ptr access not supported for map type %d\n", in check_ptr_to_map_access()
6544 if (!env->allow_ptr_leaks) { in check_ptr_to_map_access()
6545 verbose(env, in check_ptr_to_map_access()
6552 verbose(env, "R%d is %s invalid negative access: off=%d\n", in check_ptr_to_map_access()
6558 verbose(env, "only read from %s is supported\n", tname); in check_ptr_to_map_access()
6564 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); in check_ptr_to_map_access()
6565 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); in check_ptr_to_map_access()
6570 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); in check_ptr_to_map_access()
6581 static int check_stack_slot_within_bounds(struct bpf_verifier_env *env, in check_stack_slot_within_bounds() argument
6588 if (t == BPF_WRITE || env->allow_uninit_stack) in check_stack_slot_within_bounds()
6604 struct bpf_verifier_env *env, in check_stack_access_within_bounds() argument
6608 struct bpf_reg_state *regs = cur_regs(env); in check_stack_access_within_bounds()
6610 struct bpf_func_state *state = func(env, reg); in check_stack_access_within_bounds()
6629 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", in check_stack_access_within_bounds()
6637 err = check_stack_slot_within_bounds(env, min_off, state, type); in check_stack_access_within_bounds()
6643 verbose(env, "invalid%s stack R%d off=%d size=%d\n", in check_stack_access_within_bounds()
6649 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", in check_stack_access_within_bounds()
6655 return grow_stack_state(env, state, round_up(-min_off, BPF_REG_SIZE)); in check_stack_access_within_bounds()
6664 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, in check_mem_access() argument
6668 struct bpf_reg_state *regs = cur_regs(env); in check_mem_access()
6677 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); in check_mem_access()
6686 verbose(env, "write to change key R%d not allowed\n", regno); in check_mem_access()
6690 err = check_mem_region_access(env, regno, off, size, in check_mem_access()
6695 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6700 is_pointer_value(env, value_regno)) { in check_mem_access()
6701 verbose(env, "R%d leaks addr into map\n", value_regno); in check_mem_access()
6704 err = check_map_access_type(env, regno, off, size, t); in check_mem_access()
6707 err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT); in check_mem_access()
6714 err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field); in check_mem_access()
6733 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6740 verbose(env, "R%d invalid mem access '%s'\n", regno, in check_mem_access()
6741 reg_type_str(env, reg->type)); in check_mem_access()
6746 verbose(env, "R%d cannot write into %s\n", in check_mem_access()
6747 regno, reg_type_str(env, reg->type)); in check_mem_access()
6752 is_pointer_value(env, value_regno)) { in check_mem_access()
6753 verbose(env, "R%d leaks addr into mem\n", value_regno); in check_mem_access()
6757 err = check_mem_region_access(env, regno, off, size, in check_mem_access()
6760 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6767 is_pointer_value(env, value_regno)) { in check_mem_access()
6768 verbose(env, "R%d leaks addr into ctx\n", value_regno); in check_mem_access()
6772 err = check_ptr_off_reg(env, reg, regno); in check_mem_access()
6776 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, in check_mem_access()
6779 verbose_linfo(env, insn_idx, "; "); in check_mem_access()
6786 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6788 mark_reg_known_zero(env, regs, in check_mem_access()
6791 regs[value_regno].id = ++env->id_gen; in check_mem_access()
6808 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); in check_mem_access()
6813 err = check_stack_read(env, regno, off, size, in check_mem_access()
6816 err = check_stack_write(env, regno, off, size, in check_mem_access()
6819 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { in check_mem_access()
6820 verbose(env, "cannot write into packet\n"); in check_mem_access()
6824 is_pointer_value(env, value_regno)) { in check_mem_access()
6825 verbose(env, "R%d leaks addr into packet\n", in check_mem_access()
6829 err = check_packet_access(env, regno, off, size, false); in check_mem_access()
6831 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6834 is_pointer_value(env, value_regno)) { in check_mem_access()
6835 verbose(env, "R%d leaks addr into flow keys\n", in check_mem_access()
6840 err = check_flow_keys_access(env, off, size); in check_mem_access()
6842 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6845 verbose(env, "R%d cannot write into %s\n", in check_mem_access()
6846 regno, reg_type_str(env, reg->type)); in check_mem_access()
6849 err = check_sock_access(env, insn_idx, regno, off, size, t); in check_mem_access()
6851 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6853 err = check_tp_buffer_access(env, reg, regno, off, size); in check_mem_access()
6855 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6858 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, in check_mem_access()
6861 err = check_ptr_to_map_access(env, regs, regno, off, size, t, in check_mem_access()
6869 verbose(env, "R%d cannot write into %s\n", in check_mem_access()
6870 regno, reg_type_str(env, reg->type)); in check_mem_access()
6873 max_access = &env->prog->aux->max_rdonly_access; in check_mem_access()
6875 max_access = &env->prog->aux->max_rdwr_access; in check_mem_access()
6878 err = check_buffer_access(env, reg, regno, off, size, false, in check_mem_access()
6882 mark_reg_unknown(env, regs, value_regno); in check_mem_access()
6884 verbose(env, "R%d invalid mem access '%s'\n", regno, in check_mem_access()
6885 reg_type_str(env, reg->type)); in check_mem_access()
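
The check_mem_access() lines above are the per-register-type dispatch for loads and stores. Below is a minimal BPF-C sketch of the PTR_TO_MAP_VALUE path that this dispatch validates; the map layout, section name and identifiers are illustrative and not taken from the kernel sources listed here.

/* Sketch only: an in-bounds map-value load/store, the kind of access the
 * PTR_TO_MAP_VALUE branch of check_mem_access() checks against the value size. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct counters {
	__u64 packets;
	__u64 bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct counters);
} stats SEC(".maps");

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
	__u32 key = 0;
	struct counters *c = bpf_map_lookup_elem(&stats, &key);

	if (!c)					/* lookup may fail; the verifier insists on this check */
		return XDP_PASS;

	c->packets += 1;			/* in-bounds write into the map value */
	c->bytes += ctx->data_end - ctx->data;
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
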
6900 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) in check_atomic() argument
6918 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); in check_atomic()
6923 verbose(env, "invalid atomic operand size\n"); in check_atomic()
6928 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_atomic()
6933 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_atomic()
6941 err = check_reg_arg(env, aux_reg, SRC_OP); in check_atomic()
6945 if (is_pointer_value(env, aux_reg)) { in check_atomic()
6946 verbose(env, "R%d leaks addr into mem\n", aux_reg); in check_atomic()
6951 if (is_pointer_value(env, insn->src_reg)) { in check_atomic()
6952 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_atomic()
6956 if (is_ctx_reg(env, insn->dst_reg) || in check_atomic()
6957 is_pkt_reg(env, insn->dst_reg) || in check_atomic()
6958 is_flow_key_reg(env, insn->dst_reg) || in check_atomic()
6959 is_sk_reg(env, insn->dst_reg)) { in check_atomic()
6960 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", in check_atomic()
6962 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic()
6973 err = check_reg_arg(env, load_reg, DST_OP); in check_atomic()
6986 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
6989 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
6996 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
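
check_atomic() validates BPF_ATOMIC instructions, which clang emits for the __sync_* builtins. A small sketch, with an illustrative map and section name, of an atomic add that lands on a map value rather than on the ctx/packet/socket pointers rejected in the lines above:

/* Sketch: __sync_fetch_and_add() compiles to a BPF atomic add insn, which
 * check_atomic() only permits on memory such as map values or the stack. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} hits SEC(".maps");

SEC("tp/syscalls/sys_enter_getpid")
int count_getpid(void *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&hits, &key);

	if (val)
		__sync_fetch_and_add(val, 1);	/* atomic add on a map value */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
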
7015 struct bpf_verifier_env *env, int regno, int off, in check_stack_range_initialized() argument
7019 struct bpf_reg_state *reg = reg_state(env, regno); in check_stack_range_initialized()
7020 struct bpf_func_state *state = func(env, reg); in check_stack_range_initialized()
7030 verbose(env, "invalid zero-sized read\n"); in check_stack_range_initialized()
7044 err = check_stack_access_within_bounds(env, regno, off, access_size, in check_stack_range_initialized()
7058 if (!env->bypass_spec_v1) { in check_stack_range_initialized()
7062 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n", in check_stack_range_initialized()
7101 verbose(env, "potential write to dynptr at off=%d disallowed\n", i); in check_stack_range_initialized()
7116 verbose(env, "verifier bug: allocated_stack too small"); in check_stack_range_initialized()
7124 (*stype == STACK_INVALID && env->allow_uninit_stack)) { in check_stack_range_initialized()
7134 env->allow_ptr_leaks)) { in check_stack_range_initialized()
7136 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_range_initialized()
7144 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n", in check_stack_range_initialized()
7150 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n", in check_stack_range_initialized()
7158 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_range_initialized()
7170 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, in check_helper_mem_access() argument
7174 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_helper_mem_access()
7180 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7184 verbose(env, "R%d cannot write into %s\n", regno, in check_helper_mem_access()
7185 reg_type_str(env, reg->type)); in check_helper_mem_access()
7188 return check_mem_region_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7191 if (check_map_access_type(env, regno, reg->off, access_size, in check_helper_mem_access()
7195 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7200 verbose(env, "R%d cannot write into %s\n", regno, in check_helper_mem_access()
7201 reg_type_str(env, reg->type)); in check_helper_mem_access()
7205 return check_mem_region_access(env, regno, reg->off, in check_helper_mem_access()
7211 verbose(env, "R%d cannot write into %s\n", regno, in check_helper_mem_access()
7212 reg_type_str(env, reg->type)); in check_helper_mem_access()
7216 max_access = &env->prog->aux->max_rdonly_access; in check_helper_mem_access()
7218 max_access = &env->prog->aux->max_rdwr_access; in check_helper_mem_access()
7220 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
7225 env, in check_helper_mem_access()
7229 return check_ptr_to_btf_access(env, regs, regno, reg->off, in check_helper_mem_access()
7237 if (!env->ops->convert_ctx_access) { in check_helper_mem_access()
7245 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, in check_helper_mem_access()
7256 verbose(env, "R%d type=%s ", regno, in check_helper_mem_access()
7257 reg_type_str(env, reg->type)); in check_helper_mem_access()
7258 verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); in check_helper_mem_access()
7263 static int check_mem_size_reg(struct bpf_verifier_env *env, in check_mem_size_reg() argument
7292 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", in check_mem_size_reg()
7298 err = check_helper_mem_access(env, regno - 1, 0, in check_mem_size_reg()
7306 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", in check_mem_size_reg()
7310 err = check_helper_mem_access(env, regno - 1, in check_mem_size_reg()
7314 err = mark_chain_precision(env, regno); in check_mem_size_reg()
7318 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in check_mem_reg() argument
7339 err = check_helper_mem_access(env, regno, mem_size, true, &meta); in check_mem_reg()
7342 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta); in check_mem_reg()
7350 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, in check_kfunc_mem_size_reg() argument
7353 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; in check_kfunc_mem_size_reg()
7368 err = check_mem_size_reg(env, reg, regno, true, &meta); in check_kfunc_mem_size_reg()
7371 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta); in check_kfunc_mem_size_reg()
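
check_helper_mem_access() together with check_mem_size_reg()/check_mem_reg() verifies buffer-plus-size helper arguments. A sketch using bpf_get_current_comm(), whose buffer and size arguments exercise exactly this path; the section name is illustrative:

/* Sketch: a stack buffer and its length handed to a helper.  The verifier has
 * to prove that comm[] really has sizeof(comm) bytes of stack behind it. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tp/sched/sched_switch")
int log_comm(void *ctx)
{
	char comm[16];

	if (bpf_get_current_comm(comm, sizeof(comm)))	/* uninit mem + const size pair */
		return 0;

	bpf_printk("switching out of %s", comm);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
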
7400 static int process_spin_lock(struct bpf_verifier_env *env, int regno, in process_spin_lock() argument
7403 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_spin_lock()
7404 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
7412 verbose(env, in process_spin_lock()
7420 verbose(env, in process_spin_lock()
7431 verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local", in process_spin_lock()
7436 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n", in process_spin_lock()
7442 verbose(env, in process_spin_lock()
7460 verbose(env, "bpf_spin_unlock without taking a lock\n"); in process_spin_lock()
7465 verbose(env, "bpf_spin_unlock of different lock\n"); in process_spin_lock()
7469 invalidate_non_owning_refs(env); in process_spin_lock()
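
process_spin_lock() requires a struct bpf_spin_lock embedded in a BTF-described map value, and a matching unlock on the same lock. A sketch of that layout, with illustrative names:

/* Sketch: the lock layout process_spin_lock() looks for, plus a balanced
 * lock/unlock pair on the same map value. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bucket {
	struct bpf_spin_lock lock;
	__u64 total;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct bucket);
} buckets SEC(".maps");

SEC("tc")
int add_len(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct bucket *b = bpf_map_lookup_elem(&buckets, &key);

	if (!b)
		return 0;

	bpf_spin_lock(&b->lock);	/* verifier records the held lock ... */
	b->total += skb->len;
	bpf_spin_unlock(&b->lock);	/* ... and that the same lock is dropped */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
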
7477 static int process_timer_func(struct bpf_verifier_env *env, int regno, in process_timer_func() argument
7480 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_timer_func()
7486 verbose(env, in process_timer_func()
7492 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", in process_timer_func()
7497 verbose(env, "map '%s' has no valid bpf_timer\n", map->name); in process_timer_func()
7501 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", in process_timer_func()
7506 verbose(env, "verifier bug. Two map pointers in a timer helper\n"); in process_timer_func()
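
process_timer_func() similarly expects a struct bpf_timer inside a map value. The sketch below is a best-effort illustration: the callback parameter list follows what set_timer_callback_state() passes (map, key and value pointers), and the map, section and callback names are illustrative.

/* Sketch: a bpf_timer embedded in a map value, armed from a tracepoint. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define CLOCK_MONOTONIC 1	/* uapi value; avoids pulling in time headers */

struct elem {
	struct bpf_timer t;
	__u64 fired;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, __u32 *key, struct elem *val)
{
	val->fired++;			/* R3 is the map value per set_timer_callback_state() */
	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int arm_timer(void *ctx)
{
	__u32 key = 0;
	struct elem *e = bpf_map_lookup_elem(&timers, &key);

	if (!e)
		return 0;

	bpf_timer_init(&e->t, &timers, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&e->t, timer_cb);
	bpf_timer_start(&e->t, 1000000 /* 1 ms */, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
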
7514 static int process_kptr_func(struct bpf_verifier_env *env, int regno, in process_kptr_func() argument
7517 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_kptr_func()
7523 verbose(env, in process_kptr_func()
7529 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", in process_kptr_func()
7534 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); in process_kptr_func()
7542 verbose(env, "off=%d doesn't point to kptr\n", kptr_off); in process_kptr_func()
7546 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off); in process_kptr_func()
7578 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx, in process_dynptr_func() argument
7581 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_dynptr_func()
7588 verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n"); in process_dynptr_func()
7610 if (!is_dynptr_reg_valid_uninit(env, reg)) { in process_dynptr_func()
7611 verbose(env, "Dynptr has to be an uninitialized dynptr\n"); in process_dynptr_func()
7617 err = check_mem_access(env, insn_idx, regno, in process_dynptr_func()
7623 err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id); in process_dynptr_func()
7627 verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n"); in process_dynptr_func()
7631 if (!is_dynptr_reg_valid_init(env, reg)) { in process_dynptr_func()
7632 verbose(env, in process_dynptr_func()
7639 if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) { in process_dynptr_func()
7640 verbose(env, in process_dynptr_func()
7646 err = mark_dynptr_read(env, reg); in process_dynptr_func()
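
process_dynptr_func() distinguishes helpers that take an uninitialized dynptr slot (MEM_UNINIT) from those that need an already-initialized dynptr. A sketch showing both sides with bpf_dynptr_from_mem() and bpf_dynptr_read(); note that the data argument has to be a map value (see the bpf_dynptr_from_mem check inside check_helper_call() further down in this listing). Names are illustrative.

/* Sketch: initialize a stack dynptr over a map value, then do a bounded read. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct blob {
	__u8 data[64];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct blob);
} scratch SEC(".maps");

SEC("tp/syscalls/sys_enter_write")
int dynptr_demo(void *ctx)
{
	__u32 key = 0;
	__u8 first8[8];
	struct bpf_dynptr dptr;
	struct blob *b = bpf_map_lookup_elem(&scratch, &key);

	if (!b)
		return 0;

	/* dptr must be an *uninitialized* stack dynptr here; the helper fills it in */
	if (bpf_dynptr_from_mem(b->data, sizeof(b->data), 0, &dptr))
		return 0;

	/* dptr must be initialized here; offset and length are checked against its size */
	bpf_dynptr_read(first8, sizeof(first8), &dptr, 0, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
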
7651 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi) in iter_ref_obj_id() argument
7653 struct bpf_func_state *state = func(env, reg); in iter_ref_obj_id()
7686 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx, in process_iter_arg() argument
7689 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in process_iter_arg()
7703 if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) { in process_iter_arg()
7704 verbose(env, "expected uninitialized iter_%s as arg #%d\n", in process_iter_arg()
7710 err = check_mem_access(env, insn_idx, regno, in process_iter_arg()
7716 err = mark_stack_slots_iter(env, reg, insn_idx, meta->btf, btf_id, nr_slots); in process_iter_arg()
7721 if (!is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots)) { in process_iter_arg()
7722 verbose(env, "expected an initialized iter_%s as arg #%d\n", in process_iter_arg()
7727 spi = iter_get_spi(env, reg, nr_slots); in process_iter_arg()
7731 err = mark_iter_read(env, reg, spi, nr_slots); in process_iter_arg()
7738 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); in process_iter_arg()
7741 err = unmark_stack_slots_iter(env, reg, nr_slots); in process_iter_arg()
7753 static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env, in find_prev_entry() argument
7761 sl = *explored_state(env, insn_idx); in find_prev_entry()
7775 static void reset_idmap_scratch(struct bpf_verifier_env *env);
7780 static void maybe_widen_reg(struct bpf_verifier_env *env, in maybe_widen_reg() argument
7790 __mark_reg_unknown(env, rcur); in maybe_widen_reg()
7793 static int widen_imprecise_scalars(struct bpf_verifier_env *env, in widen_imprecise_scalars() argument
7800 reset_idmap_scratch(env); in widen_imprecise_scalars()
7806 maybe_widen_reg(env, in widen_imprecise_scalars()
7809 &env->idmap_scratch); in widen_imprecise_scalars()
7816 maybe_widen_reg(env, in widen_imprecise_scalars()
7819 &env->idmap_scratch); in widen_imprecise_scalars()
7903 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, in process_iter_next_call() argument
7906 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in process_iter_next_call()
7914 cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr; in process_iter_next_call()
7918 verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n", in process_iter_next_call()
7929 verbose(env, "bug: bad parent state for iter next call"); in process_iter_next_call()
7936 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); in process_iter_next_call()
7938 queued_st = push_stack(env, insn_idx + 1, insn_idx, false); in process_iter_next_call()
7946 widen_imprecise_scalars(env, prev_st, queued_st); in process_iter_next_call()
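
process_iter_arg() and process_iter_next_call() back the open-coded iterator kfuncs: _new() takes an uninitialized stack slot, each _next() call forks an "another element"/"drained" pair of states, and _destroy() releases the slot. A sketch using the numbers iterator; it assumes a kernel that exports the bpf_iter_num_* kfuncs (newer bpf_helpers.h also ships these externs plus a bpf_for() convenience macro):

/* Sketch: an open-coded numbers iterator driving a loop the verifier can track. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

SEC("tp/syscalls/sys_enter_getpid")
int sum_first_ten(void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;

	bpf_iter_num_new(&it, 0, 10);		/* expects an uninitialized iter slot */
	while ((v = bpf_iter_num_next(&it)))	/* NULL means the iterator is drained */
		sum += *v;
	bpf_iter_num_destroy(&it);		/* slot must be destroyed before exit */

	bpf_printk("sum=%d", sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
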
7986 static int resolve_map_arg_type(struct bpf_verifier_env *env, in resolve_map_arg_type() argument
7992 verbose(env, "invalid map_ptr to access map->type\n"); in resolve_map_arg_type()
8002 verbose(env, "invalid arg_type for sockmap/sockhash\n"); in resolve_map_arg_type()
8133 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, in check_reg_type() argument
8138 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_reg_type()
8145 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); in check_reg_type()
8180 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); in check_reg_type()
8182 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); in check_reg_type()
8183 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); in check_reg_type()
8192 verbose(env, in check_reg_type()
8195 regno, reg_type_str(env, reg->type)); in check_reg_type()
8217 verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno); in check_reg_type()
8223 verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); in check_reg_type()
8230 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8234 verbose(env, "verifier internal error:"); in check_reg_type()
8235 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", in check_reg_type()
8240 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in check_reg_type()
8243 verbose(env, "R%d is of type %s but %s is expected\n", in check_reg_type()
8254 verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n"); in check_reg_type()
8258 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8267 verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n"); in check_reg_type()
8290 int check_func_arg_reg_off(struct bpf_verifier_env *env, in check_func_arg_reg_off() argument
8317 verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n", in check_func_arg_reg_off()
8321 return __check_ptr_off_reg(env, reg, regno, false); in check_func_arg_reg_off()
8354 return __check_ptr_off_reg(env, reg, regno, true); in check_func_arg_reg_off()
8356 return __check_ptr_off_reg(env, reg, regno, false); in check_func_arg_reg_off()
8360 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env, in get_dynptr_arg_reg() argument
8370 verbose(env, "verifier internal error: multiple dynptr args\n"); in get_dynptr_arg_reg()
8377 verbose(env, "verifier internal error: no dynptr arg found\n"); in get_dynptr_arg_reg()
8382 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in dynptr_id() argument
8384 struct bpf_func_state *state = func(env, reg); in dynptr_id()
8389 spi = dynptr_get_spi(env, reg); in dynptr_id()
8395 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in dynptr_ref_obj_id() argument
8397 struct bpf_func_state *state = func(env, reg); in dynptr_ref_obj_id()
8402 spi = dynptr_get_spi(env, reg); in dynptr_ref_obj_id()
8408 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env, in dynptr_get_type() argument
8411 struct bpf_func_state *state = func(env, reg); in dynptr_get_type()
8419 verbose(env, "verifier internal error: invalid spi when querying dynptr type\n"); in dynptr_get_type()
8426 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, in check_func_arg() argument
8432 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; in check_func_arg()
8441 err = check_reg_arg(env, regno, SRC_OP); in check_func_arg()
8446 if (is_pointer_value(env, regno)) { in check_func_arg()
8447 verbose(env, "R%d leaks addr into helper function\n", in check_func_arg()
8455 !may_access_direct_pkt_data(env, meta, BPF_READ)) { in check_func_arg()
8456 verbose(env, "helper access to the packet is not allowed\n"); in check_func_arg()
8461 err = resolve_map_arg_type(env, meta, &arg_type); in check_func_arg()
8477 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta); in check_func_arg()
8481 err = check_func_arg_reg_off(env, reg, regno, arg_type); in check_func_arg()
8488 struct bpf_func_state *state = func(env, reg); in check_func_arg()
8497 spi = dynptr_get_spi(env, reg); in check_func_arg()
8499 verbose(env, "arg %d is an unacquired reference\n", regno); in check_func_arg()
8503 verbose(env, "cannot release unowned const bpf_dynptr\n"); in check_func_arg()
8507 verbose(env, "R%d must be referenced when passed to release function\n", in check_func_arg()
8512 verbose(env, "verifier internal error: more than one release argument\n"); in check_func_arg()
8520 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", in check_func_arg()
8546 verbose(env, in check_func_arg()
8566 verbose(env, "invalid map_ptr to access map->key\n"); in check_func_arg()
8569 err = check_helper_mem_access(env, regno, in check_func_arg()
8582 verbose(env, "invalid map_ptr to access map->value\n"); in check_func_arg()
8586 err = check_helper_mem_access(env, regno, in check_func_arg()
8592 verbose(env, "Helper has invalid btf_id in R%d\n", regno); in check_func_arg()
8599 if (in_rbtree_lock_required_cb(env)) { in check_func_arg()
8600 verbose(env, "can't spin_{lock,unlock} in rbtree cb\n"); in check_func_arg()
8604 err = process_spin_lock(env, regno, true); in check_func_arg()
8608 err = process_spin_lock(env, regno, false); in check_func_arg()
8612 verbose(env, "verifier internal error\n"); in check_func_arg()
8617 err = process_timer_func(env, regno, meta); in check_func_arg()
8630 err = check_helper_mem_access(env, regno, in check_func_arg()
8636 err = check_mem_size_reg(env, reg, regno, false, meta); in check_func_arg()
8639 err = check_mem_size_reg(env, reg, regno, true, meta); in check_func_arg()
8642 err = process_dynptr_func(env, regno, insn_idx, arg_type, 0); in check_func_arg()
8648 verbose(env, "R%d is not a known constant'\n", in check_func_arg()
8653 err = mark_chain_precision(env, regno); in check_func_arg()
8662 err = check_helper_mem_access(env, regno, size, false, meta); in check_func_arg()
8665 err = check_ptr_alignment(env, reg, 0, size, true); in check_func_arg()
8676 verbose(env, "R%d does not point to a readonly map'\n", regno); in check_func_arg()
8681 verbose(env, "R%d is not a constant address'\n", regno); in check_func_arg()
8686 verbose(env, "no direct value access support for this map type\n"); in check_func_arg()
8690 err = check_map_access(env, regno, reg->off, in check_func_arg()
8699 verbose(env, "direct value access on string failed\n"); in check_func_arg()
8705 verbose(env, "string is not zero-terminated\n"); in check_func_arg()
8711 err = process_kptr_func(env, regno, meta); in check_func_arg()
8720 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) in may_update_sockmap() argument
8722 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
8723 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
8748 verbose(env, "cannot update sockmap in this context\n"); in may_update_sockmap()
8752 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) in allow_tail_call_in_subprogs() argument
8754 return env->prog->jit_requested && in allow_tail_call_in_subprogs()
8758 static int check_map_func_compatibility(struct bpf_verifier_env *env, in check_map_func_compatibility() argument
8835 !may_update_sockmap(env, func_id)) in check_map_func_compatibility()
8845 !may_update_sockmap(env, func_id)) in check_map_func_compatibility()
8897 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { in check_map_func_compatibility()
8898 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in check_map_func_compatibility()
9006 verbose(env, "cannot pass map_type %d into func %s#%d\n", in check_map_func_compatibility()
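
check_map_func_compatibility() ties helpers to the map types they may operate on; bpf_tail_call(), for instance, is only valid with a BPF_MAP_TYPE_PROG_ARRAY and, per the lines above, is rejected in non-JITed programs that also use bpf-to-bpf calls. An illustrative sketch:

/* Sketch: the helper/map pairing enforced by check_map_func_compatibility(). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	/* falls through only if the slot is empty or the tail call fails */
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
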
9098 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) in clear_all_pkt_pointers() argument
9103 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in clear_all_pkt_pointers()
9105 mark_reg_invalid(env, reg); in clear_all_pkt_pointers()
9138 static int release_reference(struct bpf_verifier_env *env, in release_reference() argument
9145 err = release_reference_state(cur_func(env), ref_obj_id); in release_reference()
9149 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in release_reference()
9151 mark_reg_invalid(env, reg); in release_reference()
9157 static void invalidate_non_owning_refs(struct bpf_verifier_env *env) in invalidate_non_owning_refs() argument
9162 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in invalidate_non_owning_refs()
9164 mark_reg_invalid(env, reg); in invalidate_non_owning_refs()
9168 static void clear_caller_saved_regs(struct bpf_verifier_env *env, in clear_caller_saved_regs() argument
9175 mark_reg_not_init(env, regs, caller_saved[i]); in clear_caller_saved_regs()
9176 __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK); in clear_caller_saved_regs()
9180 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
9185 static int set_callee_state(struct bpf_verifier_env *env,
9189 static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite, in setup_func_entry() argument
9197 verbose(env, "the call stack of %d frames is too deep\n", in setup_func_entry()
9203 verbose(env, "verifier bug. Frame %d already allocated\n", in setup_func_entry()
9218 init_func_state(env, callee, in setup_func_entry()
9225 err = err ?: set_callee_state_cb(env, caller, callee, callsite); in setup_func_entry()
9240 static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in push_callback_call() argument
9244 struct bpf_verifier_state *state = env->cur_state, *callback_state; in push_callback_call()
9249 err = btf_check_subprog_call(env, subprog, caller->regs); in push_callback_call()
9259 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", in push_callback_call()
9264 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", in push_callback_call()
9275 env->subprog_info[subprog].is_async_cb = true; in push_callback_call()
9276 async_cb = push_async_cb(env, env->subprog_info[subprog].start, in push_callback_call()
9284 err = set_callee_state_cb(env, caller, callee, insn_idx); in push_callback_call()
9294 callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); in push_callback_call()
9298 err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb, in push_callback_call()
9309 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_func_call() argument
9312 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
9317 subprog = find_subprog(env, target_insn); in check_func_call()
9319 verbose(env, "verifier bug. No program starts at insn %d\n", target_insn); in check_func_call()
9324 err = btf_check_subprog_call(env, subprog, caller->regs); in check_func_call()
9327 if (subprog_is_global(env, subprog)) { in check_func_call()
9329 verbose(env, "Caller passes invalid args into func#%d\n", subprog); in check_func_call()
9333 if (env->log.level & BPF_LOG_LEVEL) in check_func_call()
9334 verbose(env, "Func#%d is global and valid. Skipping.\n", subprog); in check_func_call()
9335 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9338 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
9348 err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state); in check_func_call()
9352 clear_caller_saved_regs(env, caller->regs); in check_func_call()
9355 *insn_idx = env->subprog_info[subprog].start - 1; in check_func_call()
9357 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
9358 verbose(env, "caller:\n"); in check_func_call()
9359 print_verifier_state(env, caller, true); in check_func_call()
9360 verbose(env, "callee:\n"); in check_func_call()
9361 print_verifier_state(env, state->frame[state->curframe], true); in check_func_call()
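
check_func_call() handles bpf-to-bpf calls: static subprograms are verified inline at every call site, while global functions (subprog_is_global()) are checked once against their BTF signature and then skipped at call sites. A sketch with a global subprogram; names are illustrative:

/* Sketch: a global subprogram called from the main program. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__noinline int clamp_len(int len)	/* global subprog: verified independently */
{
	return len > 1500 ? 1500 : len;
}

SEC("tc")
int classify(struct __sk_buff *skb)
{
	int len = clamp_len(skb->len);

	return len > 0 ? 0 /* TC_ACT_OK */ : 2 /* TC_ACT_SHOT */;
}

char LICENSE[] SEC("license") = "GPL";
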
9367 int map_set_for_each_callback_args(struct bpf_verifier_env *env, in map_set_for_each_callback_args() argument
9390 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
9394 static int set_callee_state(struct bpf_verifier_env *env, in set_callee_state() argument
9408 static int set_map_elem_callback_state(struct bpf_verifier_env *env, in set_map_elem_callback_state() argument
9413 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; in set_map_elem_callback_state()
9418 verbose(env, "tail_call abusing map_ptr\n"); in set_map_elem_callback_state()
9425 verbose(env, "callback function not allowed for map\n"); in set_map_elem_callback_state()
9429 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
9438 static int set_loop_callback_state(struct bpf_verifier_env *env, in set_loop_callback_state() argument
9451 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
9452 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
9453 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
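
set_loop_callback_state() wires up the bpf_loop() callback: R1 is the iteration index, R2 the caller-supplied context, and the callback's return value decides whether to continue (0) or break (1). A sketch, with illustrative names:

/* Sketch: the bpf_loop() callback contract. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct loop_ctx {
	__u64 sum;
};

static int add_index(__u32 index, void *data)
{
	struct loop_ctx *lc = data;

	lc->sum += index;
	return 0;			/* 0 = keep looping, 1 = break out */
}

SEC("tp/syscalls/sys_enter_getpid")
int run_loop(void *ctx)
{
	struct loop_ctx lc = { .sum = 0 };

	bpf_loop(16, add_index, &lc, 0);
	bpf_printk("sum=%llu", lc.sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
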
9460 static int set_timer_callback_state(struct bpf_verifier_env *env, in set_timer_callback_state() argument
9483 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
9484 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
9490 static int set_find_vma_callback_state(struct bpf_verifier_env *env, in set_find_vma_callback_state() argument
9511 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
9512 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
9518 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, in set_user_ringbuf_callback_state() argument
9527 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
9528 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
9532 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
9533 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
9534 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
9541 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, in set_rbtree_add_callback_state() argument
9561 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
9563 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
9565 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
9566 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
9567 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
9579 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) in in_rbtree_lock_required_cb() argument
9581 struct bpf_verifier_state *state = env->cur_state; in in_rbtree_lock_required_cb()
9582 struct bpf_insn *insn = env->prog->insnsi; in in_rbtree_lock_required_cb()
9598 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) in prepare_func_exit() argument
9600 struct bpf_verifier_state *state = env->cur_state, *prev_st; in prepare_func_exit()
9615 verbose(env, "cannot return stack pointer to the caller\n"); in prepare_func_exit()
9625 verbose(env, "R0 not a scalar value\n"); in prepare_func_exit()
9630 err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64); in prepare_func_exit()
9631 err = err ?: mark_chain_precision(env, BPF_REG_0); in prepare_func_exit()
9636 verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); in prepare_func_exit()
9639 if (!calls_callback(env, callee->callsite)) { in prepare_func_exit()
9640 verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n", in prepare_func_exit()
9671 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
9672 verbose(env, "returning from callee:\n"); in prepare_func_exit()
9673 print_verifier_state(env, callee, true); in prepare_func_exit()
9674 verbose(env, "to caller at %d:\n", *insn_idx); in prepare_func_exit()
9675 print_verifier_state(env, caller, true); in prepare_func_exit()
9692 prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL; in prepare_func_exit()
9694 err = widen_imprecise_scalars(env, prev_st, state); in prepare_func_exit()
9737 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, in record_func_map() argument
9740 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
9756 verbose(env, "kernel subsystem misconfigured verifier\n"); in record_func_map()
9769 verbose(env, "write into map forbidden\n"); in record_func_map()
9783 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, in record_func_key() argument
9786 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
9787 struct bpf_reg_state *regs = cur_regs(env), *reg; in record_func_key()
9795 verbose(env, "kernel subsystem misconfigured verifier\n"); in record_func_key()
9808 err = mark_chain_precision(env, BPF_REG_3); in record_func_key()
9819 static int check_reference_leak(struct bpf_verifier_env *env) in check_reference_leak() argument
9821 struct bpf_func_state *state = cur_func(env); in check_reference_leak()
9831 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", in check_reference_leak()
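
check_reference_leak() rejects programs that can exit while still holding an acquired reference. A sketch with bpf_sk_lookup_tcp()/bpf_sk_release(); the tuple is deliberately simplistic and the names are illustrative:

/* Sketch: an acquired socket reference that must be released on every path. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int sk_peek(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	tuple.ipv4.dport = bpf_htons(80);	/* illustrative tuple, not a full parse */

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return 0;

	/* dropping this release would trigger "Unreleased reference id=..." */
	bpf_sk_release(sk);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
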
9838 static int check_bpf_snprintf_call(struct bpf_verifier_env *env, in check_bpf_snprintf_call() argument
9861 verbose(env, "verifier bug\n"); in check_bpf_snprintf_call()
9871 verbose(env, "Invalid format string\n"); in check_bpf_snprintf_call()
9876 static int check_get_func_ip(struct bpf_verifier_env *env) in check_get_func_ip() argument
9878 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_get_func_ip()
9882 if (!bpf_prog_has_trampoline(env->prog)) { in check_get_func_ip()
9883 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", in check_get_func_ip()
9892 verbose(env, "func %s#%d not supported for program type %d\n", in check_get_func_ip()
9897 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) in cur_aux() argument
9899 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
9902 static bool loop_flag_is_zero(struct bpf_verifier_env *env) in loop_flag_is_zero() argument
9904 struct bpf_reg_state *regs = cur_regs(env); in loop_flag_is_zero()
9909 mark_chain_precision(env, BPF_REG_4); in loop_flag_is_zero()
9914 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno) in update_loop_inline_state() argument
9916 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; in update_loop_inline_state()
9920 state->fit_for_inline = loop_flag_is_zero(env); in update_loop_inline_state()
9928 state->fit_for_inline = (loop_flag_is_zero(env) && in update_loop_inline_state()
9932 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_helper_call() argument
9935 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_helper_call()
9948 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), in check_helper_call()
9953 if (env->ops->get_func_proto) in check_helper_call()
9954 fn = env->ops->get_func_proto(func_id, env->prog); in check_helper_call()
9956 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), in check_helper_call()
9962 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
9963 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
9967 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
9968 verbose(env, "helper call is not allowed in probe\n"); in check_helper_call()
9972 if (!env->prog->aux->sleepable && fn->might_sleep) { in check_helper_call()
9973 verbose(env, "helper call might sleep in a non-sleepable prog\n"); in check_helper_call()
9980 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", in check_helper_call()
9990 verbose(env, "kernel subsystem misconfigured func %s#%d\n", in check_helper_call()
9995 if (env->cur_state->active_rcu_lock) { in check_helper_call()
9997 verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n", in check_helper_call()
10002 if (env->prog->aux->sleepable && is_storage_get_function(func_id)) in check_helper_call()
10003 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
10009 err = check_func_arg(env, i, &meta, fn, insn_idx); in check_helper_call()
10014 err = record_func_map(env, &meta, func_id, insn_idx); in check_helper_call()
10018 err = record_func_key(env, &meta, func_id, insn_idx); in check_helper_call()
10026 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, in check_helper_call()
10032 regs = cur_regs(env); in check_helper_call()
10042 verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n"); in check_helper_call()
10045 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]); in check_helper_call()
10047 err = release_reference(env, meta.ref_obj_id); in check_helper_call()
10055 verbose(env, "func %s#%d reference has not been acquired before\n", in check_helper_call()
10063 err = check_reference_leak(env); in check_helper_call()
10065 verbose(env, "tail_call would lead to reference leak\n"); in check_helper_call()
10074 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
10079 err = push_callback_call(env, insn, insn_idx, meta.subprogno, in check_helper_call()
10083 err = push_callback_call(env, insn, insn_idx, meta.subprogno, in check_helper_call()
10087 err = push_callback_call(env, insn, insn_idx, meta.subprogno, in check_helper_call()
10091 err = check_bpf_snprintf_call(env, regs); in check_helper_call()
10094 update_loop_inline_state(env, meta.subprogno); in check_helper_call()
10098 err = mark_chain_precision(env, BPF_REG_1); in check_helper_call()
10101 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { in check_helper_call()
10102 err = push_callback_call(env, insn, insn_idx, meta.subprogno, in check_helper_call()
10105 cur_func(env)->callback_depth = 0; in check_helper_call()
10106 if (env->log.level & BPF_LOG_LEVEL2) in check_helper_call()
10107 verbose(env, "frame%d bpf_loop iteration limit reached\n", in check_helper_call()
10108 env->cur_state->curframe); in check_helper_call()
10113 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n", in check_helper_call()
10114 reg_type_str(env, regs[BPF_REG_1].type)); in check_helper_call()
10120 env->prog->expected_attach_type == BPF_LSM_CGROUP) { in check_helper_call()
10121 if (!env->prog->aux->attach_func_proto->type) { in check_helper_call()
10125 verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); in check_helper_call()
10135 reg = get_dynptr_arg_reg(env, fn, regs); in check_helper_call()
10141 verbose(env, "verifier internal error: meta.dynptr_id already set\n"); in check_helper_call()
10145 verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); in check_helper_call()
10149 id = dynptr_id(env, reg); in check_helper_call()
10151 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); in check_helper_call()
10155 ref_obj_id = dynptr_ref_obj_id(env, reg); in check_helper_call()
10157 verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); in check_helper_call()
10171 reg = get_dynptr_arg_reg(env, fn, regs); in check_helper_call()
10175 dynptr_type = dynptr_get_type(env, reg); in check_helper_call()
10188 err = push_callback_call(env, insn, insn_idx, meta.subprogno, in check_helper_call()
10198 mark_reg_not_init(env, regs, caller_saved[i]); in check_helper_call()
10199 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); in check_helper_call()
10212 mark_reg_unknown(env, regs, BPF_REG_0); in check_helper_call()
10219 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10225 verbose(env, in check_helper_call()
10234 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10238 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10242 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10246 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10250 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10258 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10269 verbose(env, "unable to resolve the size of type '%s': %ld\n", in check_helper_call()
10294 mark_reg_known_zero(env, regs, BPF_REG_0); in check_helper_call()
10303 verbose(env, "verifier internal error:"); in check_helper_call()
10304 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", in check_helper_call()
10312 verbose(env, "invalid return type %u of func %s#%d\n", in check_helper_call()
10322 verbose(env, "unknown return type %u of func %s#%d\n", in check_helper_call()
10328 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
10331 verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n", in check_helper_call()
10343 int id = acquire_reference_state(env, insn_idx); in check_helper_call()
10355 err = check_map_func_compatibility(env, meta.map_ptr, func_id); in check_helper_call()
10361 !env->prog->has_callchain_buf) { in check_helper_call()
10372 verbose(env, err_str, func_id_name(func_id), func_id); in check_helper_call()
10376 env->prog->has_callchain_buf = true; in check_helper_call()
10380 env->prog->call_get_stack = true; in check_helper_call()
10383 if (check_get_func_ip(env)) in check_helper_call()
10385 env->prog->call_get_func_ip = true; in check_helper_call()
10389 clear_all_pkt_pointers(env); in check_helper_call()
10396 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, in mark_btf_func_reg_size() argument
10399 struct bpf_reg_state *reg = &cur_regs(env)[regno]; in mark_btf_func_reg_size()
10405 DEF_NOT_SUBREG : env->insn_idx + 1; in mark_btf_func_reg_size()
10409 mark_insn_zext(env, reg); in mark_btf_func_reg_size()
10410 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in mark_btf_func_reg_size()
10412 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); in mark_btf_func_reg_size()
10597 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf, in is_kfunc_arg_callback() argument
10610 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, in __btf_type_is_scalar_struct() argument
10627 verbose(env, "max struct nesting depth exceeded\n"); in __btf_type_is_scalar_struct()
10630 if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1)) in __btf_type_is_scalar_struct()
10749 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, in get_kfunc_ptr_arg_type() argument
10756 struct bpf_reg_state *regs = cur_regs(env); in get_kfunc_ptr_arg_type()
10768 if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) in get_kfunc_ptr_arg_type()
10797 verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n", in get_kfunc_ptr_arg_type()
10804 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
10818 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && in get_kfunc_ptr_arg_type()
10820 verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n", in get_kfunc_ptr_arg_type()
10827 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, in process_kf_arg_ptr_to_btf_id() argument
10874 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) in process_kf_arg_ptr_to_btf_id()
10881 …if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type… in process_kf_arg_ptr_to_btf_id()
10882 …verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\… in process_kf_arg_ptr_to_btf_id()
10890 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in ref_set_non_owning() argument
10892 struct bpf_verifier_state *state = env->cur_state; in ref_set_non_owning()
10896 verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); in ref_set_non_owning()
10901 verbose(env, "verifier internal error: NON_OWN_REF already set\n"); in ref_set_non_owning()
10912 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id) in ref_convert_owning_non_owning() argument
10918 state = cur_func(env); in ref_convert_owning_non_owning()
10921 verbose(env, "verifier internal error: ref_obj_id is zero for " in ref_convert_owning_non_owning()
10933 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in ref_convert_owning_non_owning()
10936 ref_set_non_owning(env, reg); in ref_convert_owning_non_owning()
10942 verbose(env, "verifier internal error: ref state missing for ref_obj_id\n"); in ref_convert_owning_non_owning()
10990 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg) in check_reg_allocation_locked() argument
11003 verbose(env, "verifier internal error: unknown reg type for lock check\n"); in check_reg_allocation_locked()
11008 if (!env->cur_state->active_lock.ptr) in check_reg_allocation_locked()
11010 if (env->cur_state->active_lock.ptr != ptr || in check_reg_allocation_locked()
11011 env->cur_state->active_lock.id != id) { in check_reg_allocation_locked()
11012 verbose(env, "held lock and object are not in the same allocation\n"); in check_reg_allocation_locked()
11049 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env, in check_kfunc_is_graph_root_api() argument
11063 verbose(env, "verifier internal error: unexpected graph root argument type %s\n", in check_kfunc_is_graph_root_api()
11069 verbose(env, "verifier internal error: %s head arg for unknown kfunc\n", in check_kfunc_is_graph_root_api()
11074 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, in check_kfunc_is_graph_node_api() argument
11090 verbose(env, "verifier internal error: unexpected graph node argument type %s\n", in check_kfunc_is_graph_node_api()
11096 verbose(env, "verifier internal error: %s node arg for unknown kfunc\n", in check_kfunc_is_graph_node_api()
11102 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env, in __process_kf_arg_ptr_to_graph_root() argument
11114 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); in __process_kf_arg_ptr_to_graph_root()
11118 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_root()
11123 verbose(env, in __process_kf_arg_ptr_to_graph_root()
11133 verbose(env, "%s not found at offset=%u\n", head_type_name, head_off); in __process_kf_arg_ptr_to_graph_root()
11138 if (check_reg_allocation_locked(env, reg)) { in __process_kf_arg_ptr_to_graph_root()
11139 verbose(env, "bpf_spin_lock at off=%d must be held for %s\n", in __process_kf_arg_ptr_to_graph_root()
11145 verbose(env, "verifier internal error: repeating %s arg\n", head_type_name); in __process_kf_arg_ptr_to_graph_root()
11152 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env, in process_kf_arg_ptr_to_list_head() argument
11156 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD, in process_kf_arg_ptr_to_list_head()
11160 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env, in process_kf_arg_ptr_to_rbtree_root() argument
11164 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT, in process_kf_arg_ptr_to_rbtree_root()
11169 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env, in __process_kf_arg_ptr_to_graph_node() argument
11182 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); in __process_kf_arg_ptr_to_graph_node()
11186 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_node()
11191 verbose(env, in __process_kf_arg_ptr_to_graph_node()
11200 verbose(env, "%s not found at offset=%u\n", node_type_name, node_off); in __process_kf_arg_ptr_to_graph_node()
11208 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, in __process_kf_arg_ptr_to_graph_node()
11210 verbose(env, "operation on %s expects arg#1 %s at offset=%d " in __process_kf_arg_ptr_to_graph_node()
11223 verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n", in __process_kf_arg_ptr_to_graph_node()
11233 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, in process_kf_arg_ptr_to_list_node() argument
11237 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, in process_kf_arg_ptr_to_list_node()
11242 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env, in process_kf_arg_ptr_to_rbtree_node() argument
11246 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, in process_kf_arg_ptr_to_rbtree_node()
11251 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, in check_kfunc_args() argument
11264 verbose(env, "Function %s has %d > %d args\n", func_name, nargs, in check_kfunc_args()
11273 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1]; in check_kfunc_args()
11287 verbose(env, "R%d is not a scalar\n", regno); in check_kfunc_args()
11293 verbose(env, "verifier internal error: only one constant argument permitted\n"); in check_kfunc_args()
11297 verbose(env, "R%d must be a known constant\n", regno); in check_kfunc_args()
11300 ret = mark_chain_precision(env, regno); in check_kfunc_args()
11314 verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc"); in check_kfunc_args()
11319 verbose(env, "R%d is not a const\n", regno); in check_kfunc_args()
11324 ret = mark_chain_precision(env, regno); in check_kfunc_args()
11332 verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t)); in check_kfunc_args()
11338 verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); in check_kfunc_args()
11344 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", in check_kfunc_args()
11357 kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs); in check_kfunc_args()
11369 verbose(env, "R%d must be referenced or trusted\n", regno); in check_kfunc_args()
11373 verbose(env, "R%d must be a rcu pointer\n", regno); in check_kfunc_args()
11402 ret = check_func_arg_reg_off(env, reg, regno, arg_type); in check_kfunc_args()
11409 verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t)); in check_kfunc_args()
11414 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); in check_kfunc_args()
11422 verbose(env, "arg#%d expected pointer to allocated object\n", i); in check_kfunc_args()
11426 verbose(env, "allocated object must be referenced\n"); in check_kfunc_args()
11442 verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i); in check_kfunc_args()
11461 verbose(env, "verifier internal error: no dynptr type for parent of clone\n"); in check_kfunc_args()
11468 verbose(env, "verifier internal error: missing ref obj id for parent of clone\n"); in check_kfunc_args()
11473 ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id); in check_kfunc_args()
11478 int id = dynptr_id(env, reg); in check_kfunc_args()
11481 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); in check_kfunc_args()
11485 meta->initialized_dynptr.type = dynptr_get_type(env, reg); in check_kfunc_args()
11486 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); in check_kfunc_args()
11492 ret = process_iter_arg(env, regno, insn_idx, meta); in check_kfunc_args()
11499 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); in check_kfunc_args()
11503 verbose(env, "allocated object must be referenced\n"); in check_kfunc_args()
11506 ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta); in check_kfunc_args()
11513 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); in check_kfunc_args()
11517 verbose(env, "allocated object must be referenced\n"); in check_kfunc_args()
11520 ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta); in check_kfunc_args()
11526 verbose(env, "arg#%d expected pointer to allocated object\n", i); in check_kfunc_args()
11530 verbose(env, "allocated object must be referenced\n"); in check_kfunc_args()
11533 ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta); in check_kfunc_args()
11540 verbose(env, "rbtree_remove node input must be non-owning ref\n"); in check_kfunc_args()
11543 if (in_rbtree_lock_required_cb(env)) { in check_kfunc_args()
11544 verbose(env, "rbtree_remove not allowed in rbtree cb\n"); in check_kfunc_args()
11549 verbose(env, "arg#%d expected pointer to allocated object\n", i); in check_kfunc_args()
11553 verbose(env, "allocated object must be referenced\n"); in check_kfunc_args()
11558 ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta); in check_kfunc_args()
11567 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); in check_kfunc_args()
11568 verbose(env, "expected %s or socket\n", in check_kfunc_args()
11569 reg_type_str(env, base_type(reg->type) | in check_kfunc_args()
11573 ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i); in check_kfunc_args()
11580 verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n", in check_kfunc_args()
11584 ret = check_mem_reg(env, reg, regno, type_size); in check_kfunc_args()
11596 ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1); in check_kfunc_args()
11598 verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1); in check_kfunc_args()
11605 verbose(env, "verifier internal error: only one constant argument permitted\n"); in check_kfunc_args()
11609 verbose(env, "R%d must be a known constant\n", regno + 1); in check_kfunc_args()
11622 verbose(env, "arg%d expected pointer to func\n", i); in check_kfunc_args()
11629 verbose(env, "arg#%d is neither owning or non-owning ref\n", i); in check_kfunc_args()
11637 verbose(env, "verifier internal error: Couldn't find btf_record\n"); in check_kfunc_args()
11642 verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); in check_kfunc_args()
11653 verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", in check_kfunc_args()
11661 static int fetch_kfunc_meta(struct bpf_verifier_env *env, in fetch_kfunc_meta() argument
11677 desc_btf = find_kfunc_desc_btf(env, insn->off); in fetch_kfunc_meta()
11688 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); in fetch_kfunc_meta()
11703 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in check_kfunc_call() argument
11708 struct bpf_reg_state *regs = cur_regs(env); in check_kfunc_call()
11722 err = fetch_kfunc_meta(env, insn, &meta, &func_name); in check_kfunc_call()
11724 verbose(env, "calling kernel function %s is not allowed\n", func_name); in check_kfunc_call()
11728 insn_aux = &env->insn_aux_data[insn_idx]; in check_kfunc_call()
11733 verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n"); in check_kfunc_call()
11738 if (sleepable && !env->prog->aux->sleepable) { in check_kfunc_call()
11739 verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); in check_kfunc_call()
11744 err = check_kfunc_args(env, &meta, insn_idx); in check_kfunc_call()
11749 err = push_callback_call(env, insn, insn_idx, meta.subprogno, in check_kfunc_call()
11752 verbose(env, "kfunc %s#%d failed callback verification\n", in check_kfunc_call()
11761 if (env->cur_state->active_rcu_lock) { in check_kfunc_call()
11765 if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { in check_kfunc_call()
11766 verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); in check_kfunc_call()
11771 verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); in check_kfunc_call()
11774 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in check_kfunc_call()
11780 env->cur_state->active_rcu_lock = false; in check_kfunc_call()
11782 verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name); in check_kfunc_call()
11786 env->cur_state->active_rcu_lock = true; in check_kfunc_call()
11788 verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name); in check_kfunc_call()
11796 err = release_reference(env, regs[meta.release_regno].ref_obj_id); in check_kfunc_call()
11798 verbose(env, "kfunc %s#%d reference has not been acquired before\n", in check_kfunc_call()
11810 err = ref_convert_owning_non_owning(env, release_ref_obj_id); in check_kfunc_call()
11812 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", in check_kfunc_call()
11817 err = release_reference(env, release_ref_obj_id); in check_kfunc_call()
11819 verbose(env, "kfunc %s#%d reference has not been acquired before\n", in check_kfunc_call()
11826 mark_reg_not_init(env, regs, caller_saved[i]); in check_kfunc_call()
11836 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); in check_kfunc_call()
11842 mark_reg_unknown(env, regs, BPF_REG_0); in check_kfunc_call()
11843 mark_btf_func_reg_size(env, BPF_REG_0, t->size); in check_kfunc_call()
11856 verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); in check_kfunc_call()
11860 ret_btf = env->prog->aux->btf; in check_kfunc_call()
11865 verbose(env, "bpf_obj_new requires prog BTF\n"); in check_kfunc_call()
11871 verbose(env, "bpf_obj_new type ID argument must be of a struct\n"); in check_kfunc_call()
11875 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11884 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11903 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11910 verbose(env, in check_kfunc_call()
11915 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11923 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11926 verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); in check_kfunc_call()
11939 if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { in check_kfunc_call()
11940 verbose(env, "the prog does not allow writes to packet data\n"); in check_kfunc_call()
11946 verbose(env, "verifier internal error: no dynptr id\n"); in check_kfunc_call()
11956 verbose(env, "kernel function %s unhandled dynamic return type\n", in check_kfunc_call()
11972 verbose(env, in check_kfunc_call()
11980 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
11991 mark_reg_known_zero(env, regs, BPF_REG_0); in check_kfunc_call()
12000 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12002 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); in check_kfunc_call()
12004 int id = acquire_reference_state(env, insn_idx); in check_kfunc_call()
12012 ref_set_non_owning(env, &regs[BPF_REG_0]); in check_kfunc_call()
12016 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
12034 mark_btf_func_reg_size(env, regno, sizeof(void *)); in check_kfunc_call()
12037 mark_btf_func_reg_size(env, regno, t->size); in check_kfunc_call()
12041 err = process_iter_next_call(env, insn_idx, &meta); in check_kfunc_call()
12089 static bool check_reg_sane_offset(struct bpf_verifier_env *env, in check_reg_sane_offset() argument
12098 verbose(env, "math between %s pointer and %lld is not allowed\n", in check_reg_sane_offset()
12099 reg_type_str(env, type), val); in check_reg_sane_offset()
12104 verbose(env, "%s pointer offset %d is not allowed\n", in check_reg_sane_offset()
12105 reg_type_str(env, type), reg->off); in check_reg_sane_offset()
12110 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", in check_reg_sane_offset()
12111 reg_type_str(env, type)); in check_reg_sane_offset()
12116 verbose(env, "value %lld makes %s pointer be out of bounds\n", in check_reg_sane_offset()
12117 smin, reg_type_str(env, type)); in check_reg_sane_offset()
12163 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, in can_skip_alu_sanitation() argument
12166 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
12186 static int sanitize_val_alu(struct bpf_verifier_env *env, in sanitize_val_alu() argument
12189 struct bpf_insn_aux_data *aux = cur_aux(env); in sanitize_val_alu()
12191 if (can_skip_alu_sanitation(env, insn)) in sanitize_val_alu()
12208 sanitize_speculative_path(struct bpf_verifier_env *env, in sanitize_speculative_path() argument
12215 branch = push_stack(env, next_idx, curr_idx, true); in sanitize_speculative_path()
12219 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
12221 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
12222 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
12228 static int sanitize_ptr_alu(struct bpf_verifier_env *env, in sanitize_ptr_alu() argument
12236 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; in sanitize_ptr_alu()
12237 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
12247 if (can_skip_alu_sanitation(env, insn)) in sanitize_ptr_alu()
12286 env->explore_alu_limits = true; in sanitize_ptr_alu()
12317 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, in sanitize_ptr_alu()
12318 env->insn_idx); in sanitize_ptr_alu()
12324 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) in sanitize_mark_insn_seen() argument
12326 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_mark_insn_seen()
12334 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in sanitize_mark_insn_seen()
12337 static int sanitize_err(struct bpf_verifier_env *env, in sanitize_err() argument
12348 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", in sanitize_err()
12352 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", in sanitize_err()
12356 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", in sanitize_err()
12360 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", in sanitize_err()
12364 verbose(env, "R%d could not be pushed for speculative verification, %s\n", in sanitize_err()
12368 verbose(env, "verifier internal error: unknown reason (%d)\n", in sanitize_err()
12387 struct bpf_verifier_env *env, in check_stack_access_for_ptr_arithmetic() argument
12396 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", in check_stack_access_for_ptr_arithmetic()
12402 verbose(env, "R%d stack pointer arithmetic goes out of range, " in check_stack_access_for_ptr_arithmetic()
12410 static int sanitize_check_bounds(struct bpf_verifier_env *env, in sanitize_check_bounds() argument
12419 if (env->bypass_spec_v1) in sanitize_check_bounds()
12424 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, in sanitize_check_bounds()
12429 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { in sanitize_check_bounds()
12430 verbose(env, "R%d pointer arithmetic of map value goes out of range, " in sanitize_check_bounds()
12447 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, in adjust_ptr_min_max_vals() argument
12452 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
12472 __mark_reg_unknown(env, dst_reg); in adjust_ptr_min_max_vals()
12478 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
12479 __mark_reg_unknown(env, dst_reg); in adjust_ptr_min_max_vals()
12483 verbose(env, in adjust_ptr_min_max_vals()
12490 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
12491 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
12510 verbose(env, "R%d pointer arithmetic on %s prohibited\n", in adjust_ptr_min_max_vals()
12511 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
12523 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
12524 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
12531 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, in adjust_ptr_min_max_vals()
12534 return sanitize_err(env, insn, ret, off_reg, dst_reg); in adjust_ptr_min_max_vals()
12583 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
12591 verbose(env, "R%d tried to subtract pointer from scalar\n", in adjust_ptr_min_max_vals()
12600 verbose(env, "R%d subtraction from stack pointer prohibited\n", in adjust_ptr_min_max_vals()
12642 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
12652 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", in adjust_ptr_min_max_vals()
12657 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", in adjust_ptr_min_max_vals()
12662 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
12665 if (sanitize_check_bounds(env, insn, dst_reg) < 0) in adjust_ptr_min_max_vals()
12668 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, in adjust_ptr_min_max_vals()
12671 return sanitize_err(env, insn, ret, off_reg, dst_reg); in adjust_ptr_min_max_vals()
13248 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, in adjust_scalar_min_max_vals() argument
13253 struct bpf_reg_state *regs = cur_regs(env); in adjust_scalar_min_max_vals()
13282 __mark_reg_unknown(env, dst_reg); in adjust_scalar_min_max_vals()
13293 __mark_reg_unknown(env, dst_reg); in adjust_scalar_min_max_vals()
13300 __mark_reg_unknown(env, dst_reg); in adjust_scalar_min_max_vals()
13305 ret = sanitize_val_alu(env, insn); in adjust_scalar_min_max_vals()
13307 return sanitize_err(env, insn, ret, NULL, NULL); in adjust_scalar_min_max_vals()
13360 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13373 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13386 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13395 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_scalar_min_max_vals()
13409 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, in adjust_reg_min_max_vals() argument
13412 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
13436 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
13437 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
13440 verbose(env, "R%d pointer %s pointer prohibited\n", in adjust_reg_min_max_vals()
13449 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
13452 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
13457 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
13460 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
13464 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
13476 return adjust_ptr_min_max_vals(env, insn, in adjust_reg_min_max_vals()
13482 print_verifier_state(env, state, true); in adjust_reg_min_max_vals()
13483 verbose(env, "verifier internal error: unexpected ptr_reg\n"); in adjust_reg_min_max_vals()
13487 print_verifier_state(env, state, true); in adjust_reg_min_max_vals()
13488 verbose(env, "verifier internal error: no src_reg\n"); in adjust_reg_min_max_vals()
13491 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); in adjust_reg_min_max_vals()
13495 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_alu_op() argument
13497 struct bpf_reg_state *regs = cur_regs(env); in check_alu_op()
13506 verbose(env, "BPF_NEG uses reserved fields\n"); in check_alu_op()
13514 verbose(env, "BPF_END uses reserved fields\n"); in check_alu_op()
13520 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
13524 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
13525 verbose(env, "R%d pointer arithmetic prohibited\n", in check_alu_op()
13531 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
13539 verbose(env, "BPF_MOV uses reserved fields\n"); in check_alu_op()
13545 verbose(env, "BPF_MOV uses reserved fields\n"); in check_alu_op()
13551 verbose(env, "BPF_MOV uses reserved fields\n"); in check_alu_op()
13557 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
13562 verbose(env, "BPF_MOV uses reserved fields\n"); in check_alu_op()
13568 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
13588 src_reg->id = ++env->id_gen; in check_alu_op()
13594 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
13595 verbose(env, in check_alu_op()
13604 src_reg->id = ++env->id_gen; in check_alu_op()
13612 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
13617 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
13618 verbose(env, in check_alu_op()
13627 src_reg->id = ++env->id_gen; in check_alu_op()
13636 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
13642 src_reg->id = ++env->id_gen; in check_alu_op()
13647 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
13651 mark_reg_unknown(env, regs, in check_alu_op()
13662 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
13674 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); in check_alu_op()
13682 verbose(env, "BPF_ALU uses reserved fields\n"); in check_alu_op()
13686 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
13692 verbose(env, "BPF_ALU uses reserved fields\n"); in check_alu_op()
13698 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
13704 verbose(env, "div by zero\n"); in check_alu_op()
13713 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
13719 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
13723 return adjust_reg_min_max_vals(env, insn); in check_alu_op()
14457 static int check_cond_jmp_op(struct bpf_verifier_env *env, in check_cond_jmp_op() argument
14460 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
14472 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); in check_cond_jmp_op()
14477 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
14484 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); in check_cond_jmp_op()
14489 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
14495 is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
14496 verbose(env, "R%d pointer comparison prohibited\n", in check_cond_jmp_op()
14502 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); in check_cond_jmp_op()
14546 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
14549 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
14559 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
14560 !sanitize_speculative_path(env, insn, *insn_idx + 1, in check_cond_jmp_op()
14563 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
14564 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
14572 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
14573 !sanitize_speculative_path(env, insn, in check_cond_jmp_op()
14577 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
14578 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
14582 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
14694 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
14695 verbose(env, "R%d pointer comparison prohibited\n", in check_cond_jmp_op()
14699 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
14700 print_insn_state(env, this_branch->frame[this_branch->curframe]); in check_cond_jmp_op()
14705 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_ld_imm() argument
14707 struct bpf_insn_aux_data *aux = cur_aux(env); in check_ld_imm()
14708 struct bpf_reg_state *regs = cur_regs(env); in check_ld_imm()
14714 verbose(env, "invalid BPF_LD_IMM insn\n"); in check_ld_imm()
14718 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); in check_ld_imm()
14722 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
14739 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
14752 verbose(env, "bpf verifier is misconfigured\n"); in check_ld_imm()
14759 struct bpf_prog_aux *aux = env->prog->aux; in check_ld_imm()
14760 u32 subprogno = find_subprog(env, in check_ld_imm()
14761 env->insn_idx + insn->imm + 1); in check_ld_imm()
14764 verbose(env, "missing btf func_info\n"); in check_ld_imm()
14768 verbose(env, "callback function not static\n"); in check_ld_imm()
14777 map = env->used_maps[aux->map_index]; in check_ld_imm()
14790 verbose(env, "bpf verifier is misconfigured\n"); in check_ld_imm()
14824 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) in check_ld_abs() argument
14826 struct bpf_reg_state *regs = cur_regs(env); in check_ld_abs()
14831 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
14832 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); in check_ld_abs()
14836 if (!env->ops->gen_ld_abs) { in check_ld_abs()
14837 verbose(env, "bpf verifier is misconfigured\n"); in check_ld_abs()
14844 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); in check_ld_abs()
14849 err = check_reg_arg(env, ctx_reg, SRC_OP); in check_ld_abs()
14857 err = check_reference_leak(env); in check_ld_abs()
14859 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); in check_ld_abs()
14863 if (env->cur_state->active_lock.ptr) { in check_ld_abs()
14864 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); in check_ld_abs()
14868 if (env->cur_state->active_rcu_lock) { in check_ld_abs()
14869 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n"); in check_ld_abs()
14874 verbose(env, in check_ld_abs()
14881 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
14886 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg); in check_ld_abs()
14892 mark_reg_not_init(env, regs, caller_saved[i]); in check_ld_abs()
14893 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); in check_ld_abs()
14900 mark_reg_unknown(env, regs, BPF_REG_0); in check_ld_abs()
14902 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
14906 static int check_return_code(struct bpf_verifier_env *env) in check_return_code() argument
14909 const struct bpf_prog *prog = env->prog; in check_return_code()
14912 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
14914 struct bpf_func_state *frame = env->cur_state->frame[0]; in check_return_code()
14940 err = check_reg_arg(env, BPF_REG_0, SRC_OP); in check_return_code()
14944 if (is_pointer_value(env, BPF_REG_0)) { in check_return_code()
14945 verbose(env, "R0 leaks addr as return value\n"); in check_return_code()
14949 reg = cur_regs(env) + BPF_REG_0; in check_return_code()
14954 verbose(env, "In async callback the register R0 is not a known value (%s)\n", in check_return_code()
14955 reg_type_str(env, reg->type)); in check_return_code()
14960 verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0"); in check_return_code()
14968 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", in check_return_code()
14969 reg_type_str(env, reg->type)); in check_return_code()
14977 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
14978 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
14979 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
14980 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
14981 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
14982 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) in check_return_code()
14984 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || in check_return_code()
14985 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) in check_return_code()
14989 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
15001 if (!env->prog->aux->attach_btf_id) in check_return_code()
15006 switch (env->prog->expected_attach_type) { in check_return_code()
15025 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { in check_return_code()
15031 if (!env->prog->aux->attach_func_proto->type) { in check_return_code()
15051 verbose(env, "At program exit the register R0 is not a known value (%s)\n", in check_return_code()
15052 reg_type_str(env, reg->type)); in check_return_code()
15057 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); in check_return_code()
15061 verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); in check_return_code()
15067 env->prog->enforce_expected_attach_type = 1; in check_return_code()
15111 static void mark_prune_point(struct bpf_verifier_env *env, int idx) in mark_prune_point() argument
15113 env->insn_aux_data[idx].prune_point = true; in mark_prune_point()
15116 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx) in is_prune_point() argument
15118 return env->insn_aux_data[insn_idx].prune_point; in is_prune_point()
15121 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx) in mark_force_checkpoint() argument
15123 env->insn_aux_data[idx].force_checkpoint = true; in mark_force_checkpoint()
15126 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) in is_force_checkpoint() argument
15128 return env->insn_aux_data[insn_idx].force_checkpoint; in is_force_checkpoint()
15131 static void mark_calls_callback(struct bpf_verifier_env *env, int idx) in mark_calls_callback() argument
15133 env->insn_aux_data[idx].calls_callback = true; in mark_calls_callback()
15136 static bool calls_callback(struct bpf_verifier_env *env, int insn_idx) in calls_callback() argument
15138 return env->insn_aux_data[insn_idx].calls_callback; in calls_callback()
15151 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) in push_insn() argument
15153 int *insn_stack = env->cfg.insn_stack; in push_insn()
15154 int *insn_state = env->cfg.insn_state; in push_insn()
15162 if (w < 0 || w >= env->prog->len) { in push_insn()
15163 verbose_linfo(env, t, "%d: ", t); in push_insn()
15164 verbose(env, "jump out of range from insn %d to %d\n", t, w); in push_insn()
15170 mark_prune_point(env, w); in push_insn()
15171 mark_jmp_point(env, w); in push_insn()
15178 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
15180 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
15183 if (env->bpf_capable) in push_insn()
15185 verbose_linfo(env, t, "%d: ", t); in push_insn()
15186 verbose_linfo(env, w, "%d: ", w); in push_insn()
15187 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
15193 verbose(env, "insn state internal bug\n"); in push_insn()
15200 struct bpf_verifier_env *env, in visit_func_call_insn() argument
15206 ret = push_insn(t, t + insn_sz, FALLTHROUGH, env); in visit_func_call_insn()
15210 mark_prune_point(env, t + insn_sz); in visit_func_call_insn()
15212 mark_jmp_point(env, t + insn_sz); in visit_func_call_insn()
15215 mark_prune_point(env, t); in visit_func_call_insn()
15216 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); in visit_func_call_insn()
15226 static int visit_insn(int t, struct bpf_verifier_env *env) in visit_insn() argument
15228 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; in visit_insn()
15232 return visit_func_call_insn(t, insns, env, true); in visit_insn()
15238 return push_insn(t, t + insn_sz, FALLTHROUGH, env); in visit_insn()
15252 mark_prune_point(env, t); in visit_insn()
15263 mark_calls_callback(env, t); in visit_insn()
15264 mark_force_checkpoint(env, t); in visit_insn()
15265 mark_prune_point(env, t); in visit_insn()
15266 mark_jmp_point(env, t); in visit_insn()
15271 ret = fetch_kfunc_meta(env, insn, &meta, NULL); in visit_insn()
15273 mark_prune_point(env, t); in visit_insn()
15285 mark_force_checkpoint(env, t); in visit_insn()
15288 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); in visit_insn()
15300 ret = push_insn(t, t + off + 1, FALLTHROUGH, env); in visit_insn()
15304 mark_prune_point(env, t + off + 1); in visit_insn()
15305 mark_jmp_point(env, t + off + 1); in visit_insn()
15311 mark_prune_point(env, t); in visit_insn()
15313 ret = push_insn(t, t + 1, FALLTHROUGH, env); in visit_insn()
15317 return push_insn(t, t + insn->off + 1, BRANCH, env); in visit_insn()
15324 static int check_cfg(struct bpf_verifier_env *env) in check_cfg() argument
15326 int insn_cnt = env->prog->len; in check_cfg()
15331 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
15335 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
15343 env->cfg.cur_stack = 1; in check_cfg()
15345 while (env->cfg.cur_stack > 0) { in check_cfg()
15346 int t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
15348 ret = visit_insn(t, env); in check_cfg()
15352 env->cfg.cur_stack--; in check_cfg()
15358 verbose(env, "visit_insn internal bug\n"); in check_cfg()
15365 if (env->cfg.cur_stack < 0) { in check_cfg()
15366 verbose(env, "pop stack internal bug\n"); in check_cfg()
15372 struct bpf_insn *insn = &env->prog->insnsi[i]; in check_cfg()
15375 verbose(env, "unreachable insn %d\n", i); in check_cfg()
15381 verbose(env, "jump into the middle of ldimm64 insn %d\n", i); in check_cfg()
15393 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
15397 static int check_abnormal_return(struct bpf_verifier_env *env) in check_abnormal_return() argument
15401 for (i = 1; i < env->subprog_cnt; i++) { in check_abnormal_return()
15402 if (env->subprog_info[i].has_ld_abs) { in check_abnormal_return()
15403 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); in check_abnormal_return()
15406 if (env->subprog_info[i].has_tail_call) { in check_abnormal_return()
15407 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); in check_abnormal_return()
15418 static int check_btf_func(struct bpf_verifier_env *env, in check_btf_func() argument
15436 if (check_abnormal_return(env)) in check_btf_func()
15441 if (nfuncs != env->subprog_cnt) { in check_btf_func()
15442 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); in check_btf_func()
15450 verbose(env, "invalid func info rec size %u\n", urec_size); in check_btf_func()
15454 prog = env->prog; in check_btf_func()
15471 verbose(env, "nonzero tailing record in func info"); in check_btf_func()
15492 verbose(env, in check_btf_func()
15498 verbose(env, in check_btf_func()
15504 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
15505 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); in check_btf_func()
15512 verbose(env, "invalid type id %d in func info", in check_btf_func()
15525 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { in check_btf_func()
15526 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); in check_btf_func()
15529 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { in check_btf_func()
15530 verbose(env, "tail_call is only allowed in functions that return 'int'.\n"); in check_btf_func()
15549 static void adjust_btf_func(struct bpf_verifier_env *env) in adjust_btf_func() argument
15551 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
15557 for (i = 0; i < env->subprog_cnt; i++) in adjust_btf_func()
15558 aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
15564 static int check_btf_line(struct bpf_verifier_env *env, in check_btf_line() argument
15596 prog = env->prog; in check_btf_line()
15600 sub = env->subprog_info; in check_btf_line()
15608 verbose(env, "nonzero tailing record in line_info"); in check_btf_line()
15635 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
15643 verbose(env, in check_btf_line()
15652 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); in check_btf_line()
15657 if (s != env->subprog_cnt) { in check_btf_line()
15662 verbose(env, "missing bpf_line_info for func#%u\n", s); in check_btf_line()
15672 if (s != env->subprog_cnt) { in check_btf_line()
15673 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", in check_btf_line()
15674 env->subprog_cnt - s, s); in check_btf_line()
15692 static int check_core_relo(struct bpf_verifier_env *env, in check_core_relo() argument
15698 struct bpf_prog *prog = env->prog; in check_core_relo()
15701 .log = &env->log, in check_core_relo()
15731 verbose(env, "nonzero tailing record in core_relo"); in check_core_relo()
15746 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", in check_core_relo()
15761 static int check_btf_info(struct bpf_verifier_env *env, in check_btf_info() argument
15769 if (check_abnormal_return(env)) in check_btf_info()
15781 env->prog->aux->btf = btf; in check_btf_info()
15783 err = check_btf_func(env, attr, uattr); in check_btf_info()
15787 err = check_btf_line(env, attr, uattr); in check_btf_info()
15791 err = check_core_relo(env, attr, uattr); in check_btf_info()
15863 static void clean_func_state(struct bpf_verifier_env *env, in clean_func_state() argument
15877 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
15885 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); in clean_func_state()
15892 static void clean_verifier_state(struct bpf_verifier_env *env, in clean_verifier_state() argument
15902 clean_func_state(env, st->frame[i]); in clean_verifier_state()
15937 static void clean_live_states(struct bpf_verifier_env *env, int insn, in clean_live_states() argument
15942 sl = *explored_state(env, insn); in clean_live_states()
15949 clean_verifier_state(env, &sl->state); in clean_live_states()
15965 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, in regsafe() argument
16006 if (env->explore_alu_limits) { in regsafe()
16086 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, in stacksafe() argument
16114 if (env->allow_uninit_stack && in stacksafe()
16154 if (!regsafe(env, &old->stack[spi].spilled_ptr, in stacksafe()
16236 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, in func_states_equal() argument
16242 if (!regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
16243 &env->idmap_scratch, exact)) in func_states_equal()
16246 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) in func_states_equal()
16249 if (!refsafe(old, cur, &env->idmap_scratch)) in func_states_equal()
16255 static void reset_idmap_scratch(struct bpf_verifier_env *env) in reset_idmap_scratch() argument
16257 env->idmap_scratch.tmp_id_gen = env->id_gen; in reset_idmap_scratch()
16258 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); in reset_idmap_scratch()
16261 static bool states_equal(struct bpf_verifier_env *env, in states_equal() argument
16271 reset_idmap_scratch(env); in states_equal()
16289 !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) in states_equal()
16301 if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) in states_equal()
16310 static int propagate_liveness_reg(struct bpf_verifier_env *env, in propagate_liveness_reg() argument
16329 err = mark_reg_read(env, reg, parent_reg, flag); in propagate_liveness_reg()
16343 static int propagate_liveness(struct bpf_verifier_env *env, in propagate_liveness() argument
16365 err = propagate_liveness_reg(env, &state_reg[i], in propagate_liveness()
16370 mark_insn_zext(env, &parent_reg[i]); in propagate_liveness()
16378 err = propagate_liveness_reg(env, state_reg, in propagate_liveness()
16390 static int propagate_precision(struct bpf_verifier_env *env, in propagate_precision() argument
16407 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
16409 verbose(env, "frame %d: propagating r%d", fr, i); in propagate_precision()
16411 verbose(env, ",r%d", i); in propagate_precision()
16413 bt_set_frame_reg(&env->bt, fr, i); in propagate_precision()
16425 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
16427 verbose(env, "frame %d: propagating fp%d", in propagate_precision()
16430 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
16432 bt_set_frame_slot(&env->bt, fr, i); in propagate_precision()
16436 verbose(env, "\n"); in propagate_precision()
16439 err = mark_chain_precision_batch(env); in propagate_precision()
16464 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx) in is_iter_next_insn() argument
16466 return env->insn_aux_data[insn_idx].is_iter_next; in is_iter_next_insn()
16551 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) in is_state_visited() argument
16555 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; in is_state_visited()
16557 bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx); in is_state_visited()
16569 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
16570 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
16573 pprev = explored_state(env, insn_idx); in is_state_visited()
16576 clean_live_states(env, insn_idx, cur); in is_state_visited()
16637 if (is_iter_next_insn(env, insn_idx)) { in is_state_visited()
16638 if (states_equal(env, &sl->state, cur, true)) { in is_state_visited()
16653 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; in is_state_visited()
16661 if (calls_callback(env, insn_idx)) { in is_state_visited()
16662 if (states_equal(env, &sl->state, cur, true)) in is_state_visited()
16668 states_equal(env, &sl->state, cur, false) && in is_state_visited()
16671 verbose_linfo(env, insn_idx, "; "); in is_state_visited()
16672 verbose(env, "infinite loop detected at insn %d\n", insn_idx); in is_state_visited()
16673 verbose(env, "cur state:"); in is_state_visited()
16674 print_verifier_state(env, cur->frame[cur->curframe], true); in is_state_visited()
16675 verbose(env, "old state:"); in is_state_visited()
16676 print_verifier_state(env, sl->state.frame[cur->curframe], true); in is_state_visited()
16693 env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
16694 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
16725 if (states_equal(env, &sl->state, cur, force_exact)) { in is_state_visited()
16740 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
16747 err = err ? : push_jmp_history(env, cur); in is_state_visited()
16748 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
16770 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; in is_state_visited()
16785 env->peak_states--; in is_state_visited()
16791 sl->next = env->free_list; in is_state_visited()
16792 env->free_list = sl; in is_state_visited()
16802 if (env->max_states_per_insn < states_cnt) in is_state_visited()
16803 env->max_states_per_insn = states_cnt; in is_state_visited()
16805 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
16823 env->total_states++; in is_state_visited()
16824 env->peak_states++; in is_state_visited()
16825 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
16826 env->prev_insn_processed = env->insn_processed; in is_state_visited()
16829 if (env->bpf_capable) in is_state_visited()
16830 mark_all_scalars_imprecise(env, cur); in is_state_visited()
16848 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
16849 *explored_state(env, insn_idx) = new_sl; in is_state_visited()
16918 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, in save_aux_ptr_type() argument
16921 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; in save_aux_ptr_type()
16948 verbose(env, "same insn cannot be used with different pointers\n"); in save_aux_ptr_type()
16956 static int do_check(struct bpf_verifier_env *env) in do_check() argument
16958 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check()
16959 struct bpf_verifier_state *state = env->cur_state; in do_check()
16960 struct bpf_insn *insns = env->prog->insnsi; in do_check()
16962 int insn_cnt = env->prog->len; in do_check()
16971 env->prev_insn_idx = prev_insn_idx; in do_check()
16972 if (env->insn_idx >= insn_cnt) { in do_check()
16973 verbose(env, "invalid insn idx %d insn_cnt %d\n", in do_check()
16974 env->insn_idx, insn_cnt); in do_check()
16978 insn = &insns[env->insn_idx]; in do_check()
16981 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
16982 verbose(env, in do_check()
16984 env->insn_processed); in do_check()
16988 state->last_insn_idx = env->prev_insn_idx; in do_check()
16990 if (is_prune_point(env, env->insn_idx)) { in do_check()
16991 err = is_state_visited(env, env->insn_idx); in do_check()
16996 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
16998 verbose(env, "\nfrom %d to %d%s: safe\n", in do_check()
16999 env->prev_insn_idx, env->insn_idx, in do_check()
17000 env->cur_state->speculative ? in do_check()
17003 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
17009 if (is_jmp_point(env, env->insn_idx)) { in do_check()
17010 err = push_jmp_history(env, state); in do_check()
17021 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { in do_check()
17022 verbose(env, "\nfrom %d to %d%s:", in do_check()
17023 env->prev_insn_idx, env->insn_idx, in do_check()
17024 env->cur_state->speculative ? in do_check()
17026 print_verifier_state(env, state->frame[state->curframe], true); in do_check()
17030 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
17034 .private_data = env, in do_check()
17037 if (verifier_state_scratched(env)) in do_check()
17038 print_insn_state(env, state->frame[state->curframe]); in do_check()
17040 verbose_linfo(env, env->insn_idx, "; "); in do_check()
17041 env->prev_log_pos = env->log.end_pos; in do_check()
17042 verbose(env, "%d: ", env->insn_idx); in do_check()
17043 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
17044 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; in do_check()
17045 env->prev_log_pos = env->log.end_pos; in do_check()
17048 if (bpf_prog_is_offloaded(env->prog->aux)) { in do_check()
17049 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
17050 env->prev_insn_idx); in do_check()
17055 regs = cur_regs(env); in do_check()
17056 sanitize_mark_insn_seen(env); in do_check()
17057 prev_insn_idx = env->insn_idx; in do_check()
17060 err = check_alu_op(env, insn); in do_check()
17070 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
17074 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
17083 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
17090 err = save_aux_ptr_type(env, src_reg_type, true); in do_check()
17097 err = check_atomic(env, env->insn_idx, insn); in do_check()
17100 env->insn_idx++; in do_check()
17105 verbose(env, "BPF_STX uses reserved fields\n"); in do_check()
17110 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
17114 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
17121 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
17127 err = save_aux_ptr_type(env, dst_reg_type, false); in do_check()
17135 verbose(env, "BPF_ST uses reserved fields\n"); in do_check()
17139 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
17146 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
17152 err = save_aux_ptr_type(env, dst_reg_type, false); in do_check()
17158 env->jmps_processed++; in do_check()
17168 verbose(env, "BPF_CALL uses reserved fields\n"); in do_check()
17172 if (env->cur_state->active_lock.ptr) { in do_check()
17177 verbose(env, "function calls are not allowed while holding a lock\n"); in do_check()
17182 err = check_func_call(env, insn, &env->insn_idx); in do_check()
17184 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check()
17186 err = check_helper_call(env, insn, &env->insn_idx); in do_check()
17190 mark_reg_scratched(env, BPF_REG_0); in do_check()
17197 verbose(env, "BPF_JA uses reserved fields\n"); in do_check()
17202 env->insn_idx += insn->off + 1; in do_check()
17204 env->insn_idx += insn->imm + 1; in do_check()
17213 verbose(env, "BPF_EXIT uses reserved fields\n"); in do_check()
17217 if (env->cur_state->active_lock.ptr && in do_check()
17218 !in_rbtree_lock_required_cb(env)) { in do_check()
17219 verbose(env, "bpf_spin_unlock is missing\n"); in do_check()
17223 if (env->cur_state->active_rcu_lock && in do_check()
17224 !in_rbtree_lock_required_cb(env)) { in do_check()
17225 verbose(env, "bpf_rcu_read_unlock is missing\n"); in do_check()
17235 err = check_reference_leak(env); in do_check()
17241 err = prepare_func_exit(env, &env->insn_idx); in do_check()
17248 err = check_return_code(env); in do_check()
17252 mark_verifier_state_scratched(env); in do_check()
17253 update_branch_counts(env, env->cur_state); in do_check()
17254 err = pop_stack(env, &prev_insn_idx, in do_check()
17255 &env->insn_idx, pop_log); in do_check()
17265 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
17273 err = check_ld_abs(env, insn); in do_check()
17278 err = check_ld_imm(env, insn); in do_check()
17282 env->insn_idx++; in do_check()
17283 sanitize_mark_insn_seen(env); in do_check()
17285 verbose(env, "invalid BPF_LD mode\n"); in do_check()
17289 verbose(env, "unknown insn class %d\n", class); in do_check()
17293 env->insn_idx++; in do_check()
17330 static int check_pseudo_btf_id(struct bpf_verifier_env *env, in check_pseudo_btf_id() argument
17350 verbose(env, "invalid module BTF object FD specified.\n"); in check_pseudo_btf_id()
17355 …verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"… in check_pseudo_btf_id()
17364 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); in check_pseudo_btf_id()
17370 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id); in check_pseudo_btf_id()
17378 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", in check_pseudo_btf_id()
17418 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", in check_pseudo_btf_id()
17432 for (i = 0; i < env->used_btf_cnt; i++) { in check_pseudo_btf_id()
17433 if (env->used_btfs[i].btf == btf) { in check_pseudo_btf_id()
17439 if (env->used_btf_cnt >= MAX_USED_BTFS) { in check_pseudo_btf_id()
17444 btf_mod = &env->used_btfs[env->used_btf_cnt]; in check_pseudo_btf_id()
17457 env->used_btf_cnt++; in check_pseudo_btf_id()
17479 static int check_map_prog_compatibility(struct bpf_verifier_env *env, in check_map_prog_compatibility() argument
17489 verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n"); in check_map_prog_compatibility()
17496 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); in check_map_prog_compatibility()
17501 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); in check_map_prog_compatibility()
17508 verbose(env, "tracing progs cannot use bpf_timer yet\n"); in check_map_prog_compatibility()
17515 verbose(env, "offload device mismatch between prog and map\n"); in check_map_prog_compatibility()
17520 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); in check_map_prog_compatibility()
17542 verbose(env, in check_map_prog_compatibility()
17563 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) in resolve_pseudo_ldimm64() argument
17565 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
17566 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
17569 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
17577 verbose(env, "BPF_LDX uses reserved fields\n"); in resolve_pseudo_ldimm64()
17591 verbose(env, "invalid bpf_ld_imm64 insn\n"); in resolve_pseudo_ldimm64()
17600 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
17601 err = check_pseudo_btf_id(env, insn, aux); in resolve_pseudo_ldimm64()
17608 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
17626 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); in resolve_pseudo_ldimm64()
17633 if (bpfptr_is_null(env->fd_array)) { in resolve_pseudo_ldimm64()
17634 verbose(env, "fd_idx without fd_array is invalid\n"); in resolve_pseudo_ldimm64()
17637 if (copy_from_bpfptr_offset(&fd, env->fd_array, in resolve_pseudo_ldimm64()
17650 verbose(env, "fd %d is not pointing to valid bpf_map\n", in resolve_pseudo_ldimm64()
17655 err = check_map_prog_compatibility(env, map, env->prog); in resolve_pseudo_ldimm64()
17661 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
17669 verbose(env, "direct value offset of %u is not allowed\n", off); in resolve_pseudo_ldimm64()
17675 verbose(env, "no direct value access support for this map type\n"); in resolve_pseudo_ldimm64()
17682 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", in resolve_pseudo_ldimm64()
17696 for (j = 0; j < env->used_map_cnt; j++) { in resolve_pseudo_ldimm64()
17697 if (env->used_maps[j] == map) { in resolve_pseudo_ldimm64()
17704 if (env->used_map_cnt >= MAX_USED_MAPS) { in resolve_pseudo_ldimm64()
17716 aux->map_index = env->used_map_cnt; in resolve_pseudo_ldimm64()
17717 env->used_maps[env->used_map_cnt++] = map; in resolve_pseudo_ldimm64()
17720 bpf_cgroup_storage_assign(env->prog->aux, map)) { in resolve_pseudo_ldimm64()
17721 verbose(env, "only one cgroup storage of each type is allowed\n"); in resolve_pseudo_ldimm64()
17735 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
17748 static void release_maps(struct bpf_verifier_env *env) in release_maps() argument
17750 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
17751 env->used_map_cnt); in release_maps()
17755 static void release_btfs(struct bpf_verifier_env *env) in release_btfs() argument
17757 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, in release_btfs()
17758 env->used_btf_cnt); in release_btfs()
17762 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) in convert_pseudo_ld_imm64() argument
17764 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
17765 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
17781 static void adjust_insn_aux_data(struct bpf_verifier_env *env, in adjust_insn_aux_data() argument
17785 struct bpf_insn_aux_data *old_data = env->insn_aux_data; in adjust_insn_aux_data()
17795 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
17807 new_data[i].zext_dst = insn_has_def32(env, insn + i); in adjust_insn_aux_data()
17809 env->insn_aux_data = new_data; in adjust_insn_aux_data()
17813 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) in adjust_subprog_starts() argument
17820 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
17821 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
17823 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
17841 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, in bpf_patch_insn_data() argument
17848 new_data = vzalloc(array_size(env->prog->len + len - 1, in bpf_patch_insn_data()
17854 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
17857 verbose(env, in bpf_patch_insn_data()
17859 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
17863 adjust_insn_aux_data(env, new_data, new_prog, off, len); in bpf_patch_insn_data()
17864 adjust_subprog_starts(env, off, len); in bpf_patch_insn_data()
17869 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, in adjust_subprog_starts_after_remove() argument
17875 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
17876 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
17879 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
17880 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
17885 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
17889 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
17893 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
17895 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
17896 env->subprog_info + j, in adjust_subprog_starts_after_remove()
17897 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
17898 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
17914 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
17919 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
17920 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
17925 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, in bpf_adj_linfo_after_remove() argument
17928 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
17975 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
17976 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
17980 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
17981 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
17983 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
17989 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) in verifier_remove_insns() argument
17991 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
17992 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
17995 if (bpf_prog_is_offloaded(env->prog->aux)) in verifier_remove_insns()
17996 bpf_prog_offload_remove_insns(env, off, cnt); in verifier_remove_insns()
17998 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
18002 err = adjust_subprog_starts_after_remove(env, off, cnt); in verifier_remove_insns()
18006 err = bpf_adj_linfo_after_remove(env, off, cnt); in verifier_remove_insns()
18027 static void sanitize_dead_code(struct bpf_verifier_env *env) in sanitize_dead_code() argument
18029 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
18031 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
18032 const int insn_cnt = env->prog->len; in sanitize_dead_code()
18057 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) in opt_hard_wire_dead_code_branches() argument
18059 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
18061 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
18062 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
18076 if (bpf_prog_is_offloaded(env->prog->aux)) in opt_hard_wire_dead_code_branches()
18077 bpf_prog_offload_replace_insn(env, i, &ja); in opt_hard_wire_dead_code_branches()
18083 static int opt_remove_dead_code(struct bpf_verifier_env *env) in opt_remove_dead_code() argument
18085 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
18086 int insn_cnt = env->prog->len; in opt_remove_dead_code()
18098 err = verifier_remove_insns(env, i, j); in opt_remove_dead_code()
18101 insn_cnt = env->prog->len; in opt_remove_dead_code()
18107 static int opt_remove_nops(struct bpf_verifier_env *env) in opt_remove_nops() argument
18110 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
18111 int insn_cnt = env->prog->len; in opt_remove_nops()
18118 err = verifier_remove_insns(env, i, 1); in opt_remove_nops()
18128 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, in opt_subreg_zext_lo32_rnd_hi32() argument
18132 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
18133 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
18134 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
18166 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { in opt_subreg_zext_lo32_rnd_hi32()
18205 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); in opt_subreg_zext_lo32_rnd_hi32()
18215 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); in opt_subreg_zext_lo32_rnd_hi32()
18218 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
18220 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
18232 static int convert_ctx_accesses(struct bpf_verifier_env *env) in convert_ctx_accesses() argument
18234 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
18236 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
18243 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
18245 verbose(env, "bpf verifier is misconfigured\n"); in convert_ctx_accesses()
18248 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
18249 env->prog); in convert_ctx_accesses()
18251 verbose(env, "bpf verifier is misconfigured\n"); in convert_ctx_accesses()
18254 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); in convert_ctx_accesses()
18258 env->prog = new_prog; in convert_ctx_accesses()
18263 if (bpf_prog_is_offloaded(env->prog->aux)) in convert_ctx_accesses()
18266 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
18294 env->insn_aux_data[i + delta].sanitize_stack_spill) { in convert_ctx_accesses()
18301 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); in convert_ctx_accesses()
18306 env->prog = new_prog; in convert_ctx_accesses()
18311 switch ((int)env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
18343 env->prog->aux->num_exentries++; in convert_ctx_accesses()
18350 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
18366 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); in convert_ctx_accesses()
18381 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
18385 verbose(env, "bpf verifier is misconfigured\n"); in convert_ctx_accesses()
18393 verbose(env, "bpf verifier narrow ctx load misconfigured\n"); in convert_ctx_accesses()
18417 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in convert_ctx_accesses()
18424 env->prog = new_prog; in convert_ctx_accesses()
18431 static int jit_subprogs(struct bpf_verifier_env *env) in jit_subprogs() argument
18433 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
18440 if (env->subprog_cnt <= 1) in jit_subprogs()
18451 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
18464 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
18480 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
18484 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18486 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
18522 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
18530 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
18540 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; in jit_subprogs()
18553 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18580 func[i]->aux->func_cnt = env->subprog_cnt; in jit_subprogs()
18582 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18586 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
18597 for (i = 1; i < env->subprog_cnt; i++) { in jit_subprogs()
18608 insn[0].imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
18615 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
18616 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
18626 prog->aux->func_cnt = env->subprog_cnt; in jit_subprogs()
18642 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
18657 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
18663 static int fixup_call_args(struct bpf_verifier_env *env) in fixup_call_args() argument
18666 struct bpf_prog *prog = env->prog; in fixup_call_args()
18673 if (env->prog->jit_requested && in fixup_call_args()
18674 !bpf_prog_is_offloaded(env->prog->aux)) { in fixup_call_args()
18675 err = jit_subprogs(env); in fixup_call_args()
18683 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); in fixup_call_args()
18686 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
18690 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in fixup_call_args()
18698 verbose(env, "callbacks are not allowed in non-JITed programs\n"); in fixup_call_args()
18704 depth = get_callee_stack_depth(env, insn, i); in fixup_call_args()
18715 static void specialize_kfunc(struct bpf_verifier_env *env, in specialize_kfunc() argument
18718 struct bpf_prog *prog = env->prog; in specialize_kfunc()
18736 seen_direct_write = env->seen_direct_write; in specialize_kfunc()
18737 is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE); in specialize_kfunc()
18745 env->seen_direct_write = seen_direct_write; in specialize_kfunc()
18766 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, in fixup_kfunc_call() argument
18772 verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); in fixup_kfunc_call()
18782 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
18784 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", in fixup_kfunc_call()
18794 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
18796 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; in fixup_kfunc_call()
18805 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
18810 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", in fixup_kfunc_call()
18822 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
18833 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", in fixup_kfunc_call()
18838 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, in fixup_kfunc_call()
18851 static int do_misc_fixups(struct bpf_verifier_env *env) in do_misc_fixups() argument
18853 struct bpf_prog *prog = env->prog; in do_misc_fixups()
18898 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); in do_misc_fixups()
18903 env->prog = prog = new_prog; in do_misc_fixups()
18912 cnt = env->ops->gen_ld_abs(insn, insn_buf); in do_misc_fixups()
18914 verbose(env, "bpf verifier is misconfigured\n"); in do_misc_fixups()
18918 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
18923 env->prog = prog = new_prog; in do_misc_fixups()
18937 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
18971 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
18976 env->prog = prog = new_prog; in do_misc_fixups()
18986 ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt); in do_misc_fixups()
18992 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
18997 env->prog = prog = new_prog; in do_misc_fixups()
19015 if (!allow_tail_call_in_subprogs(env)) in do_misc_fixups()
19027 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
19028 if (env->bpf_capable && !prog->blinding_requested && in do_misc_fixups()
19042 verbose(env, "adding tail call poke descriptor failed\n"); in do_misc_fixups()
19060 verbose(env, "tail_call abusing map_ptr\n"); in do_misc_fixups()
19073 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
19078 env->prog = prog = new_prog; in do_misc_fixups()
19106 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
19111 env->prog = prog = new_prog; in do_misc_fixups()
19117 if (!env->prog->aux->sleepable || in do_misc_fixups()
19118 env->insn_aux_data[i + delta].storage_get_func_atomic) in do_misc_fixups()
19125 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
19130 env->prog = prog = new_prog; in do_misc_fixups()
19149 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
19161 verbose(env, "bpf verifier is misconfigured\n"); in do_misc_fixups()
19165 new_prog = bpf_patch_insn_data(env, i + delta, in do_misc_fixups()
19171 env->prog = prog = new_prog; in do_misc_fixups()
19248 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, in do_misc_fixups()
19254 env->prog = prog = new_prog; in do_misc_fixups()
19274 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
19279 env->prog = prog = new_prog; in do_misc_fixups()
19302 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); in do_misc_fixups()
19307 env->prog = prog = new_prog; in do_misc_fixups()
19318 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); in do_misc_fixups()
19322 env->prog = prog = new_prog; in do_misc_fixups()
19333 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); in do_misc_fixups()
19337 env->prog = prog = new_prog; in do_misc_fixups()
19343 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
19348 verbose(env, in do_misc_fixups()
19362 verbose(env, "bpf verifier is misconfigured\n"); in do_misc_fixups()
19368 verbose(env, "tracking tail call prog failed\n"); in do_misc_fixups()
19373 sort_kfunc_descs_by_imm_off(env->prog); in do_misc_fixups()
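The pattern repeated throughout do_misc_fixups() is bpf_patch_insn_data(env, i + delta, insn_buf, cnt) followed by env->prog = prog = new_prog and a delta adjustment, so that later loop indices stay valid after one instruction has been expanded into cnt. A self-contained model of that bookkeeping, with plain int arrays standing in for the instruction stream:

/* Toy patch-and-advance loop; toy_patch() stands in for
 * bpf_patch_insn_data(). */
#include <stdio.h>
#include <string.h>

#define MAX 32

static void toy_patch(int *prog, int *len, int pos, const int *buf, int cnt)
{
	/* replace prog[pos] with buf[0..cnt-1], shifting the tail */
	memmove(prog + pos + cnt, prog + pos + 1,
		(*len - pos - 1) * sizeof(int));
	memcpy(prog + pos, buf, cnt * sizeof(int));
	*len += cnt - 1;
}

int main(void)
{
	int prog[MAX] = { 10, 20, 30, 40 }, len = 4, delta = 0;

	for (int i = 0; i < 4; i++) {
		if (prog[i + delta] != 30)
			continue;
		int buf[2] = { 31, 32 };	/* expansion of insn "30" */
		toy_patch(prog, &len, i + delta, buf, 2);
		delta += 2 - 1;			/* keep i relative to the old prog */
	}
	for (int i = 0; i < len; i++)
		printf("%d ", prog[i]);
	printf("\n");			/* prints: 10 20 31 32 40 */
	return 0;
}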
19378 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, in inline_bpf_loop() argument
19439 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt); in inline_bpf_loop()
19444 callback_start = env->subprog_info[callback_subprogno].start; in inline_bpf_loop()
19469 static int optimize_bpf_loop(struct bpf_verifier_env *env) in optimize_bpf_loop() argument
19471 struct bpf_subprog_info *subprogs = env->subprog_info; in optimize_bpf_loop()
19473 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop()
19474 int insn_cnt = env->prog->len; in optimize_bpf_loop()
19481 &env->insn_aux_data[i + delta].loop_inline_state; in optimize_bpf_loop()
19487 new_prog = inline_bpf_loop(env, in optimize_bpf_loop()
19496 env->prog = new_prog; in optimize_bpf_loop()
19509 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in optimize_bpf_loop()
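optimize_bpf_loop() walks the instructions and only rewrites a bpf_loop call when the per-insn loop_inline_state says inlining is possible, in which case inline_bpf_loop() emits the replacement and the callback subprogram's start is taken from subprog_info. A toy sketch of that per-call decision, with simplified fields modelled loosely on bpf_loop_inline_state:

#include <stdbool.h>
#include <stdio.h>

struct toy_loop_state { bool initialized; bool fit_for_inline; int callback_subprogno; };

int main(void)
{
	struct toy_loop_state aux[3] = {
		[1] = { .initialized = true, .fit_for_inline = true,
			.callback_subprogno = 2 },
	};

	for (int i = 0; i < 3; i++) {
		if (!aux[i].initialized || !aux[i].fit_for_inline)
			continue;	/* leave the real helper call in place */
		printf("inline bpf_loop at insn %d -> callback subprog %d\n",
		       i, aux[i].callback_subprogno);
	}
	return 0;
}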
19514 static void free_states(struct bpf_verifier_env *env) in free_states() argument
19519 sl = env->free_list; in free_states()
19526 env->free_list = NULL; in free_states()
19528 if (!env->explored_states) in free_states()
19531 for (i = 0; i < state_htab_size(env); i++) { in free_states()
19532 sl = env->explored_states[i]; in free_states()
19540 env->explored_states[i] = NULL; in free_states()
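free_states() drains two collections of verifier states: the free list and every bucket of the explored_states hash table, nulling each out afterwards. The list walk itself is the usual save-next-then-free loop; a minimal sketch with a simplified node type:

/* Simplified stand-in for the bpf_verifier_state_list walk. */
#include <stdlib.h>

struct toy_state_list { struct toy_state_list *next; };

static void toy_free_list(struct toy_state_list *sl)
{
	while (sl) {
		struct toy_state_list *next = sl->next;
		free(sl);
		sl = next;
	}
}

int main(void)
{
	struct toy_state_list *a = calloc(1, sizeof(*a));
	struct toy_state_list *b = calloc(1, sizeof(*b));

	a->next = b;
	toy_free_list(a);	/* afterwards the list head is reset to NULL */
	return 0;
}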
19544 static int do_check_common(struct bpf_verifier_env *env, int subprog) in do_check_common() argument
19546 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check_common()
19551 env->prev_linfo = NULL; in do_check_common()
19552 env->pass_cnt++; in do_check_common()
19565 env->cur_state = state; in do_check_common()
19566 init_func_state(env, state->frame[0], in do_check_common()
19570 state->first_insn_idx = env->subprog_info[subprog].start; in do_check_common()
19574 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
19575 ret = btf_prepare_func_args(env, subprog, regs); in do_check_common()
19580 mark_reg_known_zero(env, regs, i); in do_check_common()
19582 mark_reg_unknown(env, regs, i); in do_check_common()
19586 mark_reg_known_zero(env, regs, i); in do_check_common()
19588 regs[i].id = ++env->id_gen; in do_check_common()
19594 mark_reg_known_zero(env, regs, BPF_REG_1); in do_check_common()
19595 ret = btf_check_subprog_arg_match(env, subprog, regs); in do_check_common()
19609 ret = do_check(env); in do_check_common()
19614 if (env->cur_state) { in do_check_common()
19615 free_verifier_state(env->cur_state, true); in do_check_common()
19616 env->cur_state = NULL; in do_check_common()
19618 while (!pop_stack(env, NULL, NULL, false)); in do_check_common()
19620 bpf_vlog_reset(&env->log, 0); in do_check_common()
19621 free_states(env); in do_check_common()
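do_check_common() sets up the entry state before running do_check(): for subprograms and BPF_PROG_TYPE_EXT programs the argument registers are derived from BTF (pointers marked known-zero, scalars unknown, fresh ids handed out from env->id_gen), while the main program gets only R1 as a known-zero context pointer. A reduced sketch of that split, with the enum and id counter as simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum toy_reg { NOT_INIT, SCALAR_UNKNOWN, PTR_KNOWN_ZERO };

int main(void)
{
	enum toy_reg regs[11] = { NOT_INIT };
	bool is_subprog = false;	/* verifying the main program */
	int id_gen = 0;

	if (is_subprog) {
		/* BTF args: pointers known zero, scalars unknown,
		 * pointer-to-mem args also get a fresh id */
		regs[1] = PTR_KNOWN_ZERO;
		regs[2] = SCALAR_UNKNOWN;
		id_gen++;
	} else {
		regs[1] = PTR_KNOWN_ZERO;	/* R1 = ctx */
	}
	printf("R1=%d id_gen=%d\n", regs[1], id_gen);
	return 0;
}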
19642 static int do_check_subprogs(struct bpf_verifier_env *env) in do_check_subprogs() argument
19644 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
19650 for (i = 1; i < env->subprog_cnt; i++) { in do_check_subprogs()
19653 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
19654 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
19655 ret = do_check_common(env, i); in do_check_subprogs()
19658 } else if (env->log.level & BPF_LOG_LEVEL) { in do_check_subprogs()
19659 verbose(env, in do_check_subprogs()
19667 static int do_check_main(struct bpf_verifier_env *env) in do_check_main() argument
19671 env->insn_idx = 0; in do_check_main()
19672 ret = do_check_common(env, 0); in do_check_main()
19674 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
19679 static void print_verification_stats(struct bpf_verifier_env *env) in print_verification_stats() argument
19683 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
19684 verbose(env, "verification time %lld usec\n", in print_verification_stats()
19685 div_u64(env->verification_time, 1000)); in print_verification_stats()
19686 verbose(env, "stack depth "); in print_verification_stats()
19687 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
19688 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
19690 verbose(env, "%d", depth); in print_verification_stats()
19691 if (i + 1 < env->subprog_cnt) in print_verification_stats()
19692 verbose(env, "+"); in print_verification_stats()
19694 verbose(env, "\n"); in print_verification_stats()
19696 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " in print_verification_stats()
19698 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
19699 env->max_states_per_insn, env->total_states, in print_verification_stats()
19700 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
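The stack-depth summary in print_verification_stats() joins one depth per subprogram with '+' between entries, exactly as the loop above shows. A standalone rendering of that formatting:

#include <stdio.h>

int main(void)
{
	unsigned int depth[] = { 64, 32, 8 };
	int cnt = 3;

	printf("stack depth ");
	for (int i = 0; i < cnt; i++) {
		printf("%u", depth[i]);
		if (i + 1 < cnt)
			printf("+");	/* prints: stack depth 64+32+8 */
	}
	printf("\n");
	return 0;
}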
19703 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) in check_struct_ops_btf_id() argument
19708 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id()
19713 verbose(env, "struct ops programs must have a GPL compatible license\n"); in check_struct_ops_btf_id()
19720 verbose(env, "attach_btf_id %u is not a supported struct\n", in check_struct_ops_btf_id()
19728 verbose(env, "attach to invalid member idx %u of struct %s\n", in check_struct_ops_btf_id()
19738 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", in check_struct_ops_btf_id()
19747 verbose(env, "attach to unsupported member %s of struct %s\n", in check_struct_ops_btf_id()
19755 env->ops = st_ops->verifier_ops; in check_struct_ops_btf_id()
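check_struct_ops_btf_id() validates in order: GPL-compatible license, a recognised struct_ops type for attach_btf_id, a member index inside the struct, and a member that is actually a function, before switching env->ops to the struct_ops verifier_ops. A toy sketch of that validation order, with structures standing in for bpf_struct_ops and the BTF records:

#include <stdbool.h>
#include <stdio.h>

struct toy_member { const char *name; bool is_func_proto; };
struct toy_struct_ops { const char *name; int nmember; struct toy_member *m; };

static const char *toy_check(const struct toy_struct_ops *st, bool gpl,
			     unsigned int member_idx)
{
	if (!gpl)
		return "needs a GPL compatible license";
	if (!st)
		return "attach_btf_id is not a supported struct";
	if (member_idx >= (unsigned int)st->nmember)
		return "attach to invalid member idx";
	if (!st->m[member_idx].is_func_proto)
		return "attach to invalid member (not a func proto)";
	return "ok: switch to verifier_ops";
}

int main(void)
{
	struct toy_member m[] = { { "init", true }, { "flags", false } };
	struct toy_struct_ops ops = { "tcp_congestion_ops", 2, m };

	puts(toy_check(&ops, true, 1));
	return 0;
}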
20093 static int check_attach_btf_id(struct bpf_verifier_env *env) in check_attach_btf_id() argument
20095 struct bpf_prog *prog = env->prog; in check_attach_btf_id()
20107 verbose(env, "Syscall programs can only be sleepable\n"); in check_attach_btf_id()
20112 …verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepa… in check_attach_btf_id()
20117 return check_struct_ops_btf_id(env); in check_attach_btf_id()
20124 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
20133 env->ops = bpf_verifier_ops[tgt_prog->type]; in check_attach_btf_id()
20157 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
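check_attach_btf_id() screens the sleepable flag before dispatching: syscall programs must be sleepable, only a handful of program types may be, struct_ops programs are handed to check_struct_ops_btf_id(), and the rest go through bpf_check_attach_target(). A reduced dispatch sketch; the enum is a cut-down stand-in for bpf_prog_type and the allowed set is abbreviated:

#include <stdbool.h>
#include <stdio.h>

enum toy_type { TOY_SYSCALL, TOY_TRACING, TOY_LSM, TOY_STRUCT_OPS, TOY_XDP };

static const char *toy_check_attach(enum toy_type t, bool sleepable)
{
	if (t == TOY_SYSCALL && !sleepable)
		return "Syscall programs can only be sleepable";
	if (sleepable && t != TOY_SYSCALL && t != TOY_TRACING &&
	    t != TOY_LSM && t != TOY_STRUCT_OPS)
		return "this program type cannot be sleepable";
	if (t == TOY_STRUCT_OPS)
		return "check_struct_ops_btf_id()";
	return "bpf_check_attach_target()";
}

int main(void)
{
	puts(toy_check_attach(TOY_XDP, true));
	return 0;
}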
20191 struct bpf_verifier_env *env; in bpf_check() local
20203 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); in bpf_check()
20204 if (!env) in bpf_check()
20207 env->bt.env = env; in bpf_check()
20210 env->insn_aux_data = in bpf_check()
20213 if (!env->insn_aux_data) in bpf_check()
20216 env->insn_aux_data[i].orig_idx = i; in bpf_check()
20217 env->prog = *prog; in bpf_check()
20218 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
20219 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); in bpf_check()
20231 ret = bpf_vlog_init(&env->log, attr->log_level, in bpf_check()
20237 mark_verifier_state_clean(env); in bpf_check()
20241 verbose(env, "in-kernel BTF is malformed\n"); in bpf_check()
20246 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
20248 env->strict_alignment = true; in bpf_check()
20250 env->strict_alignment = false; in bpf_check()
20252 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); in bpf_check()
20253 env->allow_uninit_stack = bpf_allow_uninit_stack(); in bpf_check()
20254 env->bypass_spec_v1 = bpf_bypass_spec_v1(); in bpf_check()
20255 env->bypass_spec_v4 = bpf_bypass_spec_v4(); in bpf_check()
20256 env->bpf_capable = bpf_capable(); in bpf_check()
20259 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
20261 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
20265 if (!env->explored_states) in bpf_check()
20268 ret = add_subprog_and_kfunc(env); in bpf_check()
20272 ret = check_subprogs(env); in bpf_check()
20276 ret = check_btf_info(env, attr, uattr); in bpf_check()
20280 ret = check_attach_btf_id(env); in bpf_check()
20284 ret = resolve_pseudo_ldimm64(env); in bpf_check()
20288 if (bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
20289 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
20294 ret = check_cfg(env); in bpf_check()
20298 ret = do_check_subprogs(env); in bpf_check()
20299 ret = ret ?: do_check_main(env); in bpf_check()
20301 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) in bpf_check()
20302 ret = bpf_prog_offload_finalize(env); in bpf_check()
20305 kvfree(env->explored_states); in bpf_check()
20308 ret = check_max_stack_depth(env); in bpf_check()
20312 ret = optimize_bpf_loop(env); in bpf_check()
20316 opt_hard_wire_dead_code_branches(env); in bpf_check()
20318 ret = opt_remove_dead_code(env); in bpf_check()
20320 ret = opt_remove_nops(env); in bpf_check()
20323 sanitize_dead_code(env); in bpf_check()
20328 ret = convert_ctx_accesses(env); in bpf_check()
20331 ret = do_misc_fixups(env); in bpf_check()
20336 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
20337 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); in bpf_check()
20338 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
20343 ret = fixup_call_args(env); in bpf_check()
20345 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
20346 print_verification_stats(env); in bpf_check()
20347 env->prog->aux->verified_insns = env->insn_processed; in bpf_check()
20350 err = bpf_vlog_finalize(&env->log, &log_true_size); in bpf_check()
20364 if (env->used_map_cnt) { in bpf_check()
20366 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
20367 sizeof(env->used_maps[0]), in bpf_check()
20370 if (!env->prog->aux->used_maps) { in bpf_check()
20375 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
20376 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
20377 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
20379 if (env->used_btf_cnt) { in bpf_check()
20381 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, in bpf_check()
20382 sizeof(env->used_btfs[0]), in bpf_check()
20384 if (!env->prog->aux->used_btfs) { in bpf_check()
20389 memcpy(env->prog->aux->used_btfs, env->used_btfs, in bpf_check()
20390 sizeof(env->used_btfs[0]) * env->used_btf_cnt); in bpf_check()
20391 env->prog->aux->used_btf_cnt = env->used_btf_cnt; in bpf_check()
20393 if (env->used_map_cnt || env->used_btf_cnt) { in bpf_check()
20397 convert_pseudo_ld_imm64(env); in bpf_check()
20400 adjust_btf_func(env); in bpf_check()
20403 if (!env->prog->aux->used_maps) in bpf_check()
20407 release_maps(env); in bpf_check()
20408 if (!env->prog->aux->used_btfs) in bpf_check()
20409 release_btfs(env); in bpf_check()
20414 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
20415 env->prog->expected_attach_type = 0; in bpf_check()
20417 *prog = env->prog; in bpf_check()
20421 vfree(env->insn_aux_data); in bpf_check()
20423 kfree(env); in bpf_check()
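The tail of bpf_check() chains its passes with the "ret = ret ?: next_pass(env)" idiom (visible above for do_check_subprogs/do_check_main), so each stage runs only while no earlier stage has failed. A compilable illustration of just that chaining style; the pass names are labels only, not the full kernel sequence, and ?: is the GNU extension the kernel relies on:

#include <stdio.h>

struct toy_env { int broken; };

static int pass_check_cfg(struct toy_env *env)   { (void)env; return 0; }
static int pass_do_check(struct toy_env *env)    { return env->broken ? -22 : 0; }
static int pass_misc_fixups(struct toy_env *env) { (void)env; return 0; }

int main(void)
{
	struct toy_env env = { .broken = 0 };
	int ret = 0;

	ret = ret ?: pass_check_cfg(&env);	/* runs only while ret == 0 */
	ret = ret ?: pass_do_check(&env);
	ret = ret ?: pass_misc_fixups(&env);

	printf("pipeline ret=%d\n", ret);
	return 0;
}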