Lines Matching refs:insn
17 const struct bpf_insn *insn,
22 if (!insn->src_reg &&
23 insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
24 func_id_str[insn->imm])
25 return func_id_str[insn->imm];
30 res = cbs->cb_call(cbs->private_data, insn);
35 if (insn->src_reg == BPF_PSEUDO_CALL)
36 snprintf(buff, len, "%+d", insn->imm);
37 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
44 const struct bpf_insn *insn,
48 return cbs->cb_imm(cbs->private_data, insn, full_imm);
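The two helpers above (__func_get_name() and __func_imm_name(), per the call sites further down) first consult optional caller-supplied callbacks before falling back to built-in rendering. A simplified sketch of the callback bundle they receive, assuming the layout declared in kernel/bpf/disasm.h; the typedef names and the cb_print member are taken from memory of that header, not from this listing, and attribute annotations are omitted:

	typedef void (*bpf_insn_print_t)(void *private_data,
					 const char *fmt, ...);
	typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
						      const struct bpf_insn *insn);
	typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
						    const struct bpf_insn *insn,
						    __u64 full_imm);

	struct bpf_insn_cbs {
		bpf_insn_print_t	cb_print;	/* sink for every formatted line */
		bpf_insn_revmap_call_t	cb_call;	/* optional: name a call target */
		bpf_insn_print_imm_t	cb_imm;		/* optional: render a 64-bit imm */
		void			*private_data;	/* handed back to each callback */
	};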
140 const struct bpf_insn *insn)
143 insn->code, insn->dst_reg,
144 BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",
145 insn->imm, insn->dst_reg);
150 const struct bpf_insn *insn)
153 insn->code, insn->dst_reg,
154 insn->imm, insn->dst_reg);
157 static bool is_sdiv_smod(const struct bpf_insn *insn)
159 return (BPF_OP(insn->code) == BPF_DIV || BPF_OP(insn->code) == BPF_MOD) &&
160 insn->off == 1;
163 static bool is_movsx(const struct bpf_insn *insn)
165 return BPF_OP(insn->code) == BPF_MOV &&
166 (insn->off == 8 || insn->off == 16 || insn->off == 32);
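These two predicates key off the cpuv4 encodings: signed division/modulo reuse BPF_DIV/BPF_MOD with the otherwise-unused off field set to 1, and move-with-sign-extension reuses BPF_MOV with off carrying the source width in bits. Illustrative encodings only; the variable names below are invented for this sketch:

	#include <linux/bpf.h>

	/* r1 s/= r2: BPF_DIV, signedness flagged via off == 1 */
	static const struct bpf_insn sdiv_r1_r2 = {
		.code    = BPF_ALU64 | BPF_DIV | BPF_X,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_2,
		.off     = 1,
	};

	/* r1 = (s8)r2: BPF_MOV, off == 8 selects sign-extension from 8 bits */
	static const struct bpf_insn movsx8_r1_r2 = {
		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_2,
		.off     = 8,
	};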
170 const struct bpf_insn *insn,
174 u8 class = BPF_CLASS(insn->code);
177 if (BPF_OP(insn->code) == BPF_END) {
179 print_bpf_bswap_insn(verbose, cbs->private_data, insn);
181 print_bpf_end_insn(verbose, cbs->private_data, insn);
182 } else if (BPF_OP(insn->code) == BPF_NEG) {
184 insn->code, class == BPF_ALU ? 'w' : 'r',
185 insn->dst_reg, class == BPF_ALU ? 'w' : 'r',
186 insn->dst_reg);
187 } else if (BPF_SRC(insn->code) == BPF_X) {
189 insn->code, class == BPF_ALU ? 'w' : 'r',
190 insn->dst_reg,
191 is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
192 : bpf_alu_string[BPF_OP(insn->code) >> 4],
193 is_movsx(insn) ? bpf_movsx_string[(insn->off >> 3) - 1] : "",
195 insn->src_reg);
198 insn->code, class == BPF_ALU ? 'w' : 'r',
199 insn->dst_reg,
200 is_sdiv_smod(insn) ? bpf_alu_sign_string[BPF_OP(insn->code) >> 4]
201 : bpf_alu_string[BPF_OP(insn->code) >> 4],
202 insn->imm);
205 if (BPF_MODE(insn->code) == BPF_MEM)
207 insn->code,
208 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
209 insn->dst_reg,
210 insn->off, insn->src_reg);
211 else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
212 (insn->imm == BPF_ADD || insn->imm == BPF_AND ||
213 insn->imm == BPF_OR || insn->imm == BPF_XOR)) {
215 insn->code,
216 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
217 insn->dst_reg, insn->off,
218 bpf_alu_string[BPF_OP(insn->imm) >> 4],
219 insn->src_reg);
220 } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
221 (insn->imm == (BPF_ADD | BPF_FETCH) ||
222 insn->imm == (BPF_AND | BPF_FETCH) ||
223 insn->imm == (BPF_OR | BPF_FETCH) ||
224 insn->imm == (BPF_XOR | BPF_FETCH))) {
226 insn->code, insn->src_reg,
227 BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
228 bpf_atomic_alu_string[BPF_OP(insn->imm) >> 4],
229 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
230 insn->dst_reg, insn->off, insn->src_reg);
231 } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
232 insn->imm == BPF_CMPXCHG) {
234 insn->code,
235 BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
236 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
237 insn->dst_reg, insn->off,
238 insn->src_reg);
239 } else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
240 insn->imm == BPF_XCHG) {
242 insn->code, insn->src_reg,
243 BPF_SIZE(insn->code) == BPF_DW ? "64" : "",
244 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
245 insn->dst_reg, insn->off, insn->src_reg);
247 verbose(cbs->private_data, "BUG_%02x\n", insn->code);
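In the BPF_ATOMIC branches above, the operation itself travels in insn->imm: plain BPF_ADD/BPF_AND/BPF_OR/BPF_XOR give the "lock" forms, OR-ing in BPF_FETCH additionally returns the old value in the source register, and BPF_CMPXCHG/BPF_XCHG are distinct imm values of their own. Two illustrative encodings using UAPI constants; the variable names are invented:

	#include <linux/bpf.h>

	/* r2 = atomic_fetch_add(*(u64 *)(r1 + 0), r2) */
	static const struct bpf_insn atomic_fetch_add = {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = BPF_REG_1,			/* memory operand base */
		.src_reg = BPF_REG_2,			/* addend; receives old value (FETCH) */
		.imm     = BPF_ADD | BPF_FETCH,
	};

	/* cmpxchg: r0 holds the expected value and receives the old one */
	static const struct bpf_insn atomic_cmpxchg = {
		.code    = BPF_STX | BPF_ATOMIC | BPF_DW,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_2,			/* new value to store */
		.imm     = BPF_CMPXCHG,
	};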
250 if (BPF_MODE(insn->code) == BPF_MEM) {
252 insn->code,
253 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
254 insn->dst_reg,
255 insn->off, insn->imm);
256 } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
257 verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
259 verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
262 if (BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) {
263 verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
267 insn->code, insn->dst_reg,
268 BPF_MODE(insn->code) == BPF_MEM ?
269 bpf_ldst_string[BPF_SIZE(insn->code) >> 3] :
270 bpf_ldsx_string[BPF_SIZE(insn->code) >> 3],
271 insn->src_reg, insn->off);
273 if (BPF_MODE(insn->code) == BPF_ABS) {
275 insn->code,
276 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
277 insn->imm);
278 } else if (BPF_MODE(insn->code) == BPF_IND) {
280 insn->code,
281 bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
282 insn->src_reg, insn->imm);
283 } else if (BPF_MODE(insn->code) == BPF_IMM &&
284 BPF_SIZE(insn->code) == BPF_DW) {
286 * part of the ldimm64 insn is accessible.
288 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
289 bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD ||
290 insn->src_reg == BPF_PSEUDO_MAP_VALUE;
297 insn->code, insn->dst_reg,
298 __func_imm_name(cbs, insn, imm,
301 verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code);
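The 64-bit constant carried by a ldimm64 is split across two instruction slots, which is why the code above reassembles it from (insn + 1)->imm and (u32)insn->imm. A hypothetical pair, with arbitrarily chosen values:

	/* ld_imm64 r1, 0x5566778811223344 */
	static const struct bpf_insn ld_imm64_r1[2] = {
		{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
		  .imm  = 0x11223344 },		/* low 32 bits */
		{ .imm  = 0x55667788 },		/* high 32 bits, rest left zero */
	};
	/* Reassembled as ((u64)0x55667788 << 32) | 0x11223344, so the dump
	 * would read roughly: (18) r1 = 0x5566778811223344
	 */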
305 u8 opcode = BPF_OP(insn->code);
310 if (insn->src_reg == BPF_PSEUDO_CALL) {
312 insn->code,
313 __func_get_name(cbs, insn,
317 verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code,
318 __func_get_name(cbs, insn,
320 insn->imm);
322 } else if (insn->code == (BPF_JMP | BPF_JA)) {
324 insn->code, insn->off);
325 } else if (insn->code == (BPF_JMP32 | BPF_JA)) {
327 insn->code, insn->imm);
328 } else if (insn->code == (BPF_JMP | BPF_EXIT)) {
329 verbose(cbs->private_data, "(%02x) exit\n", insn->code);
330 } else if (BPF_SRC(insn->code) == BPF_X) {
333 insn->code, class == BPF_JMP32 ? 'w' : 'r',
334 insn->dst_reg,
335 bpf_jmp_string[BPF_OP(insn->code) >> 4],
337 insn->src_reg, insn->off);
341 insn->code, class == BPF_JMP32 ? 'w' : 'r',
342 insn->dst_reg,
343 bpf_jmp_string[BPF_OP(insn->code) >> 4],
344 insn->imm, insn->off);
348 insn->code, bpf_class_string[class]);
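Taken together, the fragments above are the printing half of the BPF disassembler; a caller drives it one instruction at a time through print_bpf_insn(). A minimal userspace-style consumer, sketched against the kernel/bpf/disasm.h interface that bpftool also builds; dump_insns() and print_cb() are invented names for this example:

	#include <stdio.h>
	#include <stdarg.h>
	#include <linux/bpf.h>
	#include "disasm.h"		/* struct bpf_insn_cbs, print_bpf_insn() */

	static void print_cb(void *private_data, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vfprintf(private_data, fmt, args);	/* private_data carries a FILE * */
		va_end(args);
	}

	static void dump_insns(const struct bpf_insn *insns, unsigned int cnt)
	{
		const struct bpf_insn_cbs cbs = {
			.cb_print     = print_cb,
			/* .cb_call and .cb_imm left NULL: the default helper-name
			 * and hex-immediate rendering seen in the listing applies.
			 */
			.private_data = stdout,
		};
		unsigned int i;

		for (i = 0; i < cnt; i++) {
			print_bpf_insn(&cbs, &insns[i], true /* allow_ptr_leaks */);
			/* ldimm64 occupies two slots; skip its second half */
			if (insns[i].code == (BPF_LD | BPF_IMM | BPF_DW))
				i++;
		}
	}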