// SPDX-License-Identifier: (Apache-2.0 OR MIT)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>

//! This module contains all the definitions related to eBPF, along with some functions for
//! manipulating eBPF instructions.
//!
//! The number of bytes in an instruction, the maximum number of instructions in a program, and
//! all operation codes are defined here as constants.
//!
//! The structure used by this crate to represent an instruction is also defined in this module,
//! along with the function to extract it from a program.
//!
//! To learn more about these instructions, see the Linux kernel documentation:
//! <https://www.kernel.org/doc/Documentation/networking/filter.txt>, or for a shorter version of
//! the list of the operation codes: <https://github.com/iovisor/bpf-docs/blob/master/eBPF.md>

use alloc::{vec, vec::Vec};

use byteorder::{ByteOrder, LittleEndian};

/// The maximum call depth is 8.
pub const RBPF_MAX_CALL_DEPTH: usize = 8;

/// Maximum number of instructions in an eBPF program.
pub const PROG_MAX_INSNS: usize = 1000000;
/// Size of an eBPF instruction, in bytes.
pub const INSN_SIZE: usize = 8;
/// Maximum size of an eBPF program, in bytes.
pub const PROG_MAX_SIZE: usize = PROG_MAX_INSNS * INSN_SIZE;
/// Size of the stack for an eBPF program, in bytes.
pub const STACK_SIZE: usize = 512;

// eBPF op codes.
// See also https://www.kernel.org/doc/Documentation/networking/filter.txt

// Three least significant bits are operation class:
/// BPF operation class: load from immediate.
pub const BPF_LD: u8 = 0x00;
/// BPF operation class: load from memory into register.
pub const BPF_LDX: u8 = 0x01;
/// BPF operation class: store immediate value to memory.
pub const BPF_ST: u8 = 0x02;
/// BPF operation class: store register value to memory.
pub const BPF_STX: u8 = 0x03;
/// BPF operation class: 32-bit arithmetic operation.
pub const BPF_ALU: u8 = 0x04;
/// BPF operation class: jump (64-bit wide operands for comparisons).
pub const BPF_JMP: u8 = 0x05;
/// BPF operation class: jump (32-bit wide operands for comparisons).
pub const BPF_JMP32: u8 = 0x06;
/// BPF operation class: 64-bit arithmetic operation.
pub const BPF_ALU64: u8 = 0x07;

// For load and store instructions:
//   +------------+--------+------------+
//   |   3 bits   | 2 bits |   3 bits   |
//   |    mode    |  size  | insn class |
//   +------------+--------+------------+
//   (MSB)                          (LSB)

// Size modifiers:
/// BPF size modifier: word (4 bytes).
pub const BPF_W: u8 = 0x00;
/// BPF size modifier: half-word (2 bytes).
pub const BPF_H: u8 = 0x08;
/// BPF size modifier: byte (1 byte).
pub const BPF_B: u8 = 0x10;
/// BPF size modifier: double word (8 bytes).
pub const BPF_DW: u8 = 0x18;

// Mode modifiers:
/// BPF mode modifier: immediate value.
pub const BPF_IMM: u8 = 0x00;
/// BPF mode modifier: absolute load.
pub const BPF_ABS: u8 = 0x20;
/// BPF mode modifier: indirect load.
pub const BPF_IND: u8 = 0x40;
/// BPF mode modifier: load from / store to memory.
pub const BPF_MEM: u8 = 0x60;
// [ 0x80 reserved ]
// [ 0xa0 reserved ]
/// BPF mode modifier: exclusive add.
pub const BPF_XADD: u8 = 0xc0;
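
// Illustration only (not part of the original API): a load/store opcode byte is composed as
// class | size | mode. For instance, a byte-sized, register-indexed memory load combines
// BPF_LDX, BPF_B and BPF_MEM into 0x71, the `LD_B_REG` opcode defined further down.
const _: () = assert!((BPF_LDX | BPF_MEM | BPF_B) == 0x71);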

// For arithmetic (BPF_ALU/BPF_ALU64) and jump (BPF_JMP) instructions:
//   +----------------+----+------------+
//   |     4 bits     |1 b.|   3 bits   |
//   | operation code | src| insn class |
//   +----------------+----+------------+
//   (MSB)                          (LSB)

// Source modifiers:
/// BPF source operand modifier: 32-bit immediate value.
pub const BPF_K: u8 = 0x00;
/// BPF source operand modifier: `src` register.
pub const BPF_X: u8 = 0x08;
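
// Illustration only: for these classes the opcode byte is operation | source | class. For
// example, BPF_ALU64 | BPF_X | BPF_ADD is 0x0f, i.e. the `add64 dst, src` opcode (ADD64_REG).
const _: () = assert!((BPF_ALU64 | BPF_X | BPF_ADD) == 0x0f);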

// Operation codes -- BPF_ALU or BPF_ALU64 classes:
/// BPF ALU/ALU64 operation code: addition.
pub const BPF_ADD: u8 = 0x00;
/// BPF ALU/ALU64 operation code: subtraction.
pub const BPF_SUB: u8 = 0x10;
/// BPF ALU/ALU64 operation code: multiplication.
pub const BPF_MUL: u8 = 0x20;
/// BPF ALU/ALU64 operation code: division.
pub const BPF_DIV: u8 = 0x30;
/// BPF ALU/ALU64 operation code: or.
pub const BPF_OR: u8 = 0x40;
/// BPF ALU/ALU64 operation code: and.
pub const BPF_AND: u8 = 0x50;
/// BPF ALU/ALU64 operation code: left shift.
pub const BPF_LSH: u8 = 0x60;
/// BPF ALU/ALU64 operation code: right shift.
pub const BPF_RSH: u8 = 0x70;
/// BPF ALU/ALU64 operation code: negation.
pub const BPF_NEG: u8 = 0x80;
/// BPF ALU/ALU64 operation code: modulus.
pub const BPF_MOD: u8 = 0x90;
/// BPF ALU/ALU64 operation code: exclusive or.
pub const BPF_XOR: u8 = 0xa0;
/// BPF ALU/ALU64 operation code: move.
pub const BPF_MOV: u8 = 0xb0;
/// BPF ALU/ALU64 operation code: sign-extending right shift.
pub const BPF_ARSH: u8 = 0xc0;
/// BPF ALU/ALU64 operation code: endianness conversion.
pub const BPF_END: u8 = 0xd0;

// Operation codes -- BPF_JMP or BPF_JMP32 classes:
/// BPF JMP operation code: unconditional jump.
pub const BPF_JA: u8 = 0x00;
/// BPF JMP operation code: jump if equal.
pub const BPF_JEQ: u8 = 0x10;
/// BPF JMP operation code: jump if greater than.
pub const BPF_JGT: u8 = 0x20;
/// BPF JMP operation code: jump if greater or equal.
pub const BPF_JGE: u8 = 0x30;
/// BPF JMP operation code: jump if `dst & src` is non-zero.
pub const BPF_JSET: u8 = 0x40;
/// BPF JMP operation code: jump if not equal.
pub const BPF_JNE: u8 = 0x50;
/// BPF JMP operation code: jump if greater than (signed).
pub const BPF_JSGT: u8 = 0x60;
/// BPF JMP operation code: jump if greater or equal (signed).
pub const BPF_JSGE: u8 = 0x70;
/// BPF JMP operation code: helper function call.
pub const BPF_CALL: u8 = 0x80;
/// BPF JMP operation code: return from program.
pub const BPF_EXIT: u8 = 0x90;
/// BPF JMP operation code: jump if lower than.
pub const BPF_JLT: u8 = 0xa0;
/// BPF JMP operation code: jump if lower or equal.
pub const BPF_JLE: u8 = 0xb0;
/// BPF JMP operation code: jump if lower than (signed).
pub const BPF_JSLT: u8 = 0xc0;
/// BPF JMP operation code: jump if lower or equal (signed).
pub const BPF_JSLE: u8 = 0xd0;

// Op codes
// (The following operation names are not “official”, and may be specific to rbpf; the Linux
// kernel only combines the flags above and does not assign a name to each operation.)

/// BPF opcode: `ldabsb src, dst, imm`.
pub const LD_ABS_B: u8 = BPF_LD | BPF_ABS | BPF_B;
/// BPF opcode: `ldabsh src, dst, imm`.
pub const LD_ABS_H: u8 = BPF_LD | BPF_ABS | BPF_H;
/// BPF opcode: `ldabsw src, dst, imm`.
pub const LD_ABS_W: u8 = BPF_LD | BPF_ABS | BPF_W;
/// BPF opcode: `ldabsdw src, dst, imm`.
pub const LD_ABS_DW: u8 = BPF_LD | BPF_ABS | BPF_DW;
/// BPF opcode: `ldindb src, dst, imm`.
pub const LD_IND_B: u8 = BPF_LD | BPF_IND | BPF_B;
/// BPF opcode: `ldindh src, dst, imm`.
pub const LD_IND_H: u8 = BPF_LD | BPF_IND | BPF_H;
/// BPF opcode: `ldindw src, dst, imm`.
pub const LD_IND_W: u8 = BPF_LD | BPF_IND | BPF_W;
/// BPF opcode: `ldinddw src, dst, imm`.
pub const LD_IND_DW: u8 = BPF_LD | BPF_IND | BPF_DW;

#[allow(unknown_lints)]
#[allow(clippy::eq_op)]
/// BPF opcode: `lddw dst, imm` /// `dst = imm`.
pub const LD_DW_IMM: u8 = BPF_LD | BPF_IMM | BPF_DW;
/// BPF opcode: `ldxb dst, [src + off]` /// `dst = mem[src + off] as u8`.
pub const LD_B_REG: u8 = BPF_LDX | BPF_MEM | BPF_B;
/// BPF opcode: `ldxh dst, [src + off]` /// `dst = mem[src + off] as u16`.
pub const LD_H_REG: u8 = BPF_LDX | BPF_MEM | BPF_H;
/// BPF opcode: `ldxw dst, [src + off]` /// `dst = mem[src + off] as u32`.
pub const LD_W_REG: u8 = BPF_LDX | BPF_MEM | BPF_W;
/// BPF opcode: `ldxdw dst, [src + off]` /// `dst = mem[src + off] as u64`.
pub const LD_DW_REG: u8 = BPF_LDX | BPF_MEM | BPF_DW;
/// BPF opcode: `stb [dst + off], imm` /// `mem[dst + off] = imm as u8`.
pub const ST_B_IMM: u8 = BPF_ST | BPF_MEM | BPF_B;
/// BPF opcode: `sth [dst + off], imm` /// `mem[dst + off] = imm as u16`.
pub const ST_H_IMM: u8 = BPF_ST | BPF_MEM | BPF_H;
/// BPF opcode: `stw [dst + off], imm` /// `mem[dst + off] = imm as u32`.
pub const ST_W_IMM: u8 = BPF_ST | BPF_MEM | BPF_W;
/// BPF opcode: `stdw [dst + off], imm` /// `mem[dst + off] = imm as u64`.
pub const ST_DW_IMM: u8 = BPF_ST | BPF_MEM | BPF_DW;
/// BPF opcode: `stxb [dst + off], src` /// `mem[dst + off] = src as u8`.
pub const ST_B_REG: u8 = BPF_STX | BPF_MEM | BPF_B;
/// BPF opcode: `stxh [dst + off], src` /// `mem[dst + off] = src as u16`.
pub const ST_H_REG: u8 = BPF_STX | BPF_MEM | BPF_H;
/// BPF opcode: `stxw [dst + off], src` /// `mem[dst + off] = src as u32`.
pub const ST_W_REG: u8 = BPF_STX | BPF_MEM | BPF_W;
/// BPF opcode: `stxdw [dst + off], src` /// `mem[dst + off] = src as u64`.
pub const ST_DW_REG: u8 = BPF_STX | BPF_MEM | BPF_DW;

/// BPF opcode: `stxxaddw [dst + off], src` /// `mem[dst + off] += src` (exclusive add, word).
pub const ST_W_XADD: u8 = BPF_STX | BPF_XADD | BPF_W;
/// BPF opcode: `stxxadddw [dst + off], src` /// `mem[dst + off] += src` (exclusive add, double word).
pub const ST_DW_XADD: u8 = BPF_STX | BPF_XADD | BPF_DW;

/// BPF opcode: `add32 dst, imm` /// `dst += imm`.
pub const ADD32_IMM: u8 = BPF_ALU | BPF_K | BPF_ADD;
/// BPF opcode: `add32 dst, src` /// `dst += src`.
pub const ADD32_REG: u8 = BPF_ALU | BPF_X | BPF_ADD;
/// BPF opcode: `sub32 dst, imm` /// `dst -= imm`.
pub const SUB32_IMM: u8 = BPF_ALU | BPF_K | BPF_SUB;
/// BPF opcode: `sub32 dst, src` /// `dst -= src`.
pub const SUB32_REG: u8 = BPF_ALU | BPF_X | BPF_SUB;
/// BPF opcode: `mul32 dst, imm` /// `dst *= imm`.
pub const MUL32_IMM: u8 = BPF_ALU | BPF_K | BPF_MUL;
/// BPF opcode: `mul32 dst, src` /// `dst *= src`.
pub const MUL32_REG: u8 = BPF_ALU | BPF_X | BPF_MUL;
/// BPF opcode: `div32 dst, imm` /// `dst /= imm`.
pub const DIV32_IMM: u8 = BPF_ALU | BPF_K | BPF_DIV;
/// BPF opcode: `div32 dst, src` /// `dst /= src`.
pub const DIV32_REG: u8 = BPF_ALU | BPF_X | BPF_DIV;
/// BPF opcode: `or32 dst, imm` /// `dst |= imm`.
pub const OR32_IMM: u8 = BPF_ALU | BPF_K | BPF_OR;
/// BPF opcode: `or32 dst, src` /// `dst |= src`.
pub const OR32_REG: u8 = BPF_ALU | BPF_X | BPF_OR;
/// BPF opcode: `and32 dst, imm` /// `dst &= imm`.
pub const AND32_IMM: u8 = BPF_ALU | BPF_K | BPF_AND;
/// BPF opcode: `and32 dst, src` /// `dst &= src`.
pub const AND32_REG: u8 = BPF_ALU | BPF_X | BPF_AND;
/// BPF opcode: `lsh32 dst, imm` /// `dst <<= imm`.
pub const LSH32_IMM: u8 = BPF_ALU | BPF_K | BPF_LSH;
/// BPF opcode: `lsh32 dst, src` /// `dst <<= src`.
pub const LSH32_REG: u8 = BPF_ALU | BPF_X | BPF_LSH;
/// BPF opcode: `rsh32 dst, imm` /// `dst >>= imm`.
pub const RSH32_IMM: u8 = BPF_ALU | BPF_K | BPF_RSH;
/// BPF opcode: `rsh32 dst, src` /// `dst >>= src`.
pub const RSH32_REG: u8 = BPF_ALU | BPF_X | BPF_RSH;
/// BPF opcode: `neg32 dst` /// `dst = -dst`.
pub const NEG32: u8 = BPF_ALU | BPF_NEG;
/// BPF opcode: `mod32 dst, imm` /// `dst %= imm`.
pub const MOD32_IMM: u8 = BPF_ALU | BPF_K | BPF_MOD;
/// BPF opcode: `mod32 dst, src` /// `dst %= src`.
pub const MOD32_REG: u8 = BPF_ALU | BPF_X | BPF_MOD;
/// BPF opcode: `xor32 dst, imm` /// `dst ^= imm`.
pub const XOR32_IMM: u8 = BPF_ALU | BPF_K | BPF_XOR;
/// BPF opcode: `xor32 dst, src` /// `dst ^= src`.
pub const XOR32_REG: u8 = BPF_ALU | BPF_X | BPF_XOR;
/// BPF opcode: `mov32 dst, imm` /// `dst = imm`.
pub const MOV32_IMM: u8 = BPF_ALU | BPF_K | BPF_MOV;
/// BPF opcode: `mov32 dst, src` /// `dst = src`.
pub const MOV32_REG: u8 = BPF_ALU | BPF_X | BPF_MOV;
/// BPF opcode: `arsh32 dst, imm` /// `dst >>= imm (arithmetic)`.
///
/// <https://en.wikipedia.org/wiki/Arithmetic_shift>
pub const ARSH32_IMM: u8 = BPF_ALU | BPF_K | BPF_ARSH;
/// BPF opcode: `arsh32 dst, src` /// `dst >>= src (arithmetic)`.
///
/// <https://en.wikipedia.org/wiki/Arithmetic_shift>
pub const ARSH32_REG: u8 = BPF_ALU | BPF_X | BPF_ARSH;

/// BPF opcode: `le dst` /// `dst = htole<imm>(dst), with imm in {16, 32, 64}`.
pub const LE: u8 = BPF_ALU | BPF_K | BPF_END;
/// BPF opcode: `be dst` /// `dst = htobe<imm>(dst), with imm in {16, 32, 64}`.
pub const BE: u8 = BPF_ALU | BPF_X | BPF_END;
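
// Illustration only: both endianness conversions live in the BPF_ALU class and differ only in
// the source bit; the width to convert (16, 32 or 64 bits) is carried by the instruction's
// `imm` field rather than by the opcode itself.
const _: () = assert!(BE == (LE | BPF_X));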

/// BPF opcode: `add64 dst, imm` /// `dst += imm`.
pub const ADD64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_ADD;
/// BPF opcode: `add64 dst, src` /// `dst += src`.
pub const ADD64_REG: u8 = BPF_ALU64 | BPF_X | BPF_ADD;
/// BPF opcode: `sub64 dst, imm` /// `dst -= imm`.
pub const SUB64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_SUB;
/// BPF opcode: `sub64 dst, src` /// `dst -= src`.
pub const SUB64_REG: u8 = BPF_ALU64 | BPF_X | BPF_SUB;
/// BPF opcode: `mul64 dst, imm` /// `dst *= imm`.
pub const MUL64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_MUL;
/// BPF opcode: `mul64 dst, src` /// `dst *= src`.
pub const MUL64_REG: u8 = BPF_ALU64 | BPF_X | BPF_MUL;
/// BPF opcode: `div64 dst, imm` /// `dst /= imm`.
pub const DIV64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_DIV;
/// BPF opcode: `div64 dst, src` /// `dst /= src`.
pub const DIV64_REG: u8 = BPF_ALU64 | BPF_X | BPF_DIV;
/// BPF opcode: `or64 dst, imm` /// `dst |= imm`.
pub const OR64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_OR;
/// BPF opcode: `or64 dst, src` /// `dst |= src`.
pub const OR64_REG: u8 = BPF_ALU64 | BPF_X | BPF_OR;
/// BPF opcode: `and64 dst, imm` /// `dst &= imm`.
pub const AND64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_AND;
/// BPF opcode: `and64 dst, src` /// `dst &= src`.
pub const AND64_REG: u8 = BPF_ALU64 | BPF_X | BPF_AND;
/// BPF opcode: `lsh64 dst, imm` /// `dst <<= imm`.
pub const LSH64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_LSH;
/// BPF opcode: `lsh64 dst, src` /// `dst <<= src`.
pub const LSH64_REG: u8 = BPF_ALU64 | BPF_X | BPF_LSH;
/// BPF opcode: `rsh64 dst, imm` /// `dst >>= imm`.
pub const RSH64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_RSH;
/// BPF opcode: `rsh64 dst, src` /// `dst >>= src`.
pub const RSH64_REG: u8 = BPF_ALU64 | BPF_X | BPF_RSH;
/// BPF opcode: `neg64 dst` /// `dst = -dst`.
pub const NEG64: u8 = BPF_ALU64 | BPF_NEG;
/// BPF opcode: `mod64 dst, imm` /// `dst %= imm`.
pub const MOD64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_MOD;
/// BPF opcode: `mod64 dst, src` /// `dst %= src`.
pub const MOD64_REG: u8 = BPF_ALU64 | BPF_X | BPF_MOD;
/// BPF opcode: `xor64 dst, imm` /// `dst ^= imm`.
pub const XOR64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_XOR;
/// BPF opcode: `xor64 dst, src` /// `dst ^= src`.
pub const XOR64_REG: u8 = BPF_ALU64 | BPF_X | BPF_XOR;
/// BPF opcode: `mov64 dst, imm` /// `dst = imm`.
pub const MOV64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_MOV;
/// BPF opcode: `mov64 dst, src` /// `dst = src`.
pub const MOV64_REG: u8 = BPF_ALU64 | BPF_X | BPF_MOV;
/// BPF opcode: `arsh64 dst, imm` /// `dst >>= imm (arithmetic)`.
///
/// <https://en.wikipedia.org/wiki/Arithmetic_shift>
pub const ARSH64_IMM: u8 = BPF_ALU64 | BPF_K | BPF_ARSH;
/// BPF opcode: `arsh64 dst, src` /// `dst >>= src (arithmetic)`.
///
/// <https://en.wikipedia.org/wiki/Arithmetic_shift>
pub const ARSH64_REG: u8 = BPF_ALU64 | BPF_X | BPF_ARSH;

/// BPF opcode: `ja +off` /// `PC += off`.
pub const JA: u8 = BPF_JMP | BPF_JA;
/// BPF opcode: `jeq dst, imm, +off` /// `PC += off if dst == imm`.
pub const JEQ_IMM: u8 = BPF_JMP | BPF_K | BPF_JEQ;
/// BPF opcode: `jeq dst, src, +off` /// `PC += off if dst == src`.
pub const JEQ_REG: u8 = BPF_JMP | BPF_X | BPF_JEQ;
/// BPF opcode: `jgt dst, imm, +off` /// `PC += off if dst > imm`.
pub const JGT_IMM: u8 = BPF_JMP | BPF_K | BPF_JGT;
/// BPF opcode: `jgt dst, src, +off` /// `PC += off if dst > src`.
pub const JGT_REG: u8 = BPF_JMP | BPF_X | BPF_JGT;
/// BPF opcode: `jge dst, imm, +off` /// `PC += off if dst >= imm`.
pub const JGE_IMM: u8 = BPF_JMP | BPF_K | BPF_JGE;
/// BPF opcode: `jge dst, src, +off` /// `PC += off if dst >= src`.
pub const JGE_REG: u8 = BPF_JMP | BPF_X | BPF_JGE;
/// BPF opcode: `jlt dst, imm, +off` /// `PC += off if dst < imm`.
pub const JLT_IMM: u8 = BPF_JMP | BPF_K | BPF_JLT;
/// BPF opcode: `jlt dst, src, +off` /// `PC += off if dst < src`.
pub const JLT_REG: u8 = BPF_JMP | BPF_X | BPF_JLT;
/// BPF opcode: `jle dst, imm, +off` /// `PC += off if dst <= imm`.
pub const JLE_IMM: u8 = BPF_JMP | BPF_K | BPF_JLE;
/// BPF opcode: `jle dst, src, +off` /// `PC += off if dst <= src`.
pub const JLE_REG: u8 = BPF_JMP | BPF_X | BPF_JLE;
/// BPF opcode: `jset dst, imm, +off` /// `PC += off if dst & imm`.
pub const JSET_IMM: u8 = BPF_JMP | BPF_K | BPF_JSET;
/// BPF opcode: `jset dst, src, +off` /// `PC += off if dst & src`.
pub const JSET_REG: u8 = BPF_JMP | BPF_X | BPF_JSET;
/// BPF opcode: `jne dst, imm, +off` /// `PC += off if dst != imm`.
pub const JNE_IMM: u8 = BPF_JMP | BPF_K | BPF_JNE;
/// BPF opcode: `jne dst, src, +off` /// `PC += off if dst != src`.
pub const JNE_REG: u8 = BPF_JMP | BPF_X | BPF_JNE;
/// BPF opcode: `jsgt dst, imm, +off` /// `PC += off if dst > imm (signed)`.
pub const JSGT_IMM: u8 = BPF_JMP | BPF_K | BPF_JSGT;
/// BPF opcode: `jsgt dst, src, +off` /// `PC += off if dst > src (signed)`.
pub const JSGT_REG: u8 = BPF_JMP | BPF_X | BPF_JSGT;
/// BPF opcode: `jsge dst, imm, +off` /// `PC += off if dst >= imm (signed)`.
pub const JSGE_IMM: u8 = BPF_JMP | BPF_K | BPF_JSGE;
/// BPF opcode: `jsge dst, src, +off` /// `PC += off if dst >= src (signed)`.
pub const JSGE_REG: u8 = BPF_JMP | BPF_X | BPF_JSGE;
/// BPF opcode: `jslt dst, imm, +off` /// `PC += off if dst < imm (signed)`.
pub const JSLT_IMM: u8 = BPF_JMP | BPF_K | BPF_JSLT;
/// BPF opcode: `jslt dst, src, +off` /// `PC += off if dst < src (signed)`.
pub const JSLT_REG: u8 = BPF_JMP | BPF_X | BPF_JSLT;
/// BPF opcode: `jsle dst, imm, +off` /// `PC += off if dst <= imm (signed)`.
pub const JSLE_IMM: u8 = BPF_JMP | BPF_K | BPF_JSLE;
/// BPF opcode: `jsle dst, src, +off` /// `PC += off if dst <= src (signed)`.
pub const JSLE_REG: u8 = BPF_JMP | BPF_X | BPF_JSLE;

/// BPF opcode: `jeq dst, imm, +off` /// `PC += off if (dst as u32) == imm`.
pub const JEQ_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JEQ;
/// BPF opcode: `jeq dst, src, +off` /// `PC += off if (dst as u32) == (src as u32)`.
pub const JEQ_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JEQ;
/// BPF opcode: `jgt dst, imm, +off` /// `PC += off if (dst as u32) > imm`.
pub const JGT_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JGT;
/// BPF opcode: `jgt dst, src, +off` /// `PC += off if (dst as u32) > (src as u32)`.
pub const JGT_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JGT;
/// BPF opcode: `jge dst, imm, +off` /// `PC += off if (dst as u32) >= imm`.
pub const JGE_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JGE;
/// BPF opcode: `jge dst, src, +off` /// `PC += off if (dst as u32) >= (src as u32)`.
pub const JGE_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JGE;
/// BPF opcode: `jlt dst, imm, +off` /// `PC += off if (dst as u32) < imm`.
pub const JLT_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JLT;
/// BPF opcode: `jlt dst, src, +off` /// `PC += off if (dst as u32) < (src as u32)`.
pub const JLT_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JLT;
/// BPF opcode: `jle dst, imm, +off` /// `PC += off if (dst as u32) <= imm`.
pub const JLE_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JLE;
/// BPF opcode: `jle dst, src, +off` /// `PC += off if (dst as u32) <= (src as u32)`.
pub const JLE_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JLE;
/// BPF opcode: `jset dst, imm, +off` /// `PC += off if (dst as u32) & imm`.
pub const JSET_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JSET;
/// BPF opcode: `jset dst, src, +off` /// `PC += off if (dst as u32) & (src as u32)`.
pub const JSET_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JSET;
/// BPF opcode: `jne dst, imm, +off` /// `PC += off if (dst as u32) != imm`.
pub const JNE_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JNE;
/// BPF opcode: `jne dst, src, +off` /// `PC += off if (dst as u32) != (src as u32)`.
pub const JNE_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JNE;
/// BPF opcode: `jsgt dst, imm, +off` /// `PC += off if (dst as i32) > imm (signed)`.
pub const JSGT_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JSGT;
/// BPF opcode: `jsgt dst, src, +off` /// `PC += off if (dst as i32) > (src as i32) (signed)`.
pub const JSGT_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JSGT;
/// BPF opcode: `jsge dst, imm, +off` /// `PC += off if (dst as i32) >= imm (signed)`.
pub const JSGE_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JSGE;
/// BPF opcode: `jsge dst, src, +off` /// `PC += off if (dst as i32) >= (src as i32) (signed)`.
pub const JSGE_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JSGE;
/// BPF opcode: `jslt dst, imm, +off` /// `PC += off if (dst as i32) < imm (signed)`.
pub const JSLT_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JSLT;
/// BPF opcode: `jslt dst, src, +off` /// `PC += off if (dst as i32) < (src as i32) (signed)`.
pub const JSLT_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JSLT;
/// BPF opcode: `jsle dst, imm, +off` /// `PC += off if (dst as i32) <= imm (signed)`.
pub const JSLE_IMM32: u8 = BPF_JMP32 | BPF_K | BPF_JSLE;
/// BPF opcode: `jsle dst, src, +off` /// `PC += off if (dst as i32) <= (src as i32) (signed)`.
pub const JSLE_REG32: u8 = BPF_JMP32 | BPF_X | BPF_JSLE;

/// BPF opcode: `call imm` /// call the helper function registered with key `imm`.
pub const CALL: u8 = BPF_JMP | BPF_CALL;
/// BPF opcode: tail call.
pub const TAIL_CALL: u8 = BPF_JMP | BPF_X | BPF_CALL;
/// BPF opcode: `exit` /// `return r0`.
pub const EXIT: u8 = BPF_JMP | BPF_EXIT;

// Used in JIT
/// Mask to extract the operation class from an operation code.
pub const BPF_CLS_MASK: u8 = 0x07;
/// Mask to extract the arithmetic operation code from an instruction operation code.
pub const BPF_ALU_OP_MASK: u8 = 0xf0;
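
// Illustration only: the two masks above pick an opcode apart. Taking ADD64_REG (0x0f) as an
// example, the class bits give BPF_ALU64 and the operation bits give BPF_ADD.
const _: () = assert!((ADD64_REG & BPF_CLS_MASK) == BPF_ALU64);
const _: () = assert!((ADD64_REG & BPF_ALU_OP_MASK) == BPF_ADD);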

/// Prototype of an eBPF helper function.
pub type Helper = fn(u64, u64, u64, u64, u64) -> u64;
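
// A minimal sketch (hypothetical, not part of the original API) of a function matching the
// `Helper` prototype above: it adds its first two arguments and ignores the remaining three.
#[allow(dead_code)]
fn example_helper(a: u64, b: u64, _c: u64, _d: u64, _e: u64) -> u64 {
    a.wrapping_add(b)
}
// Type-checks the example against the `Helper` prototype.
const _: Helper = example_helper;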

/// An eBPF instruction.
///
/// See <https://www.kernel.org/doc/Documentation/networking/filter.txt> for the Linux kernel
/// documentation about eBPF, or <https://github.com/iovisor/bpf-docs/blob/master/eBPF.md> for a
/// more concise version.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Insn {
    /// Operation code.
    pub opc: u8,
    /// Destination register operand.
    pub dst: u8,
    /// Source register operand.
    pub src: u8,
    /// Offset operand.
    pub off: i16,
    /// Immediate value operand.
    pub imm: i32,
}

impl Insn {
    /// Turn an `Insn` back into an array of bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use rbpf::ebpf;
    ///
    /// let prog: &[u8] = &[
    ///     0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78,
    /// ];
    /// let insn = ebpf::Insn {
    ///     opc: 0xb7,
    ///     dst: 2,
    ///     src: 1,
    ///     off: 0x3456,
    ///     imm: 0x789abcde
    /// };
    /// assert_eq!(insn.to_array(), prog);
    /// ```
    pub fn to_array(&self) -> [u8; INSN_SIZE] {
        [
            self.opc,
            self.src.wrapping_shl(4) | self.dst,
            (self.off & 0xff) as u8,
            self.off.wrapping_shr(8) as u8,
            (self.imm & 0xff) as u8,
            (self.imm & 0xff_00).wrapping_shr(8) as u8,
            (self.imm as u32 & 0xff_00_00).wrapping_shr(16) as u8,
            (self.imm as u32 & 0xff_00_00_00).wrapping_shr(24) as u8,
        ]
    }

    /// Turn an `Insn` into a vector of bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use rbpf::ebpf;
    ///
    /// let prog: Vec<u8> = vec![
    ///     0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78,
    /// ];
    /// let insn = ebpf::Insn {
    ///     opc: 0xb7,
    ///     dst: 2,
    ///     src: 1,
    ///     off: 0x3456,
    ///     imm: 0x789abcde
    /// };
    /// assert_eq!(insn.to_vec(), prog);
    /// ```
    pub fn to_vec(&self) -> Vec<u8> {
        vec![
            self.opc,
            self.src.wrapping_shl(4) | self.dst,
            (self.off & 0xff) as u8,
            self.off.wrapping_shr(8) as u8,
            (self.imm & 0xff) as u8,
            (self.imm & 0xff_00).wrapping_shr(8) as u8,
            (self.imm as u32 & 0xff_00_00).wrapping_shr(16) as u8,
            (self.imm as u32 & 0xff_00_00_00).wrapping_shr(24) as u8,
        ]
    }
}

/// Get the instruction at `idx` of an eBPF program. `idx` is the index (number) of the
/// instruction (not a byte offset). The first instruction has index 0.
///
/// # Panics
///
/// Panics if it is not possible to get the instruction (if `idx` is too high, or if the last
/// instruction is incomplete).
///
/// # Examples
///
/// ```
/// use rbpf::ebpf;
///
/// let prog = &[
///     0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
/// ];
/// let insn = ebpf::get_insn(prog, 1);
/// assert_eq!(insn.opc, 0x95);
/// ```
///
/// The example below will panic, since the last instruction is not complete and cannot be loaded.
///
/// ```rust,should_panic
/// use rbpf::ebpf;
///
/// let prog = &[
///     0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00 // two bytes missing
/// ];
/// let insn = ebpf::get_insn(prog, 1);
/// ```
pub fn get_insn(prog: &[u8], idx: usize) -> Insn {
    // This guard should not be needed in most cases, since the verifier already checks the program
    // size, and indexes should be fine in the interpreter/JIT. But this function is publicly
    // available and users can call it with any `idx`, so we have to check anyway.
    if (idx + 1) * INSN_SIZE > prog.len() {
        panic!(
            "Error: cannot reach instruction at index {:?} in program containing {:?} bytes",
            idx,
            prog.len()
        );
    }
    Insn {
        opc: prog[INSN_SIZE * idx],
        dst: prog[INSN_SIZE * idx + 1] & 0x0f,
        src: (prog[INSN_SIZE * idx + 1] & 0xf0) >> 4,
        off: LittleEndian::read_i16(&prog[(INSN_SIZE * idx + 2)..]),
        imm: LittleEndian::read_i32(&prog[(INSN_SIZE * idx + 4)..]),
    }
}

/// Return a vector of `struct Insn` built from a program.
///
/// This is provided as a convenience for users wishing to manipulate a vector of instructions, for
/// example for dumping the program instruction after instruction with a custom format.
///
/// Note that the two halves of an `LD_DW_IMM` instruction (which spans two 64-bit instruction
/// slots) are returned as two distinct instructions.
///
/// # Examples
///
/// ```
/// use rbpf::ebpf;
///
/// let prog = &[
///     0x18, 0x00, 0x00, 0x00, 0x88, 0x77, 0x66, 0x55,
///     0x00, 0x00, 0x00, 0x00, 0x44, 0x33, 0x22, 0x11,
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
/// ];
///
/// let v = ebpf::to_insn_vec(prog);
/// assert_eq!(v, vec![
///     ebpf::Insn {
///         opc: 0x18,
///         dst: 0,
///         src: 0,
///         off: 0,
///         imm: 0x55667788
///     },
///     ebpf::Insn {
///         opc: 0,
///         dst: 0,
///         src: 0,
///         off: 0,
///         imm: 0x11223344
///     },
///     ebpf::Insn {
///         opc: 0x95,
///         dst: 0,
///         src: 0,
///         off: 0,
///         imm: 0
///     },
/// ]);
/// ```
pub fn to_insn_vec(prog: &[u8]) -> Vec<Insn> {
    if prog.len() % INSN_SIZE != 0 {
        panic!(
            "Error: eBPF program length must be a multiple of {:?} octets",
            INSN_SIZE
        );
    }

    let mut res = vec![];
    let mut insn_ptr: usize = 0;

    while insn_ptr * INSN_SIZE < prog.len() {
        let insn = get_insn(prog, insn_ptr);
        res.push(insn);
        insn_ptr += 1;
    }
    res
}