// xref: /DragonOS/kernel/crates/rbpf/src/interpreter.rs (revision 7b0ef10895108a0de5ff5ef3d2f93f40cf2e33a5)
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
//      (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
//      (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)

use crate::{
    ebpf::{self, Insn},
    helpers::BPF_FUNC_MAPPER,
    stack::StackFrame,
    *,
};

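// `check_mem` comes in two flavors: in kernel builds (no "user" feature) it
// only traces the access and performs no actual validation, while in "user"
// builds it verifies that the whole access stays inside the metadata buffer,
// the packet memory, or the current stack frame.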
#[cfg(not(feature = "user"))]
#[allow(unused)]
fn check_mem(
    addr: u64,
    len: usize,
    access_type: &str,
    insn_ptr: usize,
    mbuff: &[u8],
    mem: &[u8],
    stack: &[u8],
) -> Result<(), Error> {
    log::trace!(
        "check_mem: addr {:#x}, len {}, access_type {}, insn_ptr {}",
        addr,
        len,
        access_type,
        insn_ptr
    );
    log::trace!(
        "check_mem: mbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
        mbuff.as_ptr() as u64,
        mbuff.len(),
        mem.as_ptr() as u64,
        mem.len(),
        stack.as_ptr() as u64,
        stack.len()
    );
    Ok(())
}

#[cfg(feature = "user")]
fn check_mem(
    addr: u64,
    len: usize,
    access_type: &str,
    insn_ptr: usize,
    mbuff: &[u8],
    mem: &[u8],
    stack: &[u8],
) -> Result<(), Error> {
    if let Some(addr_end) = addr.checked_add(len as u64) {
        if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
            return Ok(());
        }
        if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
            return Ok(());
        }
        if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
            return Ok(());
        }
    }

    Err(Error::new(ErrorKind::Other, format!(
        "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
        access_type, insn_ptr, addr, len,
        mbuff.as_ptr() as u64, mbuff.len(),
        mem.as_ptr() as u64, mem.len(),
        stack.as_ptr() as u64, stack.len()
    )))
}

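/// Apply a relative jump. eBPF jump offsets are counted in instructions and
/// are relative to the instruction *following* the jump; the dispatch loop has
/// already advanced `insn_ptr` past the current instruction, so a plain signed
/// add is all that is needed here.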
#[inline]
fn do_jump(insn_ptr: &mut usize, insn: &Insn) {
    // Use full-width signed arithmetic: casting `insn_ptr` to i16 would
    // truncate (and could overflow) for programs longer than 32767 insns.
    *insn_ptr = (*insn_ptr as isize + insn.off as isize) as usize;
}

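/// Run `prog_` to completion and return the value left in `r0` by its `EXIT`
/// instruction. `mem` is the packet (or plain data) area and `mbuff` the
/// optional metadata buffer: `r1` is pointed at `mbuff` when it is non-empty,
/// otherwise at `mem`. Helper calls are resolved through the `helpers` map.
///
/// A minimal usage sketch (hedged: it assumes this function is reachable from
/// the crate root and relies only on the standard eBPF encoding of
/// `mov32 r0, 3; exit`):
///
/// ```ignore
/// use std::collections::HashMap;
///
/// let prog = &[
///     0xb4, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, // mov32 r0, 3
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
/// ];
/// let ret = execute_program(Some(prog), &[], &[], &HashMap::new()).unwrap();
/// assert_eq!(ret, 3);
/// ```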
#[allow(unknown_lints)]
#[allow(cyclomatic_complexity)]
pub fn execute_program(
    prog_: Option<&[u8]>,
    mem: &[u8],
    mbuff: &[u8],
    helpers: &HashMap<u32, ebpf::Helper>,
) -> Result<u64, Error> {
    const U32MAX: u64 = u32::MAX as u64;
    const SHIFT_MASK_64: u64 = 0x3f;

    let prog = match prog_ {
        Some(prog) => prog,
        None => Err(Error::new(
            ErrorKind::Other,
            "Error: No program set, call prog_set() to load one",
        ))?,
    };
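    // One StackFrame per live call frame: the first frame backs the main
    // program, and bpf-to-bpf calls push/pop additional frames. R10 always
    // holds the (exclusive) top of the current frame.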
    let mut stacks = Vec::new();
    let stack = StackFrame::new();
    // R1 points to beginning of memory area, R10 to stack
    let mut reg: [u64; 11] = [0; 11];
    reg[10] = stack.as_ptr() as u64 + stack.len() as u64;
    stacks.push(stack);
    if !mbuff.is_empty() {
        reg[1] = mbuff.as_ptr() as u64;
    } else if !mem.is_empty() {
        reg[1] = mem.as_ptr() as u64;
    }
    let check_mem_load =
        |stack: &[u8], addr: u64, len: usize, insn_ptr: usize| -> Result<(), Error> {
            check_mem(addr, len, "load", insn_ptr, mbuff, mem, stack)
        };
    let check_mem_store =
        |stack: &[u8], addr: u64, len: usize, insn_ptr: usize| -> Result<(), Error> {
            check_mem(addr, len, "store", insn_ptr, mbuff, mem, stack)
        };

    // Loop on instructions
    let mut insn_ptr: usize = 0;
    while insn_ptr * ebpf::INSN_SIZE < prog.len() {
        let insn = ebpf::get_insn(prog, insn_ptr);
        insn_ptr += 1;
        let _dst = insn.dst as usize;
        let _src = insn.src as usize;

        match insn.opc {
            // BPF_LD class
            // LD_ABS_* and LD_IND_* are supposed to load pointer to data from metadata buffer.
            // Since this pointer is constant, and since we already know it (mem), do not
            // bother re-fetching it, just use mem already.
            // Note: the bounds check covers exactly the width of each access
            // (1/2/4/8 bytes), matching the BPF_LDX arms below.
            ebpf::LD_ABS_B => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_ABS_H => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_ABS_W => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_ABS_DW => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned()
                }
            }
            ebpf::LD_IND_B => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_IND_H => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_IND_W => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_IND_DW => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned()
                }
            }

            ebpf::LD_DW_IMM => {
                let next_insn = ebpf::get_insn(prog, insn_ptr);
                insn_ptr += 1;
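                // LD_DW_IMM occupies two instruction slots: the low 32 bits of
                // the constant come from this insn's imm, the high 32 bits from
                // the imm of the pseudo-insn consumed just above.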
                reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
            }

            // BPF_LDX class
            ebpf::LD_B_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize);
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_H_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_W_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_DW_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned()
                }
            }

            // BPF_ST class
            ebpf::ST_B_IMM => unsafe {
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                x.write_unaligned(insn.imm as u8);
            },
            ebpf::ST_H_IMM => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                x.write_unaligned(insn.imm as u16);
            },
            ebpf::ST_W_IMM => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                x.write_unaligned(insn.imm as u32);
            },
            ebpf::ST_DW_IMM => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                x.write_unaligned(insn.imm as u64);
            },

            // BPF_STX class
            ebpf::ST_B_REG => unsafe {
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                x.write_unaligned(reg[_src] as u8);
            },
            ebpf::ST_H_REG => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                x.write_unaligned(reg[_src] as u16);
            },
            ebpf::ST_W_REG => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                x.write_unaligned(reg[_src] as u32);
            },
            ebpf::ST_DW_REG => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                x.write_unaligned(reg[_src]);
            },
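            // Atomic add (XADD) is not implemented by this interpreter.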
            ebpf::ST_W_XADD => unimplemented!(),
            ebpf::ST_DW_XADD => unimplemented!(),

            // BPF_ALU class
            // TODO Check how overflow works in kernel. Should we &= U32MAX all src register value
            // before we do the operation?
            // Cf ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32)
            ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64,
            ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64,
            ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
            ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
            ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
            ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
            ebpf::DIV32_IMM if insn.imm as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
            ebpf::DIV32_REG if reg[_src] as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_REG => reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
            ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
            ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
            ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
            ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
            // As with the 64-bit version, the shift amount should be masked with
            // 0x1f, but .wrapping_shl()/.wrapping_shr() already take care of it.
            ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
            ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
            ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
            ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
            ebpf::NEG32 => {
                reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64;
                reg[_dst] &= U32MAX;
            }
            ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
            ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
            ebpf::MOD32_REG if reg[_src] as u32 == 0 => (),
            ebpf::MOD32_REG => reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
            ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
            ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
            ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
            ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
            // As with the shifts above, .wrapping_shr() masks the shift amount;
            // the result is then truncated back to 32 bits.
            ebpf::ARSH32_IMM => {
                reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64;
                reg[_dst] &= U32MAX;
            }
            ebpf::ARSH32_REG => {
                reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64;
                reg[_dst] &= U32MAX;
            }
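            // Byte-swap instructions: imm selects the width (16/32/64). LE is
            // a no-op on little-endian hosts; BE swaps to big-endian order.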
            ebpf::LE => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_le() as u64,
                    32 => (reg[_dst] as u32).to_le() as u64,
                    64 => reg[_dst].to_le(),
                    _ => unreachable!(),
                };
            }
            ebpf::BE => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_be() as u64,
                    32 => (reg[_dst] as u32).to_be() as u64,
                    64 => reg[_dst].to_be(),
                    _ => unreachable!(),
                };
            }

            // BPF_ALU64 class
            ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
            ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
            ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
            ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
            ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
            ebpf::DIV64_IMM if insn.imm == 0 => reg[_dst] = 0,
            ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
            ebpf::DIV64_REG if reg[_src] == 0 => reg[_dst] = 0,
            ebpf::DIV64_REG => reg[_dst] /= reg[_src],
            ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
            ebpf::OR64_REG => reg[_dst] |= reg[_src],
            ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
            ebpf::AND64_REG => reg[_dst] &= reg[_src],
            ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::LSH64_REG => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
            ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::RSH64_REG => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
            // wrapping_neg avoids a debug-mode overflow panic on i64::MIN,
            // mirroring the NEG32 case above.
            ebpf::NEG64 => reg[_dst] = (reg[_dst] as i64).wrapping_neg() as u64,
            ebpf::MOD64_IMM if insn.imm == 0 => (),
            ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
            ebpf::MOD64_REG if reg[_src] == 0 => (),
            ebpf::MOD64_REG => reg[_dst] %= reg[_src],
            ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
            ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
            ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
            ebpf::MOV64_REG => reg[_dst] = reg[_src],
            ebpf::ARSH64_IMM => {
                reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64)) as u64
            }
            ebpf::ARSH64_REG => {
                reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64
            }

            // BPF_JMP class
            // TODO: check this actually works as expected for signed / unsigned ops
            ebpf::JA => do_jump(&mut insn_ptr, &insn),
            ebpf::JEQ_IMM => {
                if reg[_dst] == insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JEQ_REG => {
                if reg[_dst] == reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_IMM => {
                if reg[_dst] > insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_REG => {
                if reg[_dst] > reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_IMM => {
                if reg[_dst] >= insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_REG => {
                if reg[_dst] >= reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_IMM => {
                if reg[_dst] < insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_REG => {
                if reg[_dst] < reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_IMM => {
                if reg[_dst] <= insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_REG => {
                if reg[_dst] <= reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_IMM => {
                if reg[_dst] & insn.imm as u64 != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_REG => {
                if reg[_dst] & reg[_src] != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_IMM => {
                if reg[_dst] != insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_REG => {
                if reg[_dst] != reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_IMM => {
                if reg[_dst] as i64 > insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_REG => {
                if reg[_dst] as i64 > reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_IMM => {
                if reg[_dst] as i64 >= insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_REG => {
                if reg[_dst] as i64 >= reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_IMM => {
                if (reg[_dst] as i64) < insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_REG => {
                if (reg[_dst] as i64) < reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_IMM => {
                if reg[_dst] as i64 <= insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_REG => {
                if reg[_dst] as i64 <= reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }

            // BPF_JMP32 class
            ebpf::JEQ_IMM32 => {
                if reg[_dst] as u32 == insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JEQ_REG32 => {
                if reg[_dst] as u32 == reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_IMM32 => {
                if reg[_dst] as u32 > insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_REG32 => {
                if reg[_dst] as u32 > reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_IMM32 => {
                if reg[_dst] as u32 >= insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_REG32 => {
                if reg[_dst] as u32 >= reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_IMM32 => {
                if (reg[_dst] as u32) < insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_REG32 => {
                if (reg[_dst] as u32) < reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_IMM32 => {
                if reg[_dst] as u32 <= insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_REG32 => {
                if reg[_dst] as u32 <= reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_IMM32 => {
                if reg[_dst] as u32 & insn.imm as u32 != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_REG32 => {
                if reg[_dst] as u32 & reg[_src] as u32 != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_IMM32 => {
                if reg[_dst] as u32 != insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_REG32 => {
                if reg[_dst] as u32 != reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_IMM32 => {
                if reg[_dst] as i32 > insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_REG32 => {
                if reg[_dst] as i32 > reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_IMM32 => {
                if reg[_dst] as i32 >= insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_REG32 => {
                if reg[_dst] as i32 >= reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_IMM32 => {
                if (reg[_dst] as i32) < insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_REG32 => {
                if (reg[_dst] as i32) < reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_IMM32 => {
                if reg[_dst] as i32 <= insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_REG32 => {
                if reg[_dst] as i32 <= reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }

            // Do not delegate the check to the verifier, since registered functions can be
            // changed after the program has been verified.
            ebpf::CALL => {
                // See https://www.kernel.org/doc/html/latest/bpf/standardization/instruction-set.html#id16
                let src_reg = _src;
                let call_func_res = match src_reg {
                    0 => {
                        // Handle call by address to external function.
                        if let Some(function) = helpers.get(&(insn.imm as u32)) {
                            reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
                            Ok(())
                        } else {
                            Err(format!(
                                "Error: unknown helper function (id: {:#x}) [{}], (instruction #{})",
                                insn.imm as u32,
                                BPF_FUNC_MAPPER.get(insn.imm as usize).unwrap_or(&"unknown"),
                                insn_ptr
                            ))
                        }
                    }
                    1 => {
                        // bpf-to-bpf call: the callee lives in the same program,
                        // so we only need to jump to its first instruction.
                        if stacks.len() >= ebpf::RBPF_MAX_CALL_DEPTH {
                            Err(format!(
                                "Error: bpf to bpf call stack limit reached (instruction #{}) max depth: {}",
                                insn_ptr, ebpf::RBPF_MAX_CALL_DEPTH
                            ))
                        } else {
                            let pre_stack = stacks.last_mut().unwrap();
                            // Save the callee-saved registers
                            pre_stack.save_registers(&reg[6..=9]);
                            // Save the return address
                            pre_stack.save_return_address(insn_ptr as u16);
                            // Save the stack pointer
                            pre_stack.save_sp(reg[10] as u16);
                            let stack = StackFrame::new();
                            log::trace!(
                                "BPF TO BPF CALL: new pc: {} + {} = {}",
                                insn_ptr,
                                insn.imm,
                                insn_ptr as isize + insn.imm as isize
                            );
                            reg[10] = stack.as_ptr() as u64 + stack.len() as u64;
                            stacks.push(stack);
                            // The offset is signed and relative to the next instruction.
                            insn_ptr = (insn_ptr as isize + insn.imm as isize) as usize;
                            Ok(())
                        }
                    }
                    _ => Err(format!(
                        "Error: unsupported call type (src: {}, imm: {:#x}) [{}] (instruction #{})",
                        src_reg,
                        insn.imm as u32,
                        BPF_FUNC_MAPPER.get(insn.imm as usize).unwrap_or(&"unknown"),
                        insn_ptr
                    )),
                };
                if let Err(e) = call_func_res {
                    Err(Error::new(ErrorKind::Other, e))?;
                }
            }
            ebpf::TAIL_CALL => unimplemented!(),
            ebpf::EXIT => {
                if stacks.len() == 1 {
                    return Ok(reg[0]);
                } else {
                    // Pop the current frame
                    stacks.pop();
                    let stack = stacks.last().unwrap();
                    // Restore the callee-saved registers
                    reg[6..=9].copy_from_slice(&stack.get_registers());
                    // Restore the return address
                    insn_ptr = stack.get_return_address() as usize;
                    // Restore the stack pointer
                    reg[10] = stack.get_sp() as u64;
                    log::trace!("EXIT: new pc: {}", insn_ptr);
                }
            }

            _ => unreachable!(),
        }
    }

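    // The verifier is expected to reject programs that do not terminate with
    // EXIT, so control should never fall off the end of the instruction stream.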
    unreachable!()
}