// SPDX-License-Identifier: (Apache-2.0 OR MIT)
// Derived from uBPF <https://github.com/iovisor/ubpf>
// Copyright 2015 Big Switch Networks, Inc
//      (uBPF: VM architecture, parts of the interpreter, originally in C)
// Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
//      (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)

use crate::{
    ebpf::{self, Insn},
    helpers::BPF_FUNC_MAPPER,
    stack::StackFrame,
    *,
};

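// In kernel builds (without the "user" feature) this check only traces the
// requested access and always succeeds; the user-mode variant below performs
// the actual bounds checking against mbuff, mem and the stack.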
#[cfg(not(feature = "user"))]
#[allow(unused)]
fn check_mem(
    addr: u64,
    len: usize,
    access_type: &str,
    insn_ptr: usize,
    mbuff: &[u8],
    mem: &[u8],
    stack: &[u8],
) -> Result<(), Error> {
    log::trace!(
        "check_mem: addr {:#x}, len {}, access_type {}, insn_ptr {}",
        addr,
        len,
        access_type,
        insn_ptr
    );
    log::trace!(
        "check_mem: mbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
        mbuff.as_ptr() as u64,
        mbuff.len(),
        mem.as_ptr() as u64,
        mem.len(),
        stack.as_ptr() as u64,
        stack.len()
    );
    Ok(())
}

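/// Checks that the access `addr .. addr + len` lies entirely within one of the
/// three regions a program may touch: the metadata buffer (`mbuff`), the
/// packet data (`mem`) or the current stack frame.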
#[cfg(feature = "user")]
fn check_mem(
    addr: u64,
    len: usize,
    access_type: &str,
    insn_ptr: usize,
    mbuff: &[u8],
    mem: &[u8],
    stack: &[u8],
) -> Result<(), Error> {
    if let Some(addr_end) = addr.checked_add(len as u64) {
        if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
            return Ok(());
        }
        if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
            return Ok(());
        }
        if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
            return Ok(());
        }
    }

    Err(Error::new(ErrorKind::Other, format!(
        "Error: out of bounds memory {} (insn #{:?}), addr {:#x}, size {:?}\nmbuff: {:#x}/{:#x}, mem: {:#x}/{:#x}, stack: {:#x}/{:#x}",
        access_type, insn_ptr, addr, len,
        mbuff.as_ptr() as u64, mbuff.len(),
        mem.as_ptr() as u64, mem.len(),
        stack.as_ptr() as u64, stack.len()
    )))
}

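/// Applies a pc-relative jump. eBPF jump offsets are counted in instructions
/// and are relative to the instruction *following* the jump, which is why the
/// caller has already advanced `insn_ptr` past the current instruction.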
#[inline]
fn do_jump(insn_ptr: &mut usize, insn: &Insn) {
    *insn_ptr = (*insn_ptr as i16 + insn.off) as usize;
}

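/// Interprets an eBPF program and returns the value left in `r0` when the
/// program executes `EXIT` in its outermost stack frame. `mem` is the packet
/// data, `mbuff` the optional metadata buffer, and `helpers` maps helper ids
/// to host functions reachable through `CALL`.
///
/// A minimal sketch of a call site; the two hand-assembled instructions below
/// encode `mov32 r0, 0` followed by `exit`:
///
/// ```ignore
/// use std::collections::HashMap;
///
/// let prog = [
///     0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov32 r0, 0
///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
/// ];
/// let helpers: HashMap<u32, ebpf::Helper> = HashMap::new();
/// let ret = execute_program(Some(&prog[..]), &[], &[], &helpers).unwrap();
/// assert_eq!(ret, 0);
/// ```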
#[allow(unknown_lints)]
#[allow(cyclomatic_complexity)]
pub fn execute_program(
    prog_: Option<&[u8]>,
    mem: &[u8],
    mbuff: &[u8],
    helpers: &HashMap<u32, ebpf::Helper>,
) -> Result<u64, Error> {
    const U32MAX: u64 = u32::MAX as u64;
    const SHIFT_MASK_64: u64 = 0x3f;

    let prog = match prog_ {
        Some(prog) => prog,
        None => Err(Error::new(
            ErrorKind::Other,
            "Error: No program set, call prog_set() to load one",
        ))?,
    };
    let mut stacks = Vec::new();
    let stack = StackFrame::new();
    // R1 points to beginning of memory area, R10 to stack
    let mut reg: [u64; 11] = [0; 11];
    reg[10] = stack.as_ptr() as u64 + stack.len() as u64;
    stacks.push(stack);
    if !mbuff.is_empty() {
        reg[1] = mbuff.as_ptr() as u64;
    } else if !mem.is_empty() {
        reg[1] = mem.as_ptr() as u64;
    }
    let check_mem_load =
        |stack: &[u8], addr: u64, len: usize, insn_ptr: usize| -> Result<(), Error> {
            check_mem(addr, len, "load", insn_ptr, mbuff, mem, stack)
        };
    let check_mem_store =
        |stack: &[u8], addr: u64, len: usize, insn_ptr: usize| -> Result<(), Error> {
            check_mem(addr, len, "store", insn_ptr, mbuff, mem, stack)
        };

    // Loop on instructions
    let mut insn_ptr: usize = 0;
    while insn_ptr * ebpf::INSN_SIZE < prog.len() {
        let insn = ebpf::get_insn(prog, insn_ptr);
        insn_ptr += 1;
        let _dst = insn.dst as usize;
        let _src = insn.src as usize;

        match insn.opc {
            // BPF_LD class
            // LD_ABS_* and LD_IND_* are supposed to load a pointer to data from the
            // metadata buffer. Since that pointer is constant and already known
            // (`mem`), do not bother re-fetching it; use `mem` directly.
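            // LD_ABS_* reads at `mem + imm`, LD_IND_* at `mem + src + imm`. The
            // bounds check always covers 8 bytes regardless of the access width
            // (1, 2, 4 or 8), which is stricter than necessary for narrow loads.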
            ebpf::LD_ABS_B => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_ABS_H => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_ABS_W => {
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_ABS_DW => {
                log::info!(
                    "executing LD_ABS_DW, loading u64 at mem + {:#x} into reg[0]",
                    insn.imm as u32
                );
                reg[0] = unsafe {
                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned()
                }
            }
            ebpf::LD_IND_B => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_IND_H => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_IND_W => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_IND_DW => {
                reg[0] = unsafe {
                    let x =
                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned()
                }
            }

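            // LD_DW_IMM occupies two instruction slots: the low 32 bits of the
            // immediate come from this instruction, the high 32 bits from the
            // next one, hence the extra insn_ptr increment.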
            ebpf::LD_DW_IMM => {
                let next_insn = ebpf::get_insn(prog, insn_ptr);
                insn_ptr += 1;
                // log::warn!(
                //     "executing LD_DW_IMM, set reg[{}] to {:#x}",
                //     _dst,
                //     ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32)
                // );
                reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
            }

            // BPF_LDX class
            ebpf::LD_B_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize);
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_H_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_W_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                    // log::warn!(
                    //     "executing LD_W_REG, the ptr is REG:{} -> [{:#x}] + {:#x}",
                    //     _src,
                    //     reg[_src],
                    //     insn.off
                    // );
                    x.read_unaligned() as u64
                }
            }
            ebpf::LD_DW_REG => {
                reg[_dst] = unsafe {
                    #[allow(clippy::cast_ptr_alignment)]
                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
                    check_mem_load(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                    x.read_unaligned()
                }
            }

            // BPF_ST class
            ebpf::ST_B_IMM => unsafe {
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                x.write_unaligned(insn.imm as u8);
            },
            ebpf::ST_H_IMM => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                x.write_unaligned(insn.imm as u16);
            },
            ebpf::ST_W_IMM => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                x.write_unaligned(insn.imm as u32);
            },
            ebpf::ST_DW_IMM => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                x.write_unaligned(insn.imm as u64);
            },

            // BPF_STX class
            ebpf::ST_B_REG => unsafe {
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 1, insn_ptr)?;
                x.write_unaligned(reg[_src] as u8);
            },
            ebpf::ST_H_REG => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 2, insn_ptr)?;
                x.write_unaligned(reg[_src] as u16);
            },
            ebpf::ST_W_REG => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 4, insn_ptr)?;
                x.write_unaligned(reg[_src] as u32);
            },
            ebpf::ST_DW_REG => unsafe {
                #[allow(clippy::cast_ptr_alignment)]
                let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                check_mem_store(stacks.last().unwrap().as_slice(), x as u64, 8, insn_ptr)?;
                x.write_unaligned(reg[_src]);
            },
            ebpf::ST_W_XADD => unimplemented!(),
            ebpf::ST_DW_XADD => unimplemented!(),

            // BPF_ALU class
            // TODO Check how overflow works in the kernel. Should we mask all src
            // register values with U32MAX before the operation?
            // Cf. ((0x11 << 32) - (0x1 << 32)) as u32 vs ((0x11 << 32) as u32 - (0x1 << 32) as u32)
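            // Note: the i32-based add/sub/mul arms below sign-extend their 32-bit
            // result into the upper half of the register, while the u32-based ops
            // zero-extend; NEG32 and ARSH32 mask back down to 32 bits explicitly.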
            ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
            ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
            ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
            ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
            ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
            ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
            ebpf::DIV32_IMM if insn.imm as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
            ebpf::DIV32_REG if reg[_src] as u32 == 0 => reg[_dst] = 0,
            ebpf::DIV32_REG => reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
            ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
            ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
            ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
            ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
            // As for the 64-bit version, the number of bits to shift should be
            // masked with 0x1f, but .wrapping_shl()/.wrapping_shr() already take
            // care of it for us.
            ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
            ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
            ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
            ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
            ebpf::NEG32 => {
                reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64;
                reg[_dst] &= U32MAX;
            }
            ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
            ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
            ebpf::MOD32_REG if reg[_src] as u32 == 0 => (),
            ebpf::MOD32_REG => reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
            ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
            ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
            ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
            ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
            // As above, the shift amount should be masked with 0x1f, but
            // .wrapping_shr() already takes care of it for us.
            ebpf::ARSH32_IMM => {
                reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64;
                reg[_dst] &= U32MAX;
            }
            ebpf::ARSH32_REG => {
                reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64;
                reg[_dst] &= U32MAX;
            }
            ebpf::LE => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_le() as u64,
                    32 => (reg[_dst] as u32).to_le() as u64,
                    64 => reg[_dst].to_le(),
                    _ => unreachable!(),
                };
            }
            ebpf::BE => {
                reg[_dst] = match insn.imm {
                    16 => (reg[_dst] as u16).to_be() as u64,
                    32 => (reg[_dst] as u32).to_be() as u64,
                    64 => reg[_dst].to_be(),
                    _ => unreachable!(),
                };
            }

            // BPF_ALU64 class
            ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
            ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
            ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
            ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
            ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
            ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
            ebpf::DIV64_IMM if insn.imm == 0 => reg[_dst] = 0,
            ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
            ebpf::DIV64_REG if reg[_src] == 0 => reg[_dst] = 0,
            ebpf::DIV64_REG => reg[_dst] /= reg[_src],
            ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
            ebpf::OR64_REG => reg[_dst] |= reg[_src],
            ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
            ebpf::AND64_REG => reg[_dst] &= reg[_src],
            ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::LSH64_REG => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
            ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
            ebpf::RSH64_REG => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
            ebpf::NEG64 => reg[_dst] = (reg[_dst] as i64).wrapping_neg() as u64,
            ebpf::MOD64_IMM if insn.imm == 0 => (),
            ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
            ebpf::MOD64_REG if reg[_src] == 0 => (),
            ebpf::MOD64_REG => reg[_dst] %= reg[_src],
            ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
            ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
            ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
            ebpf::MOV64_REG => reg[_dst] = reg[_src],
            ebpf::ARSH64_IMM => {
                reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64)) as u64
            }
            ebpf::ARSH64_REG => {
                reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64
            }

            // BPF_JMP class
            // TODO: check this actually works as expected for signed / unsigned ops
            ebpf::JA => do_jump(&mut insn_ptr, &insn),
            ebpf::JEQ_IMM => {
                if reg[_dst] == insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JEQ_REG => {
                if reg[_dst] == reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_IMM => {
                if reg[_dst] > insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_REG => {
                if reg[_dst] > reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_IMM => {
                if reg[_dst] >= insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_REG => {
                if reg[_dst] >= reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_IMM => {
                if reg[_dst] < insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_REG => {
                if reg[_dst] < reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_IMM => {
                if reg[_dst] <= insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_REG => {
                if reg[_dst] <= reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_IMM => {
                if reg[_dst] & insn.imm as u64 != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_REG => {
                if reg[_dst] & reg[_src] != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_IMM => {
                if reg[_dst] != insn.imm as u64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_REG => {
                if reg[_dst] != reg[_src] {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_IMM => {
                if reg[_dst] as i64 > insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_REG => {
                if reg[_dst] as i64 > reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_IMM => {
                if reg[_dst] as i64 >= insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_REG => {
                if reg[_dst] as i64 >= reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_IMM => {
                if (reg[_dst] as i64) < insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_REG => {
                if (reg[_dst] as i64) < reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_IMM => {
                if reg[_dst] as i64 <= insn.imm as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_REG => {
                if reg[_dst] as i64 <= reg[_src] as i64 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }

            // BPF_JMP32 class
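            // Same predicates as the BPF_JMP class above, but the comparisons
            // operate on the low 32 bits of the operands.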
            ebpf::JEQ_IMM32 => {
                if reg[_dst] as u32 == insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JEQ_REG32 => {
                if reg[_dst] as u32 == reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_IMM32 => {
                if reg[_dst] as u32 > insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGT_REG32 => {
                if reg[_dst] as u32 > reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_IMM32 => {
                if reg[_dst] as u32 >= insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JGE_REG32 => {
                if reg[_dst] as u32 >= reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_IMM32 => {
                if (reg[_dst] as u32) < insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLT_REG32 => {
                if (reg[_dst] as u32) < reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_IMM32 => {
                if reg[_dst] as u32 <= insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JLE_REG32 => {
                if reg[_dst] as u32 <= reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_IMM32 => {
                if reg[_dst] as u32 & insn.imm as u32 != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSET_REG32 => {
                if reg[_dst] as u32 & reg[_src] as u32 != 0 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_IMM32 => {
                if reg[_dst] as u32 != insn.imm as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JNE_REG32 => {
                if reg[_dst] as u32 != reg[_src] as u32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_IMM32 => {
                if reg[_dst] as i32 > insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGT_REG32 => {
                if reg[_dst] as i32 > reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_IMM32 => {
                if reg[_dst] as i32 >= insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSGE_REG32 => {
                if reg[_dst] as i32 >= reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_IMM32 => {
                if (reg[_dst] as i32) < insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLT_REG32 => {
                if (reg[_dst] as i32) < reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_IMM32 => {
                if reg[_dst] as i32 <= insn.imm {
                    do_jump(&mut insn_ptr, &insn);
                }
            }
            ebpf::JSLE_REG32 => {
                if reg[_dst] as i32 <= reg[_src] as i32 {
                    do_jump(&mut insn_ptr, &insn);
                }
            }

            // Do not delegate the check to the verifier, since registered functions
            // can be changed after the program has been verified.
            ebpf::CALL => {
                // See https://www.kernel.org/doc/html/latest/bpf/standardization/instruction-set.html#id16
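                // src_reg selects the call flavour: 0 is a call to a helper
                // function by id, 1 is a bpf-to-bpf call into the same program;
                // anything else is rejected below.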
                let src_reg = _src;
                let call_func_res = match src_reg {
                    0 => {
                        // Handle call by address to external function.
                        if let Some(function) = helpers.get(&(insn.imm as u32)) {
                            reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
                            Ok(())
                        } else {
                            // Look the name up defensively: an unknown id may also be
                            // out of range for the mapper table.
                            let name = BPF_FUNC_MAPPER
                                .get(insn.imm as usize)
                                .copied()
                                .unwrap_or("unknown");
                            Err(format!(
                                "Error: unknown helper function (id: {:#x}) [{}], (instruction #{})",
                                insn.imm as u32, name, insn_ptr
                            ))
                        }
                    }
                    1 => {
                        // bpf-to-bpf call: the callee lives in the same program, so
                        // we can simply jump to its address.
                        if stacks.len() >= ebpf::RBPF_MAX_CALL_DEPTH {
                            Err(format!(
                                "Error: bpf to bpf call stack limit reached (instruction #{}) max depth: {}",
                                insn_ptr, ebpf::RBPF_MAX_CALL_DEPTH
                            ))
                        } else {
                            let pre_stack = stacks.last_mut().unwrap();
                            // Save the callee-saved registers
                            pre_stack.save_registers(&reg[6..=9]);
                            // Save the return address
                            pre_stack.save_return_address(insn_ptr as u16);
                            // Save the stack pointer
                            pre_stack.save_sp(reg[10] as u16);
                            let stack = StackFrame::new();
                            log::trace!(
                                "BPF TO BPF CALL: new pc: {} + {} = {}",
                                insn_ptr,
                                insn.imm,
                                insn_ptr + insn.imm as usize
                            );
                            reg[10] = stack.as_ptr() as u64 + stack.len() as u64;
                            stacks.push(stack);
                            insn_ptr += insn.imm as usize;
                            Ok(())
                        }
                    }
                    _ => {
                        let name = BPF_FUNC_MAPPER
                            .get(insn.imm as usize)
                            .copied()
                            .unwrap_or("unknown");
                        Err(format!(
                            "Error: the function call type (id: {:#x}) [{}], (instruction #{}) is not supported",
                            insn.imm as u32, name, insn_ptr
                        ))
                    }
                };
                if let Err(e) = call_func_res {
                    Err(Error::new(ErrorKind::Other, e))?;
                }
            }
            ebpf::TAIL_CALL => unimplemented!(),
            ebpf::EXIT => {
                if stacks.len() == 1 {
                    return Ok(reg[0]);
                } else {
                    // Pop the current stack frame
                    stacks.pop();
                    let stack = stacks.last().unwrap();
                    // Restore the callee-saved registers
                    reg[6..=9].copy_from_slice(&stack.get_registers());
                    // Restore the return address
                    insn_ptr = stack.get_return_address() as usize;
                    // Restore the stack pointer
                    reg[10] = stack.get_sp() as u64;
                    log::trace!("EXIT: new pc: {}", insn_ptr);
                }
            }

            _ => unreachable!(),
        }
    }

    unreachable!()
}