xref: /DragonOS/kernel/crates/rbpf/src/cranelift.rs (revision fae6e9ade46a52976ad5d099643d51cc20876448)
1*fae6e9adSlinfeng // SPDX-License-Identifier: (Apache-2.0 OR MIT)
2*fae6e9adSlinfeng 
3*fae6e9adSlinfeng use alloc::{collections::BTreeMap, format, vec, vec::Vec};
4*fae6e9adSlinfeng use core::{mem, mem::ManuallyDrop};
5*fae6e9adSlinfeng use std::io::ErrorKind;
6*fae6e9adSlinfeng 
7*fae6e9adSlinfeng use cranelift_codegen::{
8*fae6e9adSlinfeng     entity::EntityRef,
9*fae6e9adSlinfeng     ir::{
10*fae6e9adSlinfeng         condcodes::IntCC,
11*fae6e9adSlinfeng         types::{I16, I32, I64, I8},
12*fae6e9adSlinfeng         AbiParam, Block, Endianness, FuncRef, Function, InstBuilder, MemFlags, Signature,
13*fae6e9adSlinfeng         SourceLoc, StackSlotData, StackSlotKind, TrapCode, Type, UserFuncName, Value,
14*fae6e9adSlinfeng     },
15*fae6e9adSlinfeng     isa::OwnedTargetIsa,
16*fae6e9adSlinfeng     settings::{self, Configurable},
17*fae6e9adSlinfeng };
18*fae6e9adSlinfeng use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
19*fae6e9adSlinfeng use cranelift_jit::{JITBuilder, JITModule};
20*fae6e9adSlinfeng use cranelift_module::{FuncId, Linkage, Module};
21*fae6e9adSlinfeng 
22*fae6e9adSlinfeng use super::{Error, HashMap, HashSet};
23*fae6e9adSlinfeng use crate::ebpf::{
24*fae6e9adSlinfeng     self, Insn, BPF_ALU_OP_MASK, BPF_IND, BPF_JEQ, BPF_JGE, BPF_JGT, BPF_JLE, BPF_JLT, BPF_JMP32,
25*fae6e9adSlinfeng     BPF_JNE, BPF_JSET, BPF_JSGE, BPF_JSGT, BPF_JSLE, BPF_JSLT, BPF_X, STACK_SIZE,
26*fae6e9adSlinfeng };
27*fae6e9adSlinfeng 
/// Native signature of a JIT-compiled eBPF program entrypoint.
///
/// This is an informal ABI, not the standard eBPF calling convention: the
/// generated function receives the packet memory and the metadata buffer
/// (mbuff) as (pointer, length) pairs and returns the final value of the
/// eBPF R0 register.
pub type JittedFunction = extern "C" fn(
    *mut u8, // mem_ptr
    usize,   // mem_len
    *mut u8, // mbuff_ptr
    usize,   // mbuff_len
) -> u64;
34*fae6e9adSlinfeng 
/// Compiles eBPF bytecode to native code through the Cranelift code
/// generator, producing a single JITted entry function (see [JittedFunction]).
pub(crate) struct CraneliftCompiler {
    /// Host target ISA; also used for its pointer type and calling convention.
    isa: OwnedTargetIsa,
    /// JIT module that owns the generated code and the linked helper symbols.
    module: JITModule,

    /// Map of eBPF helper numbers to their native implementations.
    helpers: HashMap<u32, ebpf::Helper>,
    /// Cranelift function references for the imported helpers, keyed by helper number.
    helper_func_refs: HashMap<u32, FuncRef>,

    /// List of blocks corresponding to each instruction.
    /// We only store the first instruction that observes a new block
    insn_blocks: BTreeMap<u32, Block>,
    /// Map of block targets for each jump/branching instruction.
    insn_targets: BTreeMap<u32, (Block, Block)>,
    /// Blocks that already end in a terminator, so no fallthrough jump is needed.
    filled_blocks: HashSet<Block>,

    /// Map of register numbers (r0-r10) to Cranelift variables.
    registers: [Variable; 11],
    // Other useful variables used throughout the program:
    /// Bounds of the packet memory region (native ABI params 0 and 1).
    mem_start: Variable,
    mem_end: Variable,
    /// Bounds of the metadata buffer (native ABI params 2 and 3).
    mbuf_start: Variable,
    mbuf_end: Variable,
    /// Bounds of the eBPF stack slot created in the function prelude.
    stack_start: Variable,
    stack_end: Variable,
}
59*fae6e9adSlinfeng 
60*fae6e9adSlinfeng impl CraneliftCompiler {
new(helpers: HashMap<u32, ebpf::Helper>) -> Self61*fae6e9adSlinfeng     pub(crate) fn new(helpers: HashMap<u32, ebpf::Helper>) -> Self {
62*fae6e9adSlinfeng         let mut flag_builder = settings::builder();
63*fae6e9adSlinfeng 
64*fae6e9adSlinfeng         flag_builder.set("opt_level", "speed").unwrap();
65*fae6e9adSlinfeng 
66*fae6e9adSlinfeng         // Enable stack probes
67*fae6e9adSlinfeng         flag_builder.enable("enable_probestack").unwrap();
68*fae6e9adSlinfeng         flag_builder.set("probestack_strategy", "inline").unwrap();
69*fae6e9adSlinfeng 
70*fae6e9adSlinfeng         let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| {
71*fae6e9adSlinfeng             panic!("host machine is not supported: {}", msg);
72*fae6e9adSlinfeng         });
73*fae6e9adSlinfeng         let isa = isa_builder
74*fae6e9adSlinfeng             .finish(settings::Flags::new(flag_builder))
75*fae6e9adSlinfeng             .unwrap();
76*fae6e9adSlinfeng 
77*fae6e9adSlinfeng         let mut jit_builder =
78*fae6e9adSlinfeng             JITBuilder::with_isa(isa.clone(), cranelift_module::default_libcall_names());
79*fae6e9adSlinfeng         // Register all the helpers
80*fae6e9adSlinfeng         for (k, v) in helpers.iter() {
81*fae6e9adSlinfeng             let name = format!("helper_{}", k);
82*fae6e9adSlinfeng             jit_builder.symbol(name, (*v) as usize as *const u8);
83*fae6e9adSlinfeng         }
84*fae6e9adSlinfeng 
85*fae6e9adSlinfeng         let mut module = JITModule::new(jit_builder);
86*fae6e9adSlinfeng 
87*fae6e9adSlinfeng         let registers = (0..11)
88*fae6e9adSlinfeng             .map(|i| Variable::new(i))
89*fae6e9adSlinfeng             .collect::<Vec<_>>()
90*fae6e9adSlinfeng             .try_into()
91*fae6e9adSlinfeng             .unwrap();
92*fae6e9adSlinfeng 
93*fae6e9adSlinfeng         Self {
94*fae6e9adSlinfeng             isa,
95*fae6e9adSlinfeng             module,
96*fae6e9adSlinfeng             helpers,
97*fae6e9adSlinfeng             helper_func_refs: HashMap::new(),
98*fae6e9adSlinfeng             insn_blocks: BTreeMap::new(),
99*fae6e9adSlinfeng             insn_targets: BTreeMap::new(),
100*fae6e9adSlinfeng             filled_blocks: HashSet::new(),
101*fae6e9adSlinfeng             registers,
102*fae6e9adSlinfeng             mem_start: Variable::new(11),
103*fae6e9adSlinfeng             mem_end: Variable::new(12),
104*fae6e9adSlinfeng             mbuf_start: Variable::new(13),
105*fae6e9adSlinfeng             mbuf_end: Variable::new(14),
106*fae6e9adSlinfeng             stack_start: Variable::new(15),
107*fae6e9adSlinfeng             stack_end: Variable::new(16),
108*fae6e9adSlinfeng         }
109*fae6e9adSlinfeng     }
110*fae6e9adSlinfeng 
compile_function(mut self, prog: &[u8]) -> Result<CraneliftProgram, Error>111*fae6e9adSlinfeng     pub(crate) fn compile_function(mut self, prog: &[u8]) -> Result<CraneliftProgram, Error> {
112*fae6e9adSlinfeng         let name = "main";
113*fae6e9adSlinfeng         // This is not a standard eBPF function! We use an informal ABI with just 4 parameters.
114*fae6e9adSlinfeng         // See [JittedFunction] which is the signature of this function.
115*fae6e9adSlinfeng         //
116*fae6e9adSlinfeng         // Since this function only serves as the entrypoint for the JITed program, it doesen't
117*fae6e9adSlinfeng         // really matter.
118*fae6e9adSlinfeng         let sig = Signature {
119*fae6e9adSlinfeng             params: vec![
120*fae6e9adSlinfeng                 AbiParam::new(I64),
121*fae6e9adSlinfeng                 AbiParam::new(I64),
122*fae6e9adSlinfeng                 AbiParam::new(I64),
123*fae6e9adSlinfeng                 AbiParam::new(I64),
124*fae6e9adSlinfeng             ],
125*fae6e9adSlinfeng             returns: vec![AbiParam::new(I64)],
126*fae6e9adSlinfeng             call_conv: self.isa.default_call_conv(),
127*fae6e9adSlinfeng         };
128*fae6e9adSlinfeng 
129*fae6e9adSlinfeng         let func_id = self
130*fae6e9adSlinfeng             .module
131*fae6e9adSlinfeng             .declare_function(name, Linkage::Local, &sig)
132*fae6e9adSlinfeng             .unwrap();
133*fae6e9adSlinfeng 
134*fae6e9adSlinfeng         let mut ctx = self.module.make_context();
135*fae6e9adSlinfeng         ctx.func = Function::with_name_signature(UserFuncName::testcase(name.as_bytes()), sig);
136*fae6e9adSlinfeng         let mut func_ctx = FunctionBuilderContext::new();
137*fae6e9adSlinfeng 
138*fae6e9adSlinfeng         {
139*fae6e9adSlinfeng             let mut builder: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
140*fae6e9adSlinfeng 
141*fae6e9adSlinfeng             let entry = builder.create_block();
142*fae6e9adSlinfeng             builder.append_block_params_for_function_params(entry);
143*fae6e9adSlinfeng             builder.switch_to_block(entry);
144*fae6e9adSlinfeng 
145*fae6e9adSlinfeng             self.build_cfg(&mut builder, prog)?;
146*fae6e9adSlinfeng             self.build_function_prelude(&mut builder, entry)?;
147*fae6e9adSlinfeng             self.translate_program(&mut builder, prog)?;
148*fae6e9adSlinfeng 
149*fae6e9adSlinfeng             builder.seal_all_blocks();
150*fae6e9adSlinfeng             builder.finalize();
151*fae6e9adSlinfeng         }
152*fae6e9adSlinfeng 
153*fae6e9adSlinfeng         self.module.define_function(func_id, &mut ctx).unwrap();
154*fae6e9adSlinfeng         self.module.finalize_definitions().unwrap();
155*fae6e9adSlinfeng         self.module.clear_context(&mut ctx);
156*fae6e9adSlinfeng 
157*fae6e9adSlinfeng         Ok(CraneliftProgram::new(self.module, func_id))
158*fae6e9adSlinfeng     }
159*fae6e9adSlinfeng 
build_function_prelude( &mut self, bcx: &mut FunctionBuilder, entry: Block, ) -> Result<(), Error>160*fae6e9adSlinfeng     fn build_function_prelude(
161*fae6e9adSlinfeng         &mut self,
162*fae6e9adSlinfeng         bcx: &mut FunctionBuilder,
163*fae6e9adSlinfeng         entry: Block,
164*fae6e9adSlinfeng     ) -> Result<(), Error> {
165*fae6e9adSlinfeng         // Register the VM registers as variables
166*fae6e9adSlinfeng         for var in self.registers.iter() {
167*fae6e9adSlinfeng             bcx.declare_var(*var, I64);
168*fae6e9adSlinfeng         }
169*fae6e9adSlinfeng 
170*fae6e9adSlinfeng         // Register the bounds check variables
171*fae6e9adSlinfeng         bcx.declare_var(self.mem_start, I64);
172*fae6e9adSlinfeng         bcx.declare_var(self.mem_end, I64);
173*fae6e9adSlinfeng         bcx.declare_var(self.mbuf_start, I64);
174*fae6e9adSlinfeng         bcx.declare_var(self.mbuf_end, I64);
175*fae6e9adSlinfeng         bcx.declare_var(self.stack_start, I64);
176*fae6e9adSlinfeng         bcx.declare_var(self.stack_end, I64);
177*fae6e9adSlinfeng 
178*fae6e9adSlinfeng         // Register the helpers
179*fae6e9adSlinfeng         for (k, _) in self.helpers.iter() {
180*fae6e9adSlinfeng             let name = format!("helper_{}", k);
181*fae6e9adSlinfeng             let sig = Signature {
182*fae6e9adSlinfeng                 params: vec![
183*fae6e9adSlinfeng                     AbiParam::new(I64),
184*fae6e9adSlinfeng                     AbiParam::new(I64),
185*fae6e9adSlinfeng                     AbiParam::new(I64),
186*fae6e9adSlinfeng                     AbiParam::new(I64),
187*fae6e9adSlinfeng                     AbiParam::new(I64),
188*fae6e9adSlinfeng                 ],
189*fae6e9adSlinfeng                 returns: vec![AbiParam::new(I64)],
190*fae6e9adSlinfeng                 call_conv: self.isa.default_call_conv(),
191*fae6e9adSlinfeng             };
192*fae6e9adSlinfeng             let func_id = self
193*fae6e9adSlinfeng                 .module
194*fae6e9adSlinfeng                 .declare_function(&name, Linkage::Import, &sig)
195*fae6e9adSlinfeng                 .unwrap();
196*fae6e9adSlinfeng 
197*fae6e9adSlinfeng             let func_ref = self.module.declare_func_in_func(func_id, bcx.func);
198*fae6e9adSlinfeng             self.helper_func_refs.insert(*k, func_ref);
199*fae6e9adSlinfeng         }
200*fae6e9adSlinfeng 
201*fae6e9adSlinfeng         // Register the stack
202*fae6e9adSlinfeng         let ss = bcx.create_sized_stack_slot(StackSlotData {
203*fae6e9adSlinfeng             kind: StackSlotKind::ExplicitSlot,
204*fae6e9adSlinfeng             size: STACK_SIZE as u32,
205*fae6e9adSlinfeng         });
206*fae6e9adSlinfeng         let addr_ty = self.isa.pointer_type();
207*fae6e9adSlinfeng         let stack_addr = bcx.ins().stack_addr(addr_ty, ss, STACK_SIZE as i32);
208*fae6e9adSlinfeng         bcx.def_var(self.registers[10], stack_addr);
209*fae6e9adSlinfeng 
210*fae6e9adSlinfeng         // Initialize the bounds check variables
211*fae6e9adSlinfeng         let stack_start = bcx.ins().stack_addr(addr_ty, ss, 0);
212*fae6e9adSlinfeng         bcx.def_var(self.stack_start, stack_start);
213*fae6e9adSlinfeng         let stack_end = bcx.ins().stack_addr(addr_ty, ss, STACK_SIZE as i32);
214*fae6e9adSlinfeng         bcx.def_var(self.stack_end, stack_end);
215*fae6e9adSlinfeng 
216*fae6e9adSlinfeng         // This is our internal ABI where the first 2 params are the memory
217*fae6e9adSlinfeng         let mem_start = bcx.block_params(entry)[0];
218*fae6e9adSlinfeng         let mem_len = bcx.block_params(entry)[1];
219*fae6e9adSlinfeng         let mem_end = bcx.ins().iadd(mem_start, mem_len);
220*fae6e9adSlinfeng         bcx.def_var(self.mem_start, mem_start);
221*fae6e9adSlinfeng         bcx.def_var(self.mem_end, mem_end);
222*fae6e9adSlinfeng 
223*fae6e9adSlinfeng         // And the next 2 are the mbuf
224*fae6e9adSlinfeng         let mbuf_start = bcx.block_params(entry)[2];
225*fae6e9adSlinfeng         let mbuf_len = bcx.block_params(entry)[3];
226*fae6e9adSlinfeng         let mbuf_end = bcx.ins().iadd(mbuf_start, mbuf_len);
227*fae6e9adSlinfeng         bcx.def_var(self.mbuf_start, mbuf_start);
228*fae6e9adSlinfeng         bcx.def_var(self.mbuf_end, mbuf_end);
229*fae6e9adSlinfeng 
230*fae6e9adSlinfeng         // The ABI for eBPF specifies that R1 must contain either the memory, or mbuff pointer
231*fae6e9adSlinfeng         // If the mbuf length is non-zero, then we use that, otherwise we use the memory pointer
232*fae6e9adSlinfeng         let mbuf_exists = bcx.ins().icmp_imm(IntCC::NotEqual, mbuf_len, 0);
233*fae6e9adSlinfeng         let mem_or_mbuf = bcx.ins().select(mbuf_exists, mbuf_start, mem_start);
234*fae6e9adSlinfeng         bcx.def_var(self.registers[1], mem_or_mbuf);
235*fae6e9adSlinfeng 
236*fae6e9adSlinfeng         // R2 should contain the length of the memory or mbuf
237*fae6e9adSlinfeng         // At least ebpf-conformance tests expect this
238*fae6e9adSlinfeng         let mem_or_mbuf_len = bcx.ins().select(mbuf_exists, mbuf_len, mem_len);
239*fae6e9adSlinfeng         bcx.def_var(self.registers[2], mem_or_mbuf_len);
240*fae6e9adSlinfeng 
241*fae6e9adSlinfeng         // Insert the *actual* initial block
242*fae6e9adSlinfeng         let program_entry = bcx.create_block();
243*fae6e9adSlinfeng         bcx.ins().jump(program_entry, &[]);
244*fae6e9adSlinfeng         self.filled_blocks.insert(bcx.current_block().unwrap());
245*fae6e9adSlinfeng         self.insn_blocks.insert(0, program_entry);
246*fae6e9adSlinfeng 
247*fae6e9adSlinfeng         Ok(())
248*fae6e9adSlinfeng     }
249*fae6e9adSlinfeng 
translate_program(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error>250*fae6e9adSlinfeng     fn translate_program(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
251*fae6e9adSlinfeng         let mut insn_ptr: usize = 0;
252*fae6e9adSlinfeng         while insn_ptr * ebpf::INSN_SIZE < prog.len() {
253*fae6e9adSlinfeng             let insn = ebpf::get_insn(prog, insn_ptr);
254*fae6e9adSlinfeng 
255*fae6e9adSlinfeng             // If this instruction is on a new block switch to it.
256*fae6e9adSlinfeng             if let Some(block) = self.insn_blocks.get(&(insn_ptr as u32)) {
257*fae6e9adSlinfeng                 // Blocks must have a terminator instruction at the end before we switch away from them
258*fae6e9adSlinfeng                 let current_block = bcx.current_block().unwrap();
259*fae6e9adSlinfeng                 if !self.filled_blocks.contains(&current_block) {
260*fae6e9adSlinfeng                     bcx.ins().jump(*block, &[]);
261*fae6e9adSlinfeng                 }
262*fae6e9adSlinfeng 
263*fae6e9adSlinfeng                 bcx.switch_to_block(*block);
264*fae6e9adSlinfeng             }
265*fae6e9adSlinfeng 
266*fae6e9adSlinfeng             // Set the source location for the instruction
267*fae6e9adSlinfeng             bcx.set_srcloc(SourceLoc::new(insn_ptr as u32));
268*fae6e9adSlinfeng 
269*fae6e9adSlinfeng             match insn.opc {
270*fae6e9adSlinfeng                 // BPF_LD class
271*fae6e9adSlinfeng                 // LD_ABS_* and LD_IND_* are supposed to load pointer to data from metadata buffer.
272*fae6e9adSlinfeng                 // Since this pointer is constant, and since we already know it (mem), do not
273*fae6e9adSlinfeng                 // bother re-fetching it, just use mem already.
274*fae6e9adSlinfeng                 ebpf::LD_ABS_B
275*fae6e9adSlinfeng                 | ebpf::LD_ABS_H
276*fae6e9adSlinfeng                 | ebpf::LD_ABS_W
277*fae6e9adSlinfeng                 | ebpf::LD_ABS_DW
278*fae6e9adSlinfeng                 | ebpf::LD_IND_B
279*fae6e9adSlinfeng                 | ebpf::LD_IND_H
280*fae6e9adSlinfeng                 | ebpf::LD_IND_W
281*fae6e9adSlinfeng                 | ebpf::LD_IND_DW => {
282*fae6e9adSlinfeng                     let ty = match insn.opc {
283*fae6e9adSlinfeng                         ebpf::LD_ABS_B | ebpf::LD_IND_B => I8,
284*fae6e9adSlinfeng                         ebpf::LD_ABS_H | ebpf::LD_IND_H => I16,
285*fae6e9adSlinfeng                         ebpf::LD_ABS_W | ebpf::LD_IND_W => I32,
286*fae6e9adSlinfeng                         ebpf::LD_ABS_DW | ebpf::LD_IND_DW => I64,
287*fae6e9adSlinfeng                         _ => unreachable!(),
288*fae6e9adSlinfeng                     };
289*fae6e9adSlinfeng 
290*fae6e9adSlinfeng                     // Both instructions add the imm part of the instruction to the pointer
291*fae6e9adSlinfeng                     let ptr = bcx.use_var(self.mem_start);
292*fae6e9adSlinfeng                     let offset = bcx
293*fae6e9adSlinfeng                         .ins()
294*fae6e9adSlinfeng                         .iconst(self.isa.pointer_type(), insn.imm as u32 as i64);
295*fae6e9adSlinfeng                     let addr = bcx.ins().iadd(ptr, offset);
296*fae6e9adSlinfeng 
297*fae6e9adSlinfeng                     // IND instructions additionally add the value of the source register
298*fae6e9adSlinfeng                     let is_ind = (insn.opc & BPF_IND) != 0;
299*fae6e9adSlinfeng                     let addr = if is_ind {
300*fae6e9adSlinfeng                         let src_reg = self.insn_src(bcx, &insn);
301*fae6e9adSlinfeng                         bcx.ins().iadd(addr, src_reg)
302*fae6e9adSlinfeng                     } else {
303*fae6e9adSlinfeng                         addr
304*fae6e9adSlinfeng                     };
305*fae6e9adSlinfeng 
306*fae6e9adSlinfeng                     // The offset here has already been added to the pointer, so we pass 0
307*fae6e9adSlinfeng                     let loaded = self.reg_load(bcx, ty, addr, 0);
308*fae6e9adSlinfeng 
309*fae6e9adSlinfeng                     let ext = if ty != I64 {
310*fae6e9adSlinfeng                         bcx.ins().uextend(I64, loaded)
311*fae6e9adSlinfeng                     } else {
312*fae6e9adSlinfeng                         loaded
313*fae6e9adSlinfeng                     };
314*fae6e9adSlinfeng 
315*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, ext);
316*fae6e9adSlinfeng                 }
317*fae6e9adSlinfeng                 ebpf::LD_DW_IMM => {
318*fae6e9adSlinfeng                     insn_ptr += 1;
319*fae6e9adSlinfeng                     let next_insn = ebpf::get_insn(prog, insn_ptr);
320*fae6e9adSlinfeng 
321*fae6e9adSlinfeng                     let imm = (((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32)) as i64;
322*fae6e9adSlinfeng                     let iconst = bcx.ins().iconst(I64, imm);
323*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, iconst);
324*fae6e9adSlinfeng                 }
325*fae6e9adSlinfeng 
326*fae6e9adSlinfeng                 // BPF_LDX class
327*fae6e9adSlinfeng                 ebpf::LD_B_REG | ebpf::LD_H_REG | ebpf::LD_W_REG | ebpf::LD_DW_REG => {
328*fae6e9adSlinfeng                     let ty = match insn.opc {
329*fae6e9adSlinfeng                         ebpf::LD_B_REG => I8,
330*fae6e9adSlinfeng                         ebpf::LD_H_REG => I16,
331*fae6e9adSlinfeng                         ebpf::LD_W_REG => I32,
332*fae6e9adSlinfeng                         ebpf::LD_DW_REG => I64,
333*fae6e9adSlinfeng                         _ => unreachable!(),
334*fae6e9adSlinfeng                     };
335*fae6e9adSlinfeng 
336*fae6e9adSlinfeng                     let base = self.insn_src(bcx, &insn);
337*fae6e9adSlinfeng                     let loaded = self.reg_load(bcx, ty, base, insn.off);
338*fae6e9adSlinfeng 
339*fae6e9adSlinfeng                     let ext = if ty != I64 {
340*fae6e9adSlinfeng                         bcx.ins().uextend(I64, loaded)
341*fae6e9adSlinfeng                     } else {
342*fae6e9adSlinfeng                         loaded
343*fae6e9adSlinfeng                     };
344*fae6e9adSlinfeng 
345*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, ext);
346*fae6e9adSlinfeng                 }
347*fae6e9adSlinfeng 
348*fae6e9adSlinfeng                 // BPF_ST and BPF_STX class
349*fae6e9adSlinfeng                 ebpf::ST_B_IMM
350*fae6e9adSlinfeng                 | ebpf::ST_H_IMM
351*fae6e9adSlinfeng                 | ebpf::ST_W_IMM
352*fae6e9adSlinfeng                 | ebpf::ST_DW_IMM
353*fae6e9adSlinfeng                 | ebpf::ST_B_REG
354*fae6e9adSlinfeng                 | ebpf::ST_H_REG
355*fae6e9adSlinfeng                 | ebpf::ST_W_REG
356*fae6e9adSlinfeng                 | ebpf::ST_DW_REG => {
357*fae6e9adSlinfeng                     let ty = match insn.opc {
358*fae6e9adSlinfeng                         ebpf::ST_B_IMM | ebpf::ST_B_REG => I8,
359*fae6e9adSlinfeng                         ebpf::ST_H_IMM | ebpf::ST_H_REG => I16,
360*fae6e9adSlinfeng                         ebpf::ST_W_IMM | ebpf::ST_W_REG => I32,
361*fae6e9adSlinfeng                         ebpf::ST_DW_IMM | ebpf::ST_DW_REG => I64,
362*fae6e9adSlinfeng                         _ => unreachable!(),
363*fae6e9adSlinfeng                     };
364*fae6e9adSlinfeng                     let is_imm = match insn.opc {
365*fae6e9adSlinfeng                         ebpf::ST_B_IMM | ebpf::ST_H_IMM | ebpf::ST_W_IMM | ebpf::ST_DW_IMM => true,
366*fae6e9adSlinfeng                         ebpf::ST_B_REG | ebpf::ST_H_REG | ebpf::ST_W_REG | ebpf::ST_DW_REG => false,
367*fae6e9adSlinfeng                         _ => unreachable!(),
368*fae6e9adSlinfeng                     };
369*fae6e9adSlinfeng 
370*fae6e9adSlinfeng                     let value = if is_imm {
371*fae6e9adSlinfeng                         self.insn_imm64(bcx, &insn)
372*fae6e9adSlinfeng                     } else {
373*fae6e9adSlinfeng                         self.insn_src(bcx, &insn)
374*fae6e9adSlinfeng                     };
375*fae6e9adSlinfeng 
376*fae6e9adSlinfeng                     let narrow = if ty != I64 {
377*fae6e9adSlinfeng                         bcx.ins().ireduce(ty, value)
378*fae6e9adSlinfeng                     } else {
379*fae6e9adSlinfeng                         value
380*fae6e9adSlinfeng                     };
381*fae6e9adSlinfeng 
382*fae6e9adSlinfeng                     let base = self.insn_dst(bcx, &insn);
383*fae6e9adSlinfeng                     self.reg_store(bcx, ty, base, insn.off, narrow);
384*fae6e9adSlinfeng                 }
385*fae6e9adSlinfeng 
386*fae6e9adSlinfeng                 ebpf::ST_W_XADD => unimplemented!(),
387*fae6e9adSlinfeng                 ebpf::ST_DW_XADD => unimplemented!(),
388*fae6e9adSlinfeng 
389*fae6e9adSlinfeng                 // BPF_ALU class
390*fae6e9adSlinfeng                 // TODO Check how overflow works in kernel. Should we &= U32MAX all src register value
391*fae6e9adSlinfeng                 // before we do the operation?
392*fae6e9adSlinfeng                 // Cf ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32
393*fae6e9adSlinfeng                 ebpf::ADD32_IMM => {
394*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
395*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
396*fae6e9adSlinfeng                     let res = bcx.ins().iadd(src, imm);
397*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
398*fae6e9adSlinfeng                 }
399*fae6e9adSlinfeng                 ebpf::ADD32_REG => {
400*fae6e9adSlinfeng                     //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
401*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
402*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
403*fae6e9adSlinfeng                     let res = bcx.ins().iadd(lhs, rhs);
404*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
405*fae6e9adSlinfeng                 }
406*fae6e9adSlinfeng                 ebpf::SUB32_IMM => {
407*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm)         as u64,
408*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
409*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
410*fae6e9adSlinfeng                     let res = bcx.ins().isub(src, imm);
411*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
412*fae6e9adSlinfeng                 }
413*fae6e9adSlinfeng                 ebpf::SUB32_REG => {
414*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
415*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
416*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
417*fae6e9adSlinfeng                     let res = bcx.ins().isub(lhs, rhs);
418*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
419*fae6e9adSlinfeng                 }
420*fae6e9adSlinfeng                 ebpf::MUL32_IMM => {
421*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm)         as u64,
422*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
423*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
424*fae6e9adSlinfeng                     let res = bcx.ins().imul(src, imm);
425*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
426*fae6e9adSlinfeng                 }
427*fae6e9adSlinfeng                 ebpf::MUL32_REG => {
428*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
429*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
430*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
431*fae6e9adSlinfeng                     let res = bcx.ins().imul(lhs, rhs);
432*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
433*fae6e9adSlinfeng                 }
434*fae6e9adSlinfeng                 ebpf::DIV32_IMM => {
435*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32 / insn.imm              as u32) as u64,
436*fae6e9adSlinfeng                     let res = if insn.imm == 0 {
437*fae6e9adSlinfeng                         bcx.ins().iconst(I32, 0)
438*fae6e9adSlinfeng                     } else {
439*fae6e9adSlinfeng                         let imm = self.insn_imm32(bcx, &insn);
440*fae6e9adSlinfeng                         let src = self.insn_dst32(bcx, &insn);
441*fae6e9adSlinfeng                         bcx.ins().udiv(src, imm)
442*fae6e9adSlinfeng                     };
443*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
444*fae6e9adSlinfeng                 }
445*fae6e9adSlinfeng                 ebpf::DIV32_REG => {
446*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32 / reg[_src]             as u32) as u64,
447*fae6e9adSlinfeng                     let zero = bcx.ins().iconst(I32, 0);
448*fae6e9adSlinfeng                     let one = bcx.ins().iconst(I32, 1);
449*fae6e9adSlinfeng 
450*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
451*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
452*fae6e9adSlinfeng 
453*fae6e9adSlinfeng                     let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
454*fae6e9adSlinfeng                     let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
455*fae6e9adSlinfeng                     let div_res = bcx.ins().udiv(lhs, safe_rhs);
456*fae6e9adSlinfeng 
457*fae6e9adSlinfeng                     let res = bcx.ins().select(rhs_is_zero, zero, div_res);
458*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
459*fae6e9adSlinfeng                 }
460*fae6e9adSlinfeng                 ebpf::OR32_IMM => {
461*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             | insn.imm  as u32) as u64,
462*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
463*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
464*fae6e9adSlinfeng                     let res = bcx.ins().bor(src, imm);
465*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
466*fae6e9adSlinfeng                 }
467*fae6e9adSlinfeng                 ebpf::OR32_REG => {
468*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             | reg[_src] as u32) as u64,
469*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
470*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
471*fae6e9adSlinfeng                     let res = bcx.ins().bor(lhs, rhs);
472*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
473*fae6e9adSlinfeng                 }
474*fae6e9adSlinfeng                 ebpf::AND32_IMM => {
475*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             & insn.imm  as u32) as u64,
476*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
477*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
478*fae6e9adSlinfeng                     let res = bcx.ins().band(src, imm);
479*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
480*fae6e9adSlinfeng                 }
481*fae6e9adSlinfeng                 ebpf::AND32_REG => {
482*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             & reg[_src] as u32) as u64,
483*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
484*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
485*fae6e9adSlinfeng                     let res = bcx.ins().band(lhs, rhs);
486*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
487*fae6e9adSlinfeng                 }
488*fae6e9adSlinfeng                 ebpf::LSH32_IMM => {
489*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm  as u32) as u64,
490*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
491*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
492*fae6e9adSlinfeng                     let res = bcx.ins().ishl(src, imm);
493*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
494*fae6e9adSlinfeng                 }
495*fae6e9adSlinfeng                 ebpf::LSH32_REG => {
496*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
497*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
498*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
499*fae6e9adSlinfeng                     let res = bcx.ins().ishl(lhs, rhs);
500*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
501*fae6e9adSlinfeng                 }
502*fae6e9adSlinfeng                 ebpf::RSH32_IMM => {
503*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm  as u32) as u64,
504*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
505*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
506*fae6e9adSlinfeng                     let res = bcx.ins().ushr(src, imm);
507*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
508*fae6e9adSlinfeng                 }
509*fae6e9adSlinfeng                 ebpf::RSH32_REG => {
510*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
511*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
512*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
513*fae6e9adSlinfeng                     let res = bcx.ins().ushr(lhs, rhs);
514*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
515*fae6e9adSlinfeng                 }
516*fae6e9adSlinfeng                 ebpf::NEG32 => {
517*fae6e9adSlinfeng                     // { reg[_dst] = (reg[_dst] as i32).wrapping_neg()                 as u64; reg[_dst] &= U32MAX; },
518*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
519*fae6e9adSlinfeng                     let res = bcx.ins().ineg(src);
520*fae6e9adSlinfeng                     // TODO: Do we need to mask the result?
521*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
522*fae6e9adSlinfeng                 }
523*fae6e9adSlinfeng                 ebpf::MOD32_IMM => {
524*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             % insn.imm  as u32) as u64,
525*fae6e9adSlinfeng 
526*fae6e9adSlinfeng                     if insn.imm != 0 {
527*fae6e9adSlinfeng                         let imm = self.insn_imm32(bcx, &insn);
528*fae6e9adSlinfeng                         let src = self.insn_dst32(bcx, &insn);
529*fae6e9adSlinfeng                         let res = bcx.ins().urem(src, imm);
530*fae6e9adSlinfeng                         self.set_dst32(bcx, &insn, res);
531*fae6e9adSlinfeng                     }
532*fae6e9adSlinfeng                 }
533*fae6e9adSlinfeng                 ebpf::MOD32_REG => {
534*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32 % reg[_src]             as u32) as u64,
535*fae6e9adSlinfeng                     let zero = bcx.ins().iconst(I32, 0);
536*fae6e9adSlinfeng                     let one = bcx.ins().iconst(I32, 1);
537*fae6e9adSlinfeng 
538*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
539*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
540*fae6e9adSlinfeng 
541*fae6e9adSlinfeng                     let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
542*fae6e9adSlinfeng                     let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
543*fae6e9adSlinfeng                     let div_res = bcx.ins().urem(lhs, safe_rhs);
544*fae6e9adSlinfeng 
545*fae6e9adSlinfeng                     let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
546*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
547*fae6e9adSlinfeng                 }
548*fae6e9adSlinfeng                 ebpf::XOR32_IMM => {
549*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             ^ insn.imm  as u32) as u64,
550*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
551*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
552*fae6e9adSlinfeng                     let res = bcx.ins().bxor(src, imm);
553*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
554*fae6e9adSlinfeng                 }
555*fae6e9adSlinfeng                 ebpf::XOR32_REG => {
556*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as u32             ^ reg[_src] as u32) as u64,
557*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
558*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
559*fae6e9adSlinfeng                     let res = bcx.ins().bxor(lhs, rhs);
560*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
561*fae6e9adSlinfeng                 }
562*fae6e9adSlinfeng                 ebpf::MOV32_IMM => {
563*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
564*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, imm);
565*fae6e9adSlinfeng                 }
566*fae6e9adSlinfeng                 ebpf::MOV32_REG => {
567*fae6e9adSlinfeng                     // reg[_dst] = (reg[_src] as u32)                                as u64,
568*fae6e9adSlinfeng                     let src = self.insn_src32(bcx, &insn);
569*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, src);
570*fae6e9adSlinfeng                 }
571*fae6e9adSlinfeng                 ebpf::ARSH32_IMM => {
572*fae6e9adSlinfeng                     // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm  as u32) as u64; reg[_dst] &= U32MAX; },
573*fae6e9adSlinfeng                     let src = self.insn_dst32(bcx, &insn);
574*fae6e9adSlinfeng                     let imm = self.insn_imm32(bcx, &insn);
575*fae6e9adSlinfeng                     let res = bcx.ins().sshr(src, imm);
576*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
577*fae6e9adSlinfeng                 }
578*fae6e9adSlinfeng                 ebpf::ARSH32_REG => {
579*fae6e9adSlinfeng                     // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
580*fae6e9adSlinfeng                     let lhs = self.insn_dst32(bcx, &insn);
581*fae6e9adSlinfeng                     let rhs = self.insn_src32(bcx, &insn);
582*fae6e9adSlinfeng                     let res = bcx.ins().sshr(lhs, rhs);
583*fae6e9adSlinfeng                     self.set_dst32(bcx, &insn, res);
584*fae6e9adSlinfeng                 }
585*fae6e9adSlinfeng 
586*fae6e9adSlinfeng                 ebpf::BE | ebpf::LE => {
587*fae6e9adSlinfeng                     let should_swap = match insn.opc {
588*fae6e9adSlinfeng                         ebpf::BE => self.isa.endianness() == Endianness::Little,
589*fae6e9adSlinfeng                         ebpf::LE => self.isa.endianness() == Endianness::Big,
590*fae6e9adSlinfeng                         _ => unreachable!(),
591*fae6e9adSlinfeng                     };
592*fae6e9adSlinfeng 
593*fae6e9adSlinfeng                     let ty: Type = match insn.imm {
594*fae6e9adSlinfeng                         16 => I16,
595*fae6e9adSlinfeng                         32 => I32,
596*fae6e9adSlinfeng                         64 => I64,
597*fae6e9adSlinfeng                         _ => unreachable!(),
598*fae6e9adSlinfeng                     };
599*fae6e9adSlinfeng 
600*fae6e9adSlinfeng                     if should_swap {
601*fae6e9adSlinfeng                         let src = self.insn_dst(bcx, &insn);
602*fae6e9adSlinfeng                         let src_narrow = if ty != I64 {
603*fae6e9adSlinfeng                             bcx.ins().ireduce(ty, src)
604*fae6e9adSlinfeng                         } else {
605*fae6e9adSlinfeng                             src
606*fae6e9adSlinfeng                         };
607*fae6e9adSlinfeng 
608*fae6e9adSlinfeng                         let res = bcx.ins().bswap(src_narrow);
609*fae6e9adSlinfeng                         let res_wide = if ty != I64 {
610*fae6e9adSlinfeng                             bcx.ins().uextend(I64, res)
611*fae6e9adSlinfeng                         } else {
612*fae6e9adSlinfeng                             res
613*fae6e9adSlinfeng                         };
614*fae6e9adSlinfeng 
615*fae6e9adSlinfeng                         self.set_dst(bcx, &insn, res_wide);
616*fae6e9adSlinfeng                     }
617*fae6e9adSlinfeng                 }
618*fae6e9adSlinfeng 
619*fae6e9adSlinfeng                 // BPF_ALU64 class
620*fae6e9adSlinfeng                 ebpf::ADD64_IMM => {
621*fae6e9adSlinfeng                     // reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
622*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
623*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
624*fae6e9adSlinfeng                     let res = bcx.ins().iadd(src, imm);
625*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
626*fae6e9adSlinfeng                 }
627*fae6e9adSlinfeng                 ebpf::ADD64_REG => {
628*fae6e9adSlinfeng                     // reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
629*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
630*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
631*fae6e9adSlinfeng                     let res = bcx.ins().iadd(lhs, rhs);
632*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
633*fae6e9adSlinfeng                 }
634*fae6e9adSlinfeng                 ebpf::SUB64_IMM => {
635*fae6e9adSlinfeng                     // reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
636*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
637*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
638*fae6e9adSlinfeng                     let res = bcx.ins().isub(src, imm);
639*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
640*fae6e9adSlinfeng                 }
641*fae6e9adSlinfeng                 ebpf::SUB64_REG => {
642*fae6e9adSlinfeng                     // reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
643*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
644*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
645*fae6e9adSlinfeng                     let res = bcx.ins().isub(lhs, rhs);
646*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
647*fae6e9adSlinfeng                 }
648*fae6e9adSlinfeng                 ebpf::MUL64_IMM => {
649*fae6e9adSlinfeng                     // reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
650*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
651*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
652*fae6e9adSlinfeng                     let res = bcx.ins().imul(src, imm);
653*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
654*fae6e9adSlinfeng                 }
655*fae6e9adSlinfeng                 ebpf::MUL64_REG => {
656*fae6e9adSlinfeng                     // reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
657*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
658*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
659*fae6e9adSlinfeng                     let res = bcx.ins().imul(lhs, rhs);
660*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
661*fae6e9adSlinfeng                 }
662*fae6e9adSlinfeng                 ebpf::DIV64_IMM => {
663*fae6e9adSlinfeng                     // reg[_dst] /= insn.imm as u64,
664*fae6e9adSlinfeng                     let res = if insn.imm == 0 {
665*fae6e9adSlinfeng                         bcx.ins().iconst(I64, 0)
666*fae6e9adSlinfeng                     } else {
667*fae6e9adSlinfeng                         let imm = self.insn_imm64(bcx, &insn);
668*fae6e9adSlinfeng                         let src = self.insn_dst(bcx, &insn);
669*fae6e9adSlinfeng                         bcx.ins().udiv(src, imm)
670*fae6e9adSlinfeng                     };
671*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
672*fae6e9adSlinfeng                 }
673*fae6e9adSlinfeng                 ebpf::DIV64_REG => {
674*fae6e9adSlinfeng                     // reg[_dst] /= reg[_src], if reg[_src] != 0
675*fae6e9adSlinfeng                     // reg[_dst] = 0, if reg[_src] == 0
676*fae6e9adSlinfeng                     let zero = bcx.ins().iconst(I64, 0);
677*fae6e9adSlinfeng                     let one = bcx.ins().iconst(I64, 1);
678*fae6e9adSlinfeng 
679*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
680*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
681*fae6e9adSlinfeng 
682*fae6e9adSlinfeng                     let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
683*fae6e9adSlinfeng                     let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
684*fae6e9adSlinfeng                     let div_res = bcx.ins().udiv(lhs, safe_rhs);
685*fae6e9adSlinfeng 
686*fae6e9adSlinfeng                     let res = bcx.ins().select(rhs_is_zero, zero, div_res);
687*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
688*fae6e9adSlinfeng                 }
689*fae6e9adSlinfeng                 ebpf::MOD64_IMM => {
690*fae6e9adSlinfeng                     // reg[_dst] %= insn.imm as u64,
691*fae6e9adSlinfeng 
692*fae6e9adSlinfeng                     if insn.imm != 0 {
693*fae6e9adSlinfeng                         let imm = self.insn_imm64(bcx, &insn);
694*fae6e9adSlinfeng                         let src = self.insn_dst(bcx, &insn);
695*fae6e9adSlinfeng                         let res = bcx.ins().urem(src, imm);
696*fae6e9adSlinfeng                         self.set_dst(bcx, &insn, res);
697*fae6e9adSlinfeng                     };
698*fae6e9adSlinfeng                 }
699*fae6e9adSlinfeng                 ebpf::MOD64_REG => {
700*fae6e9adSlinfeng                     // reg[_dst] %= reg[_src], if reg[_src] != 0
701*fae6e9adSlinfeng 
702*fae6e9adSlinfeng                     let zero = bcx.ins().iconst(I64, 0);
703*fae6e9adSlinfeng                     let one = bcx.ins().iconst(I64, 1);
704*fae6e9adSlinfeng 
705*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
706*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
707*fae6e9adSlinfeng 
708*fae6e9adSlinfeng                     let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
709*fae6e9adSlinfeng                     let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
710*fae6e9adSlinfeng                     let div_res = bcx.ins().urem(lhs, safe_rhs);
711*fae6e9adSlinfeng 
712*fae6e9adSlinfeng                     let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
713*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
714*fae6e9adSlinfeng                 }
715*fae6e9adSlinfeng                 ebpf::OR64_IMM => {
716*fae6e9adSlinfeng                     // reg[_dst] |= insn.imm as u64,
717*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
718*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
719*fae6e9adSlinfeng                     let res = bcx.ins().bor(src, imm);
720*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
721*fae6e9adSlinfeng                 }
722*fae6e9adSlinfeng                 ebpf::OR64_REG => {
723*fae6e9adSlinfeng                     // reg[_dst] |= reg[_src],
724*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
725*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
726*fae6e9adSlinfeng                     let res = bcx.ins().bor(lhs, rhs);
727*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
728*fae6e9adSlinfeng                 }
729*fae6e9adSlinfeng                 ebpf::AND64_IMM => {
730*fae6e9adSlinfeng                     // reg[_dst] &= insn.imm as u64,
731*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
732*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
733*fae6e9adSlinfeng                     let res = bcx.ins().band(src, imm);
734*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
735*fae6e9adSlinfeng                 }
736*fae6e9adSlinfeng                 ebpf::AND64_REG => {
737*fae6e9adSlinfeng                     // reg[_dst] &= reg[_src],
738*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
739*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
740*fae6e9adSlinfeng                     let res = bcx.ins().band(lhs, rhs);
741*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
742*fae6e9adSlinfeng                 }
743*fae6e9adSlinfeng                 ebpf::LSH64_IMM => {
744*fae6e9adSlinfeng                     // reg[_dst] <<= insn.imm as u64,
745*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
746*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
747*fae6e9adSlinfeng                     let res = bcx.ins().ishl(src, imm);
748*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
749*fae6e9adSlinfeng                 }
750*fae6e9adSlinfeng                 ebpf::LSH64_REG => {
751*fae6e9adSlinfeng                     // reg[_dst] <<= reg[_src],
752*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
753*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
754*fae6e9adSlinfeng                     let res = bcx.ins().ishl(lhs, rhs);
755*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
756*fae6e9adSlinfeng                 }
757*fae6e9adSlinfeng                 ebpf::RSH64_IMM => {
758*fae6e9adSlinfeng                     // reg[_dst] >>= insn.imm as u64,
759*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
760*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
761*fae6e9adSlinfeng                     let res = bcx.ins().ushr(src, imm);
762*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
763*fae6e9adSlinfeng                 }
764*fae6e9adSlinfeng                 ebpf::RSH64_REG => {
765*fae6e9adSlinfeng                     // reg[_dst] >>= reg[_src],
766*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
767*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
768*fae6e9adSlinfeng                     let res = bcx.ins().ushr(lhs, rhs);
769*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
770*fae6e9adSlinfeng                 }
771*fae6e9adSlinfeng                 ebpf::NEG64 => {
772*fae6e9adSlinfeng                     // reg[_dst] = -(reg[_dst] as i64) as u64,
773*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
774*fae6e9adSlinfeng                     let res = bcx.ins().ineg(src);
775*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
776*fae6e9adSlinfeng                 }
777*fae6e9adSlinfeng                 ebpf::XOR64_IMM => {
778*fae6e9adSlinfeng                     // reg[_dst] ^= insn.imm as u64,
779*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
780*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
781*fae6e9adSlinfeng                     let res = bcx.ins().bxor(src, imm);
782*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
783*fae6e9adSlinfeng                 }
784*fae6e9adSlinfeng                 ebpf::XOR64_REG => {
785*fae6e9adSlinfeng                     // reg[_dst] ^= reg[_src],
786*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
787*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
788*fae6e9adSlinfeng                     let res = bcx.ins().bxor(lhs, rhs);
789*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
790*fae6e9adSlinfeng                 }
791*fae6e9adSlinfeng                 ebpf::MOV64_IMM => {
792*fae6e9adSlinfeng                     // reg[_dst] = insn.imm as u64,
793*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
794*fae6e9adSlinfeng                     bcx.def_var(self.registers[insn.dst as usize], imm);
795*fae6e9adSlinfeng                 }
796*fae6e9adSlinfeng                 ebpf::MOV64_REG => {
797*fae6e9adSlinfeng                     // reg[_dst] = reg[_src],
798*fae6e9adSlinfeng                     let src = self.insn_src(bcx, &insn);
799*fae6e9adSlinfeng                     bcx.def_var(self.registers[insn.dst as usize], src);
800*fae6e9adSlinfeng                 }
801*fae6e9adSlinfeng                 ebpf::ARSH64_IMM => {
802*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as i64 >> insn.imm) as u64,
803*fae6e9adSlinfeng                     let imm = self.insn_imm64(bcx, &insn);
804*fae6e9adSlinfeng                     let src = self.insn_dst(bcx, &insn);
805*fae6e9adSlinfeng                     let res = bcx.ins().sshr(src, imm);
806*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
807*fae6e9adSlinfeng                 }
808*fae6e9adSlinfeng                 ebpf::ARSH64_REG => {
809*fae6e9adSlinfeng                     // reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,
810*fae6e9adSlinfeng                     let lhs = self.insn_dst(bcx, &insn);
811*fae6e9adSlinfeng                     let rhs = self.insn_src(bcx, &insn);
812*fae6e9adSlinfeng                     let res = bcx.ins().sshr(lhs, rhs);
813*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, res);
814*fae6e9adSlinfeng                 }
815*fae6e9adSlinfeng 
816*fae6e9adSlinfeng                 // BPF_JMP & BPF_JMP32 class
817*fae6e9adSlinfeng                 ebpf::JA => {
818*fae6e9adSlinfeng                     let (_, target_block) = self.insn_targets[&(insn_ptr as u32)];
819*fae6e9adSlinfeng 
820*fae6e9adSlinfeng                     bcx.ins().jump(target_block, &[]);
821*fae6e9adSlinfeng                     self.filled_blocks.insert(bcx.current_block().unwrap());
822*fae6e9adSlinfeng                 }
823*fae6e9adSlinfeng                 ebpf::JEQ_IMM
824*fae6e9adSlinfeng                 | ebpf::JEQ_REG
825*fae6e9adSlinfeng                 | ebpf::JGT_IMM
826*fae6e9adSlinfeng                 | ebpf::JGT_REG
827*fae6e9adSlinfeng                 | ebpf::JGE_IMM
828*fae6e9adSlinfeng                 | ebpf::JGE_REG
829*fae6e9adSlinfeng                 | ebpf::JLT_IMM
830*fae6e9adSlinfeng                 | ebpf::JLT_REG
831*fae6e9adSlinfeng                 | ebpf::JLE_IMM
832*fae6e9adSlinfeng                 | ebpf::JLE_REG
833*fae6e9adSlinfeng                 | ebpf::JNE_IMM
834*fae6e9adSlinfeng                 | ebpf::JNE_REG
835*fae6e9adSlinfeng                 | ebpf::JSGT_IMM
836*fae6e9adSlinfeng                 | ebpf::JSGT_REG
837*fae6e9adSlinfeng                 | ebpf::JSGE_IMM
838*fae6e9adSlinfeng                 | ebpf::JSGE_REG
839*fae6e9adSlinfeng                 | ebpf::JSLT_IMM
840*fae6e9adSlinfeng                 | ebpf::JSLT_REG
841*fae6e9adSlinfeng                 | ebpf::JSLE_IMM
842*fae6e9adSlinfeng                 | ebpf::JSLE_REG
843*fae6e9adSlinfeng                 | ebpf::JSET_IMM
844*fae6e9adSlinfeng                 | ebpf::JSET_REG
845*fae6e9adSlinfeng                 | ebpf::JEQ_IMM32
846*fae6e9adSlinfeng                 | ebpf::JEQ_REG32
847*fae6e9adSlinfeng                 | ebpf::JGT_IMM32
848*fae6e9adSlinfeng                 | ebpf::JGT_REG32
849*fae6e9adSlinfeng                 | ebpf::JGE_IMM32
850*fae6e9adSlinfeng                 | ebpf::JGE_REG32
851*fae6e9adSlinfeng                 | ebpf::JLT_IMM32
852*fae6e9adSlinfeng                 | ebpf::JLT_REG32
853*fae6e9adSlinfeng                 | ebpf::JLE_IMM32
854*fae6e9adSlinfeng                 | ebpf::JLE_REG32
855*fae6e9adSlinfeng                 | ebpf::JNE_IMM32
856*fae6e9adSlinfeng                 | ebpf::JNE_REG32
857*fae6e9adSlinfeng                 | ebpf::JSGT_IMM32
858*fae6e9adSlinfeng                 | ebpf::JSGT_REG32
859*fae6e9adSlinfeng                 | ebpf::JSGE_IMM32
860*fae6e9adSlinfeng                 | ebpf::JSGE_REG32
861*fae6e9adSlinfeng                 | ebpf::JSLT_IMM32
862*fae6e9adSlinfeng                 | ebpf::JSLT_REG32
863*fae6e9adSlinfeng                 | ebpf::JSLE_IMM32
864*fae6e9adSlinfeng                 | ebpf::JSLE_REG32
865*fae6e9adSlinfeng                 | ebpf::JSET_IMM32
866*fae6e9adSlinfeng                 | ebpf::JSET_REG32 => {
867*fae6e9adSlinfeng                     let (fallthrough, target) = self.insn_targets[&(insn_ptr as u32)];
868*fae6e9adSlinfeng 
869*fae6e9adSlinfeng                     let is_reg = (insn.opc & BPF_X) != 0;
870*fae6e9adSlinfeng                     let is_32 = (insn.opc & BPF_JMP32) != 0;
871*fae6e9adSlinfeng                     let intcc = match insn.opc {
872*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JEQ => IntCC::Equal,
873*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JNE => IntCC::NotEqual,
874*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JGT => IntCC::UnsignedGreaterThan,
875*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JGE => IntCC::UnsignedGreaterThanOrEqual,
876*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JLT => IntCC::UnsignedLessThan,
877*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JLE => IntCC::UnsignedLessThanOrEqual,
878*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JSGT => IntCC::SignedGreaterThan,
879*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JSGE => IntCC::SignedGreaterThanOrEqual,
880*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JSLT => IntCC::SignedLessThan,
881*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JSLE => IntCC::SignedLessThanOrEqual,
882*fae6e9adSlinfeng                         // JSET is handled specially below
883*fae6e9adSlinfeng                         c if (c & BPF_ALU_OP_MASK) == BPF_JSET => IntCC::NotEqual,
884*fae6e9adSlinfeng                         _ => unreachable!(),
885*fae6e9adSlinfeng                     };
886*fae6e9adSlinfeng 
887*fae6e9adSlinfeng                     let lhs = if is_32 {
888*fae6e9adSlinfeng                         self.insn_dst32(bcx, &insn)
889*fae6e9adSlinfeng                     } else {
890*fae6e9adSlinfeng                         self.insn_dst(bcx, &insn)
891*fae6e9adSlinfeng                     };
892*fae6e9adSlinfeng                     let rhs = match (is_reg, is_32) {
893*fae6e9adSlinfeng                         (true, false) => self.insn_src(bcx, &insn),
894*fae6e9adSlinfeng                         (true, true) => self.insn_src32(bcx, &insn),
895*fae6e9adSlinfeng                         (false, false) => self.insn_imm64(bcx, &insn),
896*fae6e9adSlinfeng                         (false, true) => self.insn_imm32(bcx, &insn),
897*fae6e9adSlinfeng                     };
898*fae6e9adSlinfeng 
899*fae6e9adSlinfeng                     let cmp_res = if (insn.opc & BPF_ALU_OP_MASK) == BPF_JSET {
900*fae6e9adSlinfeng                         bcx.ins().band(lhs, rhs)
901*fae6e9adSlinfeng                     } else {
902*fae6e9adSlinfeng                         bcx.ins().icmp(intcc, lhs, rhs)
903*fae6e9adSlinfeng                     };
904*fae6e9adSlinfeng                     bcx.ins().brif(cmp_res, target, &[], fallthrough, &[]);
905*fae6e9adSlinfeng                     self.filled_blocks.insert(bcx.current_block().unwrap());
906*fae6e9adSlinfeng                 }
907*fae6e9adSlinfeng 
908*fae6e9adSlinfeng                 // Do not delegate the check to the verifier, since registered functions can be
909*fae6e9adSlinfeng                 // changed after the program has been verified.
910*fae6e9adSlinfeng                 ebpf::CALL => {
911*fae6e9adSlinfeng                     let func_ref = self
912*fae6e9adSlinfeng                         .helper_func_refs
913*fae6e9adSlinfeng                         .get(&(insn.imm as u32))
914*fae6e9adSlinfeng                         .copied()
915*fae6e9adSlinfeng                         .ok_or_else(|| {
916*fae6e9adSlinfeng                             Error::new(
917*fae6e9adSlinfeng                                 ErrorKind::Other,
918*fae6e9adSlinfeng                                 format!(
919*fae6e9adSlinfeng                                     "[CRANELIFT] Error: unknown helper function (id: {:#x})",
920*fae6e9adSlinfeng                                     insn.imm as u32
921*fae6e9adSlinfeng                                 ),
922*fae6e9adSlinfeng                             )
923*fae6e9adSlinfeng                         })?;
924*fae6e9adSlinfeng 
925*fae6e9adSlinfeng                     let arg0 = bcx.use_var(self.registers[1]);
926*fae6e9adSlinfeng                     let arg1 = bcx.use_var(self.registers[2]);
927*fae6e9adSlinfeng                     let arg2 = bcx.use_var(self.registers[3]);
928*fae6e9adSlinfeng                     let arg3 = bcx.use_var(self.registers[4]);
929*fae6e9adSlinfeng                     let arg4 = bcx.use_var(self.registers[5]);
930*fae6e9adSlinfeng 
931*fae6e9adSlinfeng                     let call = bcx.ins().call(func_ref, &[arg0, arg1, arg2, arg3, arg4]);
932*fae6e9adSlinfeng                     let ret = bcx.inst_results(call)[0];
933*fae6e9adSlinfeng                     self.set_dst(bcx, &insn, ret);
934*fae6e9adSlinfeng                 }
935*fae6e9adSlinfeng                 ebpf::TAIL_CALL => unimplemented!(),
936*fae6e9adSlinfeng                 ebpf::EXIT => {
937*fae6e9adSlinfeng                     let ret = bcx.use_var(self.registers[0]);
938*fae6e9adSlinfeng                     bcx.ins().return_(&[ret]);
939*fae6e9adSlinfeng                     self.filled_blocks.insert(bcx.current_block().unwrap());
940*fae6e9adSlinfeng                 }
941*fae6e9adSlinfeng                 _ => unimplemented!("inst: {:?}", insn),
942*fae6e9adSlinfeng             }
943*fae6e9adSlinfeng 
944*fae6e9adSlinfeng             insn_ptr += 1;
945*fae6e9adSlinfeng         }
946*fae6e9adSlinfeng 
947*fae6e9adSlinfeng         Ok(())
948*fae6e9adSlinfeng     }
949*fae6e9adSlinfeng 
insn_imm64(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value950*fae6e9adSlinfeng     fn insn_imm64(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
951*fae6e9adSlinfeng         bcx.ins().iconst(I64, insn.imm as u64 as i64)
952*fae6e9adSlinfeng     }
insn_imm32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value953*fae6e9adSlinfeng     fn insn_imm32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
954*fae6e9adSlinfeng         bcx.ins().iconst(I32, insn.imm as u32 as u64 as i64)
955*fae6e9adSlinfeng     }
956*fae6e9adSlinfeng 
insn_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value957*fae6e9adSlinfeng     fn insn_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
958*fae6e9adSlinfeng         bcx.use_var(self.registers[insn.dst as usize])
959*fae6e9adSlinfeng     }
insn_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value960*fae6e9adSlinfeng     fn insn_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
961*fae6e9adSlinfeng         let dst = self.insn_dst(bcx, insn);
962*fae6e9adSlinfeng         bcx.ins().ireduce(I32, dst)
963*fae6e9adSlinfeng     }
964*fae6e9adSlinfeng 
insn_src(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value965*fae6e9adSlinfeng     fn insn_src(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
966*fae6e9adSlinfeng         bcx.use_var(self.registers[insn.src as usize])
967*fae6e9adSlinfeng     }
insn_src32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value968*fae6e9adSlinfeng     fn insn_src32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
969*fae6e9adSlinfeng         let src = self.insn_src(bcx, insn);
970*fae6e9adSlinfeng         bcx.ins().ireduce(I32, src)
971*fae6e9adSlinfeng     }
972*fae6e9adSlinfeng 
set_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value)973*fae6e9adSlinfeng     fn set_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
974*fae6e9adSlinfeng         bcx.def_var(self.registers[insn.dst as usize], val);
975*fae6e9adSlinfeng     }
set_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value)976*fae6e9adSlinfeng     fn set_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
977*fae6e9adSlinfeng         let val32 = bcx.ins().uextend(I64, val);
978*fae6e9adSlinfeng         self.set_dst(bcx, insn, val32);
979*fae6e9adSlinfeng     }
980*fae6e9adSlinfeng 
reg_load(&mut self, bcx: &mut FunctionBuilder, ty: Type, base: Value, offset: i16) -> Value981*fae6e9adSlinfeng     fn reg_load(&mut self, bcx: &mut FunctionBuilder, ty: Type, base: Value, offset: i16) -> Value {
982*fae6e9adSlinfeng         self.insert_bounds_check(bcx, ty, base, offset);
983*fae6e9adSlinfeng 
984*fae6e9adSlinfeng         let mut flags = MemFlags::new();
985*fae6e9adSlinfeng         flags.set_endianness(Endianness::Little);
986*fae6e9adSlinfeng 
987*fae6e9adSlinfeng         bcx.ins().load(ty, flags, base, offset as i32)
988*fae6e9adSlinfeng     }
reg_store( &mut self, bcx: &mut FunctionBuilder, ty: Type, base: Value, offset: i16, val: Value, )989*fae6e9adSlinfeng     fn reg_store(
990*fae6e9adSlinfeng         &mut self,
991*fae6e9adSlinfeng         bcx: &mut FunctionBuilder,
992*fae6e9adSlinfeng         ty: Type,
993*fae6e9adSlinfeng         base: Value,
994*fae6e9adSlinfeng         offset: i16,
995*fae6e9adSlinfeng         val: Value,
996*fae6e9adSlinfeng     ) {
997*fae6e9adSlinfeng         self.insert_bounds_check(bcx, ty, base, offset);
998*fae6e9adSlinfeng 
999*fae6e9adSlinfeng         let mut flags = MemFlags::new();
1000*fae6e9adSlinfeng         flags.set_endianness(Endianness::Little);
1001*fae6e9adSlinfeng 
1002*fae6e9adSlinfeng         bcx.ins().store(flags, val, base, offset as i32);
1003*fae6e9adSlinfeng     }
1004*fae6e9adSlinfeng 
1005*fae6e9adSlinfeng     /// Inserts a bounds check for a memory access
1006*fae6e9adSlinfeng     ///
1007*fae6e9adSlinfeng     /// This emits a conditional trap if the access is out of bounds for any of the known
1008*fae6e9adSlinfeng     /// valid memory regions. These are the stack, the memory, and the mbuf.
insert_bounds_check( &mut self, bcx: &mut FunctionBuilder, ty: Type, base: Value, offset: i16, )1009*fae6e9adSlinfeng     fn insert_bounds_check(
1010*fae6e9adSlinfeng         &mut self,
1011*fae6e9adSlinfeng         bcx: &mut FunctionBuilder,
1012*fae6e9adSlinfeng         ty: Type,
1013*fae6e9adSlinfeng         base: Value,
1014*fae6e9adSlinfeng         offset: i16,
1015*fae6e9adSlinfeng     ) {
1016*fae6e9adSlinfeng         let access_size = bcx.ins().iconst(I64, ty.bytes() as i64);
1017*fae6e9adSlinfeng 
1018*fae6e9adSlinfeng         let offset = bcx.ins().iconst(I64, offset as i64);
1019*fae6e9adSlinfeng         let start_addr = bcx.ins().iadd(base, offset);
1020*fae6e9adSlinfeng         let end_addr = bcx.ins().iadd(start_addr, access_size);
1021*fae6e9adSlinfeng 
1022*fae6e9adSlinfeng         let does_not_overflow =
1023*fae6e9adSlinfeng             bcx.ins()
1024*fae6e9adSlinfeng                 .icmp(IntCC::UnsignedGreaterThanOrEqual, end_addr, start_addr);
1025*fae6e9adSlinfeng 
1026*fae6e9adSlinfeng         // Check if it's a valid stack access
1027*fae6e9adSlinfeng         let stack_start = bcx.use_var(self.stack_start);
1028*fae6e9adSlinfeng         let stack_end = bcx.use_var(self.stack_end);
1029*fae6e9adSlinfeng         let stack_start_valid =
1030*fae6e9adSlinfeng             bcx.ins()
1031*fae6e9adSlinfeng                 .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, stack_start);
1032*fae6e9adSlinfeng         let stack_end_valid = bcx
1033*fae6e9adSlinfeng             .ins()
1034*fae6e9adSlinfeng             .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, stack_end);
1035*fae6e9adSlinfeng         let stack_valid = bcx.ins().band(stack_start_valid, stack_end_valid);
1036*fae6e9adSlinfeng 
1037*fae6e9adSlinfeng         // Check if it's a valid memory access
1038*fae6e9adSlinfeng         let mem_start = bcx.use_var(self.mem_start);
1039*fae6e9adSlinfeng         let mem_end = bcx.use_var(self.mem_end);
1040*fae6e9adSlinfeng         let has_mem = bcx.ins().icmp_imm(IntCC::NotEqual, mem_start, 0);
1041*fae6e9adSlinfeng         let mem_start_valid =
1042*fae6e9adSlinfeng             bcx.ins()
1043*fae6e9adSlinfeng                 .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, mem_start);
1044*fae6e9adSlinfeng         let mem_end_valid = bcx
1045*fae6e9adSlinfeng             .ins()
1046*fae6e9adSlinfeng             .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, mem_end);
1047*fae6e9adSlinfeng 
1048*fae6e9adSlinfeng         let mem_valid = bcx.ins().band(mem_start_valid, mem_end_valid);
1049*fae6e9adSlinfeng         let mem_valid = bcx.ins().band(mem_valid, has_mem);
1050*fae6e9adSlinfeng 
1051*fae6e9adSlinfeng         // Check if it's a valid mbuf access
1052*fae6e9adSlinfeng         let mbuf_start = bcx.use_var(self.mbuf_start);
1053*fae6e9adSlinfeng         let mbuf_end = bcx.use_var(self.mbuf_end);
1054*fae6e9adSlinfeng         let has_mbuf = bcx.ins().icmp_imm(IntCC::NotEqual, mbuf_start, 0);
1055*fae6e9adSlinfeng         let mbuf_start_valid =
1056*fae6e9adSlinfeng             bcx.ins()
1057*fae6e9adSlinfeng                 .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, mbuf_start);
1058*fae6e9adSlinfeng         let mbuf_end_valid = bcx
1059*fae6e9adSlinfeng             .ins()
1060*fae6e9adSlinfeng             .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, mbuf_end);
1061*fae6e9adSlinfeng         let mbuf_valid = bcx.ins().band(mbuf_start_valid, mbuf_end_valid);
1062*fae6e9adSlinfeng         let mbuf_valid = bcx.ins().band(mbuf_valid, has_mbuf);
1063*fae6e9adSlinfeng 
1064*fae6e9adSlinfeng         // Join all of these checks together and trap if any of them fails
1065*fae6e9adSlinfeng 
1066*fae6e9adSlinfeng         // We need it to be valid to at least one region of memory
1067*fae6e9adSlinfeng         let valid_region = bcx.ins().bor(stack_valid, mem_valid);
1068*fae6e9adSlinfeng         let valid_region = bcx.ins().bor(valid_region, mbuf_valid);
1069*fae6e9adSlinfeng 
1070*fae6e9adSlinfeng         // And that it does not overflow
1071*fae6e9adSlinfeng         let valid = bcx.ins().band(does_not_overflow, valid_region);
1072*fae6e9adSlinfeng 
1073*fae6e9adSlinfeng         // TODO: We can potentially throw a custom trap code here to indicate
1074*fae6e9adSlinfeng         // which check failed.
1075*fae6e9adSlinfeng         bcx.ins().trapz(valid, TrapCode::HeapOutOfBounds);
1076*fae6e9adSlinfeng     }
1077*fae6e9adSlinfeng 
1078*fae6e9adSlinfeng     /// Analyze the program and build the CFG
1079*fae6e9adSlinfeng     ///
1080*fae6e9adSlinfeng     /// We do this because cranelift does not allow us to switch back to a previously
1081*fae6e9adSlinfeng     /// filled block and add instructions to it. So we can't split the program as we
1082*fae6e9adSlinfeng     /// translate it.
build_cfg(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error>1083*fae6e9adSlinfeng     fn build_cfg(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
1084*fae6e9adSlinfeng         let mut insn_ptr: usize = 0;
1085*fae6e9adSlinfeng         while insn_ptr * ebpf::INSN_SIZE < prog.len() {
1086*fae6e9adSlinfeng             let insn = ebpf::get_insn(prog, insn_ptr);
1087*fae6e9adSlinfeng 
1088*fae6e9adSlinfeng             match insn.opc {
1089*fae6e9adSlinfeng                 // This instruction consumes two opcodes
1090*fae6e9adSlinfeng                 ebpf::LD_DW_IMM => {
1091*fae6e9adSlinfeng                     insn_ptr += 1;
1092*fae6e9adSlinfeng                 }
1093*fae6e9adSlinfeng 
1094*fae6e9adSlinfeng                 ebpf::JA
1095*fae6e9adSlinfeng                 | ebpf::JEQ_IMM
1096*fae6e9adSlinfeng                 | ebpf::JEQ_REG
1097*fae6e9adSlinfeng                 | ebpf::JGT_IMM
1098*fae6e9adSlinfeng                 | ebpf::JGT_REG
1099*fae6e9adSlinfeng                 | ebpf::JGE_IMM
1100*fae6e9adSlinfeng                 | ebpf::JGE_REG
1101*fae6e9adSlinfeng                 | ebpf::JLT_IMM
1102*fae6e9adSlinfeng                 | ebpf::JLT_REG
1103*fae6e9adSlinfeng                 | ebpf::JLE_IMM
1104*fae6e9adSlinfeng                 | ebpf::JLE_REG
1105*fae6e9adSlinfeng                 | ebpf::JNE_IMM
1106*fae6e9adSlinfeng                 | ebpf::JNE_REG
1107*fae6e9adSlinfeng                 | ebpf::JSGT_IMM
1108*fae6e9adSlinfeng                 | ebpf::JSGT_REG
1109*fae6e9adSlinfeng                 | ebpf::JSGE_IMM
1110*fae6e9adSlinfeng                 | ebpf::JSGE_REG
1111*fae6e9adSlinfeng                 | ebpf::JSLT_IMM
1112*fae6e9adSlinfeng                 | ebpf::JSLT_REG
1113*fae6e9adSlinfeng                 | ebpf::JSLE_IMM
1114*fae6e9adSlinfeng                 | ebpf::JSLE_REG
1115*fae6e9adSlinfeng                 | ebpf::JSET_IMM
1116*fae6e9adSlinfeng                 | ebpf::JSET_REG
1117*fae6e9adSlinfeng                 | ebpf::JEQ_IMM32
1118*fae6e9adSlinfeng                 | ebpf::JEQ_REG32
1119*fae6e9adSlinfeng                 | ebpf::JGT_IMM32
1120*fae6e9adSlinfeng                 | ebpf::JGT_REG32
1121*fae6e9adSlinfeng                 | ebpf::JGE_IMM32
1122*fae6e9adSlinfeng                 | ebpf::JGE_REG32
1123*fae6e9adSlinfeng                 | ebpf::JLT_IMM32
1124*fae6e9adSlinfeng                 | ebpf::JLT_REG32
1125*fae6e9adSlinfeng                 | ebpf::JLE_IMM32
1126*fae6e9adSlinfeng                 | ebpf::JLE_REG32
1127*fae6e9adSlinfeng                 | ebpf::JNE_IMM32
1128*fae6e9adSlinfeng                 | ebpf::JNE_REG32
1129*fae6e9adSlinfeng                 | ebpf::JSGT_IMM32
1130*fae6e9adSlinfeng                 | ebpf::JSGT_REG32
1131*fae6e9adSlinfeng                 | ebpf::JSGE_IMM32
1132*fae6e9adSlinfeng                 | ebpf::JSGE_REG32
1133*fae6e9adSlinfeng                 | ebpf::JSLT_IMM32
1134*fae6e9adSlinfeng                 | ebpf::JSLT_REG32
1135*fae6e9adSlinfeng                 | ebpf::JSLE_IMM32
1136*fae6e9adSlinfeng                 | ebpf::JSLE_REG32
1137*fae6e9adSlinfeng                 | ebpf::JSET_IMM32
1138*fae6e9adSlinfeng                 | ebpf::JSET_REG32
1139*fae6e9adSlinfeng                 | ebpf::EXIT
1140*fae6e9adSlinfeng                 | ebpf::TAIL_CALL => {
1141*fae6e9adSlinfeng                     self.prepare_jump_blocks(bcx, insn_ptr, &insn);
1142*fae6e9adSlinfeng                 }
1143*fae6e9adSlinfeng                 _ => {}
1144*fae6e9adSlinfeng             }
1145*fae6e9adSlinfeng 
1146*fae6e9adSlinfeng             insn_ptr += 1;
1147*fae6e9adSlinfeng         }
1148*fae6e9adSlinfeng 
1149*fae6e9adSlinfeng         Ok(())
1150*fae6e9adSlinfeng     }
1151*fae6e9adSlinfeng 
prepare_jump_blocks(&mut self, bcx: &mut FunctionBuilder, insn_ptr: usize, insn: &Insn)1152*fae6e9adSlinfeng     fn prepare_jump_blocks(&mut self, bcx: &mut FunctionBuilder, insn_ptr: usize, insn: &Insn) {
1153*fae6e9adSlinfeng         let insn_ptr = insn_ptr as u32;
1154*fae6e9adSlinfeng         let next_pc: u32 = insn_ptr + 1;
1155*fae6e9adSlinfeng         let target_pc: u32 = (insn_ptr as isize + insn.off as isize + 1)
1156*fae6e9adSlinfeng             .try_into()
1157*fae6e9adSlinfeng             .unwrap();
1158*fae6e9adSlinfeng 
1159*fae6e9adSlinfeng         // This is the fallthrough block
1160*fae6e9adSlinfeng         let fallthrough_block = *self
1161*fae6e9adSlinfeng             .insn_blocks
1162*fae6e9adSlinfeng             .entry(next_pc)
1163*fae6e9adSlinfeng             .or_insert_with(|| bcx.create_block());
1164*fae6e9adSlinfeng 
1165*fae6e9adSlinfeng         // Jump Target
1166*fae6e9adSlinfeng         let target_block = *self
1167*fae6e9adSlinfeng             .insn_blocks
1168*fae6e9adSlinfeng             .entry(target_pc)
1169*fae6e9adSlinfeng             .or_insert_with(|| bcx.create_block());
1170*fae6e9adSlinfeng 
1171*fae6e9adSlinfeng         // Mark the blocks for this instruction
1172*fae6e9adSlinfeng         self.insn_targets
1173*fae6e9adSlinfeng             .insert(insn_ptr, (fallthrough_block, target_block));
1174*fae6e9adSlinfeng     }
1175*fae6e9adSlinfeng }
1176*fae6e9adSlinfeng 
/// Contains the backing memory for a previously compiled function.
///
/// Currently this will always just contain code for a single function, but
/// in the future we might want to support multiple functions per module.
///
/// Ensures that the backing memory is freed when dropped.
pub struct CraneliftProgram {
    // Wrapped in `ManuallyDrop` so the `Drop` impl can take ownership and
    // call `JITModule::free_memory`, which consumes the module by value.
    module: ManuallyDrop<JITModule>,

    // Identifier of the compiled entry-point function inside `module`.
    main_id: FuncId,
}
1188*fae6e9adSlinfeng 
1189*fae6e9adSlinfeng impl CraneliftProgram {
new(module: JITModule, main_id: FuncId) -> Self1190*fae6e9adSlinfeng     pub(crate) fn new(module: JITModule, main_id: FuncId) -> Self {
1191*fae6e9adSlinfeng         Self {
1192*fae6e9adSlinfeng             module: ManuallyDrop::new(module),
1193*fae6e9adSlinfeng             main_id,
1194*fae6e9adSlinfeng         }
1195*fae6e9adSlinfeng     }
1196*fae6e9adSlinfeng 
1197*fae6e9adSlinfeng     /// We shouldn't allow this function pointer to be exposed outside of this
1198*fae6e9adSlinfeng     /// module, since it's not guaranteed to be valid after the module is dropped.
get_main_function(&self) -> JittedFunction1199*fae6e9adSlinfeng     pub(crate) fn get_main_function(&self) -> JittedFunction {
1200*fae6e9adSlinfeng         let function_ptr = self.module.get_finalized_function(self.main_id);
1201*fae6e9adSlinfeng         unsafe { mem::transmute(function_ptr) }
1202*fae6e9adSlinfeng     }
1203*fae6e9adSlinfeng 
1204*fae6e9adSlinfeng     /// Execute this module by calling the main function
execute( &self, mem_ptr: *mut u8, mem_len: usize, mbuff_ptr: *mut u8, mbuff_len: usize, ) -> u641205*fae6e9adSlinfeng     pub fn execute(
1206*fae6e9adSlinfeng         &self,
1207*fae6e9adSlinfeng         mem_ptr: *mut u8,
1208*fae6e9adSlinfeng         mem_len: usize,
1209*fae6e9adSlinfeng         mbuff_ptr: *mut u8,
1210*fae6e9adSlinfeng         mbuff_len: usize,
1211*fae6e9adSlinfeng     ) -> u64 {
1212*fae6e9adSlinfeng         let main = self.get_main_function();
1213*fae6e9adSlinfeng 
1214*fae6e9adSlinfeng         main(mem_ptr, mem_len, mbuff_ptr, mbuff_len)
1215*fae6e9adSlinfeng     }
1216*fae6e9adSlinfeng }
1217*fae6e9adSlinfeng 
1218*fae6e9adSlinfeng impl Drop for CraneliftProgram {
drop(&mut self)1219*fae6e9adSlinfeng     fn drop(&mut self) {
1220*fae6e9adSlinfeng         // We need to have an owned version of `JITModule` to be able to free
1221*fae6e9adSlinfeng         // it's memory. Use `ManuallyDrop` to get the owned `JITModule`.
1222*fae6e9adSlinfeng         //
1223*fae6e9adSlinfeng         // We can no longer use `module` after this, but since we are `Drop`
1224*fae6e9adSlinfeng         // it should be safe.
1225*fae6e9adSlinfeng         unsafe {
1226*fae6e9adSlinfeng             let module = ManuallyDrop::take(&mut self.module);
1227*fae6e9adSlinfeng             module.free_memory()
1228*fae6e9adSlinfeng         };
1229*fae6e9adSlinfeng     }
1230*fae6e9adSlinfeng }
1231