xref: /DragonOS/kernel/crates/rbpf/src/cranelift.rs (revision 7b0ef10895108a0de5ff5ef3d2f93f40cf2e33a5)
// SPDX-License-Identifier: (Apache-2.0 OR MIT)

use alloc::{collections::BTreeMap, format, vec, vec::Vec};
use core::{mem, mem::ManuallyDrop};
use std::io::ErrorKind;

use cranelift_codegen::{
    entity::EntityRef,
    ir::{
        condcodes::IntCC,
        types::{I16, I32, I64, I8},
        AbiParam, Block, Endianness, FuncRef, Function, InstBuilder, MemFlags, Signature,
        SourceLoc, StackSlotData, StackSlotKind, TrapCode, Type, UserFuncName, Value,
    },
    isa::OwnedTargetIsa,
    settings::{self, Configurable},
};
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
use cranelift_jit::{JITBuilder, JITModule};
use cranelift_module::{FuncId, Linkage, Module};

use super::{Error, HashMap, HashSet};
use crate::ebpf::{
    self, Insn, BPF_ALU_OP_MASK, BPF_IND, BPF_JEQ, BPF_JGE, BPF_JGT, BPF_JLE, BPF_JLT, BPF_JMP32,
    BPF_JNE, BPF_JSET, BPF_JSGE, BPF_JSGT, BPF_JSLE, BPF_JSLT, BPF_X, STACK_SIZE,
};

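/// The signature of the JIT-compiled entrypoint: pointer/length pairs for the
/// packet memory and the metadata buffer (mbuff), returning the value left in
/// r0 when the program exits.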
pub type JittedFunction = extern "C" fn(
    *mut u8, // mem_ptr
    usize,   // mem_len
    *mut u8, // mbuff_ptr
    usize,   // mbuff_len
) -> u64;

pub(crate) struct CraneliftCompiler {
    isa: OwnedTargetIsa,
    module: JITModule,

    helpers: HashMap<u32, ebpf::Helper>,
    helper_func_refs: HashMap<u32, FuncRef>,

    /// List of blocks corresponding to each instruction.
    /// Only the instruction that starts a new block gets an entry.
    insn_blocks: BTreeMap<u32, Block>,
    /// Map of block targets for each jump/branching instruction.
    insn_targets: BTreeMap<u32, (Block, Block)>,
    filled_blocks: HashSet<Block>,

    /// Map of register numbers to Cranelift variables.
    registers: [Variable; 11],
    /// Other useful variables used throughout the program.
    mem_start: Variable,
    mem_end: Variable,
    mbuf_start: Variable,
    mbuf_end: Variable,
    stack_start: Variable,
    stack_end: Variable,
}

impl CraneliftCompiler {
    pub(crate) fn new(helpers: HashMap<u32, ebpf::Helper>) -> Self {
        let mut flag_builder = settings::builder();

        flag_builder.set("opt_level", "speed").unwrap();

        // Enable stack probes
        flag_builder.enable("enable_probestack").unwrap();
        flag_builder.set("probestack_strategy", "inline").unwrap();

        let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| {
            panic!("host machine is not supported: {}", msg);
        });
        let isa = isa_builder
            .finish(settings::Flags::new(flag_builder))
            .unwrap();

        let mut jit_builder =
            JITBuilder::with_isa(isa.clone(), cranelift_module::default_libcall_names());
        // Register all the helpers
        for (k, v) in helpers.iter() {
            let name = format!("helper_{}", k);
            jit_builder.symbol(name, (*v) as usize as *const u8);
        }
        // The binding is moved into `Self` below without further mutation,
        // so it does not need to be `mut`.
        let module = JITModule::new(jit_builder);

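        // Variables 0..=10 stand for the eBPF registers r0..r10; indices
        // 11..=16 are reserved for the bounds-check variables below.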
        let registers = (0..11)
            .map(|i| Variable::new(i))
            .collect::<Vec<_>>()
            .try_into()
            .unwrap();

        Self {
            isa,
            module,
            helpers,
            helper_func_refs: HashMap::new(),
            insn_blocks: BTreeMap::new(),
            insn_targets: BTreeMap::new(),
            filled_blocks: HashSet::new(),
            registers,
            mem_start: Variable::new(11),
            mem_end: Variable::new(12),
            mbuf_start: Variable::new(13),
            mbuf_end: Variable::new(14),
            stack_start: Variable::new(15),
            stack_end: Variable::new(16),
        }
    }

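    /// Compile `prog` (raw eBPF bytecode) into a native function. This
    /// consumes the compiler, as the JIT module moves into the returned
    /// [CraneliftProgram].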
    pub(crate) fn compile_function(mut self, prog: &[u8]) -> Result<CraneliftProgram, Error> {
        let name = "main";
        // This is not a standard eBPF function! We use an informal ABI with just 4 parameters.
        // See [JittedFunction], which is the signature of this function.
        //
        // Since this function only serves as the entrypoint for the JITed program, it doesn't
        // really matter.
        let sig = Signature {
            params: vec![
                AbiParam::new(I64),
                AbiParam::new(I64),
                AbiParam::new(I64),
                AbiParam::new(I64),
            ],
            returns: vec![AbiParam::new(I64)],
            call_conv: self.isa.default_call_conv(),
        };

        let func_id = self
            .module
            .declare_function(name, Linkage::Local, &sig)
            .unwrap();

        let mut ctx = self.module.make_context();
        ctx.func = Function::with_name_signature(UserFuncName::testcase(name.as_bytes()), sig);
        let mut func_ctx = FunctionBuilderContext::new();

        {
            let mut builder: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);

            let entry = builder.create_block();
            builder.append_block_params_for_function_params(entry);
            builder.switch_to_block(entry);

            self.build_cfg(&mut builder, prog)?;
            self.build_function_prelude(&mut builder, entry)?;
            self.translate_program(&mut builder, prog)?;

            builder.seal_all_blocks();
            builder.finalize();
        }

        self.module.define_function(func_id, &mut ctx).unwrap();
        self.module.finalize_definitions().unwrap();
        self.module.clear_context(&mut ctx);

        Ok(CraneliftProgram::new(self.module, func_id))
    }

    fn build_function_prelude(
        &mut self,
        bcx: &mut FunctionBuilder,
        entry: Block,
    ) -> Result<(), Error> {
        // Register the VM registers as variables
        for var in self.registers.iter() {
            bcx.declare_var(*var, I64);
        }

        // Register the bounds check variables
        bcx.declare_var(self.mem_start, I64);
        bcx.declare_var(self.mem_end, I64);
        bcx.declare_var(self.mbuf_start, I64);
        bcx.declare_var(self.mbuf_end, I64);
        bcx.declare_var(self.stack_start, I64);
        bcx.declare_var(self.stack_end, I64);

        // Register the helpers
        for (k, _) in self.helpers.iter() {
            let name = format!("helper_{}", k);
            let sig = Signature {
                params: vec![
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                    AbiParam::new(I64),
                ],
                returns: vec![AbiParam::new(I64)],
                call_conv: self.isa.default_call_conv(),
            };
            let func_id = self
                .module
                .declare_function(&name, Linkage::Import, &sig)
                .unwrap();

            let func_ref = self.module.declare_func_in_func(func_id, bcx.func);
            self.helper_func_refs.insert(*k, func_ref);
        }

        // Register the stack
        let ss = bcx.create_sized_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            size: STACK_SIZE as u32,
        });
        let addr_ty = self.isa.pointer_type();
        let stack_addr = bcx.ins().stack_addr(addr_ty, ss, STACK_SIZE as i32);
        bcx.def_var(self.registers[10], stack_addr);
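        // r10, the eBPF frame pointer, now points one past the end of the
        // stack slot; programs address the stack via negative offsets from it.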

        // Initialize the bounds check variables
        let stack_start = bcx.ins().stack_addr(addr_ty, ss, 0);
        bcx.def_var(self.stack_start, stack_start);
        let stack_end = bcx.ins().stack_addr(addr_ty, ss, STACK_SIZE as i32);
        bcx.def_var(self.stack_end, stack_end);

        // This is our internal ABI where the first 2 params are the memory
        let mem_start = bcx.block_params(entry)[0];
        let mem_len = bcx.block_params(entry)[1];
        let mem_end = bcx.ins().iadd(mem_start, mem_len);
        bcx.def_var(self.mem_start, mem_start);
        bcx.def_var(self.mem_end, mem_end);

        // And the next 2 are the mbuf
        let mbuf_start = bcx.block_params(entry)[2];
        let mbuf_len = bcx.block_params(entry)[3];
        let mbuf_end = bcx.ins().iadd(mbuf_start, mbuf_len);
        bcx.def_var(self.mbuf_start, mbuf_start);
        bcx.def_var(self.mbuf_end, mbuf_end);

        // The eBPF ABI specifies that R1 must contain either the memory or the mbuff pointer:
        // if the mbuf length is non-zero we use the mbuf, otherwise the memory pointer.
        let mbuf_exists = bcx.ins().icmp_imm(IntCC::NotEqual, mbuf_len, 0);
        let mem_or_mbuf = bcx.ins().select(mbuf_exists, mbuf_start, mem_start);
        bcx.def_var(self.registers[1], mem_or_mbuf);

        // R2 should contain the length of the memory or mbuf
        // At least ebpf-conformance tests expect this
        let mem_or_mbuf_len = bcx.ins().select(mbuf_exists, mbuf_len, mem_len);
        bcx.def_var(self.registers[2], mem_or_mbuf_len);

        // Insert the *actual* initial block
        let program_entry = bcx.create_block();
        bcx.ins().jump(program_entry, &[]);
        self.filled_blocks.insert(bcx.current_block().unwrap());
        self.insn_blocks.insert(0, program_entry);

        Ok(())
    }

    fn translate_program(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
        let mut insn_ptr: usize = 0;
        while insn_ptr * ebpf::INSN_SIZE < prog.len() {
            let insn = ebpf::get_insn(prog, insn_ptr);

            // If this instruction is on a new block, switch to it.
            if let Some(block) = self.insn_blocks.get(&(insn_ptr as u32)) {
                // Blocks must have a terminator instruction at the end before we switch away from them
                let current_block = bcx.current_block().unwrap();
                if !self.filled_blocks.contains(&current_block) {
                    bcx.ins().jump(*block, &[]);
                }

                bcx.switch_to_block(*block);
            }

            // Set the source location for the instruction
            bcx.set_srcloc(SourceLoc::new(insn_ptr as u32));

            match insn.opc {
                // BPF_LD class
                // LD_ABS_* and LD_IND_* are supposed to fetch the data pointer from the
                // metadata buffer. Since that pointer is constant and already known to us
                // (mem), we do not bother re-fetching it and use mem directly.
                ebpf::LD_ABS_B
                | ebpf::LD_ABS_H
                | ebpf::LD_ABS_W
                | ebpf::LD_ABS_DW
                | ebpf::LD_IND_B
                | ebpf::LD_IND_H
                | ebpf::LD_IND_W
                | ebpf::LD_IND_DW => {
                    let ty = match insn.opc {
                        ebpf::LD_ABS_B | ebpf::LD_IND_B => I8,
                        ebpf::LD_ABS_H | ebpf::LD_IND_H => I16,
                        ebpf::LD_ABS_W | ebpf::LD_IND_W => I32,
                        ebpf::LD_ABS_DW | ebpf::LD_IND_DW => I64,
                        _ => unreachable!(),
                    };

                    // Both instructions add the imm part of the instruction to the pointer
                    let ptr = bcx.use_var(self.mem_start);
                    let offset = bcx
                        .ins()
                        .iconst(self.isa.pointer_type(), insn.imm as u32 as i64);
                    let addr = bcx.ins().iadd(ptr, offset);

                    // IND instructions additionally add the value of the source register
                    let is_ind = (insn.opc & BPF_IND) != 0;
                    let addr = if is_ind {
                        let src_reg = self.insn_src(bcx, &insn);
                        bcx.ins().iadd(addr, src_reg)
                    } else {
                        addr
                    };

                    // The offset here has already been added to the pointer, so we pass 0
                    let loaded = self.reg_load(bcx, ty, addr, 0);

                    let ext = if ty != I64 {
                        bcx.ins().uextend(I64, loaded)
                    } else {
                        loaded
                    };

                    self.set_dst(bcx, &insn, ext);
                }
                ebpf::LD_DW_IMM => {
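                    // LD_DW_IMM occupies two instruction slots: the low 32 bits
                    // of the immediate come from this slot, the high 32 bits
                    // from the imm field of the next one.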
                    insn_ptr += 1;
                    let next_insn = ebpf::get_insn(prog, insn_ptr);

                    let imm = (((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32)) as i64;
                    let iconst = bcx.ins().iconst(I64, imm);
                    self.set_dst(bcx, &insn, iconst);
                }

                // BPF_LDX class
                ebpf::LD_B_REG | ebpf::LD_H_REG | ebpf::LD_W_REG | ebpf::LD_DW_REG => {
                    let ty = match insn.opc {
                        ebpf::LD_B_REG => I8,
                        ebpf::LD_H_REG => I16,
                        ebpf::LD_W_REG => I32,
                        ebpf::LD_DW_REG => I64,
                        _ => unreachable!(),
                    };

                    let base = self.insn_src(bcx, &insn);
                    let loaded = self.reg_load(bcx, ty, base, insn.off);

                    let ext = if ty != I64 {
                        bcx.ins().uextend(I64, loaded)
                    } else {
                        loaded
                    };

                    self.set_dst(bcx, &insn, ext);
                }

                // BPF_ST and BPF_STX class
                ebpf::ST_B_IMM
                | ebpf::ST_H_IMM
                | ebpf::ST_W_IMM
                | ebpf::ST_DW_IMM
                | ebpf::ST_B_REG
                | ebpf::ST_H_REG
                | ebpf::ST_W_REG
                | ebpf::ST_DW_REG => {
                    let ty = match insn.opc {
                        ebpf::ST_B_IMM | ebpf::ST_B_REG => I8,
                        ebpf::ST_H_IMM | ebpf::ST_H_REG => I16,
                        ebpf::ST_W_IMM | ebpf::ST_W_REG => I32,
                        ebpf::ST_DW_IMM | ebpf::ST_DW_REG => I64,
                        _ => unreachable!(),
                    };
                    let is_imm = match insn.opc {
                        ebpf::ST_B_IMM | ebpf::ST_H_IMM | ebpf::ST_W_IMM | ebpf::ST_DW_IMM => true,
                        ebpf::ST_B_REG | ebpf::ST_H_REG | ebpf::ST_W_REG | ebpf::ST_DW_REG => false,
                        _ => unreachable!(),
                    };

                    let value = if is_imm {
                        self.insn_imm64(bcx, &insn)
                    } else {
                        self.insn_src(bcx, &insn)
                    };

                    let narrow = if ty != I64 {
                        bcx.ins().ireduce(ty, value)
                    } else {
                        value
                    };

                    let base = self.insn_dst(bcx, &insn);
                    self.reg_store(bcx, ty, base, insn.off, narrow);
                }

                ebpf::ST_W_XADD => unimplemented!(),
                ebpf::ST_DW_XADD => unimplemented!(),

                // BPF_ALU class
                // TODO: Check how overflow works in the kernel. Should we mask every src
                // register value with U32MAX before doing the operation?
                // Cf. ((0x11 << 32) - (0x1 << 32)) as u32 vs ((0x11 << 32) as u32) - ((0x1 << 32) as u32)
                ebpf::ADD32_IMM => {
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().iadd(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::ADD32_REG => {
                    //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().iadd(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::SUB32_IMM => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm)         as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().isub(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::SUB32_REG => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().isub(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MUL32_IMM => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm)         as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().imul(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MUL32_REG => {
                    // reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().imul(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::DIV32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32 / insn.imm              as u32) as u64,
                    let res = if insn.imm == 0 {
                        bcx.ins().iconst(I32, 0)
                    } else {
                        let imm = self.insn_imm32(bcx, &insn);
                        let src = self.insn_dst32(bcx, &insn);
                        bcx.ins().udiv(src, imm)
                    };
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::DIV32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 / reg[_src]             as u32) as u64,
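                    // Branch-free divide-by-zero handling: divide by 1 when the
                    // divisor is zero, then select 0 as the final result.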
                    let zero = bcx.ins().iconst(I32, 0);
                    let one = bcx.ins().iconst(I32, 1);

                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().udiv(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, zero, div_res);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::OR32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32             | insn.imm  as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().bor(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::OR32_REG => {
                    // reg[_dst] = (reg[_dst] as u32             | reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().bor(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::AND32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32             & insn.imm  as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().band(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::AND32_REG => {
                    // reg[_dst] = (reg[_dst] as u32             & reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().band(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::LSH32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm  as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().ishl(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::LSH32_REG => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().ishl(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::RSH32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm  as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().ushr(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::RSH32_REG => {
                    // reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().ushr(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::NEG32 => {
                    // { reg[_dst] = (reg[_dst] as i32).wrapping_neg()                 as u64; reg[_dst] &= U32MAX; },
                    let src = self.insn_dst32(bcx, &insn);
                    let res = bcx.ins().ineg(src);
                    // TODO: Do we need to mask the result?
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MOD32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32             % insn.imm  as u32) as u64,

                    if insn.imm != 0 {
                        let imm = self.insn_imm32(bcx, &insn);
                        let src = self.insn_dst32(bcx, &insn);
                        let res = bcx.ins().urem(src, imm);
                        self.set_dst32(bcx, &insn, res);
                    }
                }
                ebpf::MOD32_REG => {
                    // reg[_dst] = (reg[_dst] as u32 % reg[_src]             as u32) as u64,
                    let zero = bcx.ins().iconst(I32, 0);
                    let one = bcx.ins().iconst(I32, 1);

                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().urem(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::XOR32_IMM => {
                    // reg[_dst] = (reg[_dst] as u32             ^ insn.imm  as u32) as u64,
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().bxor(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::XOR32_REG => {
                    // reg[_dst] = (reg[_dst] as u32             ^ reg[_src] as u32) as u64,
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().bxor(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::MOV32_IMM => {
                    let imm = self.insn_imm32(bcx, &insn);
                    self.set_dst32(bcx, &insn, imm);
                }
                ebpf::MOV32_REG => {
                    // reg[_dst] = (reg[_src] as u32)                                as u64,
                    let src = self.insn_src32(bcx, &insn);
                    self.set_dst32(bcx, &insn, src);
                }
                ebpf::ARSH32_IMM => {
                    // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm  as u32) as u64; reg[_dst] &= U32MAX; },
                    let src = self.insn_dst32(bcx, &insn);
                    let imm = self.insn_imm32(bcx, &insn);
                    let res = bcx.ins().sshr(src, imm);
                    self.set_dst32(bcx, &insn, res);
                }
                ebpf::ARSH32_REG => {
                    // { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
                    let lhs = self.insn_dst32(bcx, &insn);
                    let rhs = self.insn_src32(bcx, &insn);
                    let res = bcx.ins().sshr(lhs, rhs);
                    self.set_dst32(bcx, &insn, res);
                }

                ebpf::BE | ebpf::LE => {
                    let should_swap = match insn.opc {
                        ebpf::BE => self.isa.endianness() == Endianness::Little,
                        ebpf::LE => self.isa.endianness() == Endianness::Big,
                        _ => unreachable!(),
                    };

                    let ty: Type = match insn.imm {
                        16 => I16,
                        32 => I32,
                        64 => I64,
                        _ => unreachable!(),
                    };

                    // Even when no byte swap is needed, the value must still be
                    // truncated to the operand size and zero-extended, matching
                    // the interpreter's semantics.
                    let src = self.insn_dst(bcx, &insn);
                    let src_narrow = if ty != I64 {
                        bcx.ins().ireduce(ty, src)
                    } else {
                        src
                    };

                    let res = if should_swap {
                        bcx.ins().bswap(src_narrow)
                    } else {
                        src_narrow
                    };
                    let res_wide = if ty != I64 {
                        bcx.ins().uextend(I64, res)
                    } else {
                        res
                    };

                    self.set_dst(bcx, &insn, res_wide);
                }

                // BPF_ALU64 class
                ebpf::ADD64_IMM => {
                    // reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().iadd(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::ADD64_REG => {
                    // reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().iadd(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::SUB64_IMM => {
                    // reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().isub(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::SUB64_REG => {
                    // reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().isub(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MUL64_IMM => {
                    // reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().imul(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MUL64_REG => {
                    // reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().imul(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::DIV64_IMM => {
                    // reg[_dst] /= insn.imm as u64,
                    let res = if insn.imm == 0 {
                        bcx.ins().iconst(I64, 0)
                    } else {
                        let imm = self.insn_imm64(bcx, &insn);
                        let src = self.insn_dst(bcx, &insn);
                        bcx.ins().udiv(src, imm)
                    };
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::DIV64_REG => {
                    // reg[_dst] /= reg[_src], if reg[_src] != 0
                    // reg[_dst] = 0, if reg[_src] == 0
                    let zero = bcx.ins().iconst(I64, 0);
                    let one = bcx.ins().iconst(I64, 1);

                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().udiv(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, zero, div_res);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MOD64_IMM => {
                    // reg[_dst] %= insn.imm as u64,

                    if insn.imm != 0 {
                        let imm = self.insn_imm64(bcx, &insn);
                        let src = self.insn_dst(bcx, &insn);
                        let res = bcx.ins().urem(src, imm);
                        self.set_dst(bcx, &insn, res);
                    };
                }
                ebpf::MOD64_REG => {
                    // reg[_dst] %= reg[_src], if reg[_src] != 0

                    let zero = bcx.ins().iconst(I64, 0);
                    let one = bcx.ins().iconst(I64, 1);

                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);

                    let rhs_is_zero = bcx.ins().icmp(IntCC::Equal, rhs, zero);
                    let safe_rhs = bcx.ins().select(rhs_is_zero, one, rhs);
                    let div_res = bcx.ins().urem(lhs, safe_rhs);

                    let res = bcx.ins().select(rhs_is_zero, lhs, div_res);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::OR64_IMM => {
                    // reg[_dst] |= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().bor(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::OR64_REG => {
                    // reg[_dst] |= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().bor(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::AND64_IMM => {
                    // reg[_dst] &= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().band(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::AND64_REG => {
                    // reg[_dst] &= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().band(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::LSH64_IMM => {
                    // reg[_dst] <<= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().ishl(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::LSH64_REG => {
                    // reg[_dst] <<= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().ishl(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::RSH64_IMM => {
                    // reg[_dst] >>= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().ushr(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::RSH64_REG => {
                    // reg[_dst] >>= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().ushr(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::NEG64 => {
                    // reg[_dst] = -(reg[_dst] as i64) as u64,
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().ineg(src);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::XOR64_IMM => {
                    // reg[_dst] ^= insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().bxor(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::XOR64_REG => {
                    // reg[_dst] ^= reg[_src],
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().bxor(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::MOV64_IMM => {
                    // reg[_dst] = insn.imm as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    bcx.def_var(self.registers[insn.dst as usize], imm);
                }
                ebpf::MOV64_REG => {
                    // reg[_dst] = reg[_src],
                    let src = self.insn_src(bcx, &insn);
                    bcx.def_var(self.registers[insn.dst as usize], src);
                }
                ebpf::ARSH64_IMM => {
                    // reg[_dst] = (reg[_dst] as i64 >> insn.imm) as u64,
                    let imm = self.insn_imm64(bcx, &insn);
                    let src = self.insn_dst(bcx, &insn);
                    let res = bcx.ins().sshr(src, imm);
                    self.set_dst(bcx, &insn, res);
                }
                ebpf::ARSH64_REG => {
                    // reg[_dst] = (reg[_dst] as i64 >> reg[_src]) as u64,
                    let lhs = self.insn_dst(bcx, &insn);
                    let rhs = self.insn_src(bcx, &insn);
                    let res = bcx.ins().sshr(lhs, rhs);
                    self.set_dst(bcx, &insn, res);
                }

                // BPF_JMP & BPF_JMP32 class
                ebpf::JA => {
                    let (_, target_block) = self.insn_targets[&(insn_ptr as u32)];

                    bcx.ins().jump(target_block, &[]);
                    self.filled_blocks.insert(bcx.current_block().unwrap());
                }
                ebpf::JEQ_IMM
                | ebpf::JEQ_REG
                | ebpf::JGT_IMM
                | ebpf::JGT_REG
                | ebpf::JGE_IMM
                | ebpf::JGE_REG
                | ebpf::JLT_IMM
                | ebpf::JLT_REG
                | ebpf::JLE_IMM
                | ebpf::JLE_REG
                | ebpf::JNE_IMM
                | ebpf::JNE_REG
                | ebpf::JSGT_IMM
                | ebpf::JSGT_REG
                | ebpf::JSGE_IMM
                | ebpf::JSGE_REG
                | ebpf::JSLT_IMM
                | ebpf::JSLT_REG
                | ebpf::JSLE_IMM
                | ebpf::JSLE_REG
                | ebpf::JSET_IMM
                | ebpf::JSET_REG
                | ebpf::JEQ_IMM32
                | ebpf::JEQ_REG32
                | ebpf::JGT_IMM32
                | ebpf::JGT_REG32
                | ebpf::JGE_IMM32
                | ebpf::JGE_REG32
                | ebpf::JLT_IMM32
                | ebpf::JLT_REG32
                | ebpf::JLE_IMM32
                | ebpf::JLE_REG32
                | ebpf::JNE_IMM32
                | ebpf::JNE_REG32
                | ebpf::JSGT_IMM32
                | ebpf::JSGT_REG32
                | ebpf::JSGE_IMM32
                | ebpf::JSGE_REG32
                | ebpf::JSLT_IMM32
                | ebpf::JSLT_REG32
                | ebpf::JSLE_IMM32
                | ebpf::JSLE_REG32
                | ebpf::JSET_IMM32
                | ebpf::JSET_REG32 => {
                    let (fallthrough, target) = self.insn_targets[&(insn_ptr as u32)];

                    let is_reg = (insn.opc & BPF_X) != 0;
                    let is_32 = (insn.opc & BPF_JMP32) != 0;
                    let intcc = match insn.opc {
                        c if (c & BPF_ALU_OP_MASK) == BPF_JEQ => IntCC::Equal,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JNE => IntCC::NotEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JGT => IntCC::UnsignedGreaterThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JGE => IntCC::UnsignedGreaterThanOrEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JLT => IntCC::UnsignedLessThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JLE => IntCC::UnsignedLessThanOrEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSGT => IntCC::SignedGreaterThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSGE => IntCC::SignedGreaterThanOrEqual,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSLT => IntCC::SignedLessThan,
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSLE => IntCC::SignedLessThanOrEqual,
                        // JSET is handled specially below
                        c if (c & BPF_ALU_OP_MASK) == BPF_JSET => IntCC::NotEqual,
                        _ => unreachable!(),
                    };

                    let lhs = if is_32 {
                        self.insn_dst32(bcx, &insn)
                    } else {
                        self.insn_dst(bcx, &insn)
                    };
                    let rhs = match (is_reg, is_32) {
                        (true, false) => self.insn_src(bcx, &insn),
                        (true, true) => self.insn_src32(bcx, &insn),
                        (false, false) => self.insn_imm64(bcx, &insn),
                        (false, true) => self.insn_imm32(bcx, &insn),
                    };

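                    // For JSET the branch is taken when (dst & src) != 0, so the
                    // raw AND result can feed brif directly; brif branches to its
                    // first target on any non-zero value.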
                    let cmp_res = if (insn.opc & BPF_ALU_OP_MASK) == BPF_JSET {
                        bcx.ins().band(lhs, rhs)
                    } else {
                        bcx.ins().icmp(intcc, lhs, rhs)
                    };
                    bcx.ins().brif(cmp_res, target, &[], fallthrough, &[]);
                    self.filled_blocks.insert(bcx.current_block().unwrap());
                }

                // Do not delegate the check to the verifier, since registered functions can be
                // changed after the program has been verified.
                ebpf::CALL => {
                    let func_ref = self
                        .helper_func_refs
                        .get(&(insn.imm as u32))
                        .copied()
                        .ok_or_else(|| {
                            Error::new(
                                ErrorKind::Other,
                                format!(
                                    "[CRANELIFT] Error: unknown helper function (id: {:#x})",
                                    insn.imm as u32
                                ),
                            )
                        })?;

                    let arg0 = bcx.use_var(self.registers[1]);
                    let arg1 = bcx.use_var(self.registers[2]);
                    let arg2 = bcx.use_var(self.registers[3]);
                    let arg3 = bcx.use_var(self.registers[4]);
                    let arg4 = bcx.use_var(self.registers[5]);

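                    // Helper arguments travel in r1..r5 and the result comes
                    // back in r0, per the eBPF calling convention.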
                    let call = bcx.ins().call(func_ref, &[arg0, arg1, arg2, arg3, arg4]);
                    let ret = bcx.inst_results(call)[0];
                    self.set_dst(bcx, &insn, ret);
                }
                ebpf::TAIL_CALL => unimplemented!(),
                ebpf::EXIT => {
                    let ret = bcx.use_var(self.registers[0]);
                    bcx.ins().return_(&[ret]);
                    self.filled_blocks.insert(bcx.current_block().unwrap());
                }
                _ => unimplemented!("inst: {:?}", insn),
            }

            insn_ptr += 1;
        }

        Ok(())
    }

    fn insn_imm64(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.ins().iconst(I64, insn.imm as u64 as i64)
    }
    fn insn_imm32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.ins().iconst(I32, insn.imm as u32 as u64 as i64)
    }

    fn insn_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.use_var(self.registers[insn.dst as usize])
    }
    fn insn_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        let dst = self.insn_dst(bcx, insn);
        bcx.ins().ireduce(I32, dst)
    }

    fn insn_src(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        bcx.use_var(self.registers[insn.src as usize])
    }
    fn insn_src32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn) -> Value {
        let src = self.insn_src(bcx, insn);
        bcx.ins().ireduce(I32, src)
    }

    fn set_dst(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
        bcx.def_var(self.registers[insn.dst as usize], val);
    }
    fn set_dst32(&mut self, bcx: &mut FunctionBuilder, insn: &Insn, val: Value) {
        // 32-bit ALU results are zero-extended into the 64-bit register.
        let val64 = bcx.ins().uextend(I64, val);
        self.set_dst(bcx, insn, val64);
    }

    fn reg_load(&mut self, bcx: &mut FunctionBuilder, ty: Type, base: Value, offset: i16) -> Value {
        self.insert_bounds_check(bcx, ty, base, offset);

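        // Accesses are pinned to little-endian, independent of the host's
        // default byte order.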
        let mut flags = MemFlags::new();
        flags.set_endianness(Endianness::Little);

        bcx.ins().load(ty, flags, base, offset as i32)
    }
    fn reg_store(
        &mut self,
        bcx: &mut FunctionBuilder,
        ty: Type,
        base: Value,
        offset: i16,
        val: Value,
    ) {
        self.insert_bounds_check(bcx, ty, base, offset);

        let mut flags = MemFlags::new();
        flags.set_endianness(Endianness::Little);

        bcx.ins().store(flags, val, base, offset as i32);
    }

    /// Inserts a bounds check for a memory access
    ///
    /// This emits a conditional trap if the access is out of bounds for any of the known
    /// valid memory regions. These are the stack, the memory, and the mbuf.
    fn insert_bounds_check(
        &mut self,
        bcx: &mut FunctionBuilder,
        ty: Type,
        base: Value,
        offset: i16,
    ) {
        let access_size = bcx.ins().iconst(I64, ty.bytes() as i64);

        let offset = bcx.ins().iconst(I64, offset as i64);
        let start_addr = bcx.ins().iadd(base, offset);
        let end_addr = bcx.ins().iadd(start_addr, access_size);

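        // If the address computation wrapped around the 64-bit space, end_addr
        // ends up below start_addr; that case must trap as well.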
        let does_not_overflow =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, end_addr, start_addr);

        // Check if it's a valid stack access
        let stack_start = bcx.use_var(self.stack_start);
        let stack_end = bcx.use_var(self.stack_end);
        let stack_start_valid =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, stack_start);
        let stack_end_valid = bcx
            .ins()
            .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, stack_end);
        let stack_valid = bcx.ins().band(stack_start_valid, stack_end_valid);

        // Check if it's a valid memory access
        let mem_start = bcx.use_var(self.mem_start);
        let mem_end = bcx.use_var(self.mem_end);
        let has_mem = bcx.ins().icmp_imm(IntCC::NotEqual, mem_start, 0);
        let mem_start_valid =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, mem_start);
        let mem_end_valid = bcx
            .ins()
            .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, mem_end);

        let mem_valid = bcx.ins().band(mem_start_valid, mem_end_valid);
        let mem_valid = bcx.ins().band(mem_valid, has_mem);

        // Check if it's a valid mbuf access
        let mbuf_start = bcx.use_var(self.mbuf_start);
        let mbuf_end = bcx.use_var(self.mbuf_end);
        let has_mbuf = bcx.ins().icmp_imm(IntCC::NotEqual, mbuf_start, 0);
        let mbuf_start_valid =
            bcx.ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, start_addr, mbuf_start);
        let mbuf_end_valid = bcx
            .ins()
            .icmp(IntCC::UnsignedLessThanOrEqual, end_addr, mbuf_end);
        let mbuf_valid = bcx.ins().band(mbuf_start_valid, mbuf_end_valid);
        let mbuf_valid = bcx.ins().band(mbuf_valid, has_mbuf);

        // Join all of these checks together and trap if the access is invalid

        // The access must fall within at least one region of memory
        let valid_region = bcx.ins().bor(stack_valid, mem_valid);
        let valid_region = bcx.ins().bor(valid_region, mbuf_valid);

        // And it must not overflow
        let valid = bcx.ins().band(does_not_overflow, valid_region);

        // TODO: We can potentially throw a custom trap code here to indicate
        // which check failed.
        bcx.ins().trapz(valid, TrapCode::HeapOutOfBounds);
    }

    /// Analyze the program and build the CFG
    ///
    /// We do this up front because Cranelift does not allow us to switch back
    /// to a previously filled block and add instructions to it, so we cannot
    /// split the program into blocks while translating it.
    fn build_cfg(&mut self, bcx: &mut FunctionBuilder, prog: &[u8]) -> Result<(), Error> {
        let mut insn_ptr: usize = 0;
        while insn_ptr * ebpf::INSN_SIZE < prog.len() {
            let insn = ebpf::get_insn(prog, insn_ptr);

            match insn.opc {
                // This instruction occupies two instruction slots
                ebpf::LD_DW_IMM => {
                    insn_ptr += 1;
                }

                ebpf::JA
                | ebpf::JEQ_IMM
                | ebpf::JEQ_REG
                | ebpf::JGT_IMM
                | ebpf::JGT_REG
                | ebpf::JGE_IMM
                | ebpf::JGE_REG
                | ebpf::JLT_IMM
                | ebpf::JLT_REG
                | ebpf::JLE_IMM
                | ebpf::JLE_REG
                | ebpf::JNE_IMM
                | ebpf::JNE_REG
                | ebpf::JSGT_IMM
                | ebpf::JSGT_REG
                | ebpf::JSGE_IMM
                | ebpf::JSGE_REG
                | ebpf::JSLT_IMM
                | ebpf::JSLT_REG
                | ebpf::JSLE_IMM
                | ebpf::JSLE_REG
                | ebpf::JSET_IMM
                | ebpf::JSET_REG
                | ebpf::JEQ_IMM32
                | ebpf::JEQ_REG32
                | ebpf::JGT_IMM32
                | ebpf::JGT_REG32
                | ebpf::JGE_IMM32
                | ebpf::JGE_REG32
                | ebpf::JLT_IMM32
                | ebpf::JLT_REG32
                | ebpf::JLE_IMM32
                | ebpf::JLE_REG32
                | ebpf::JNE_IMM32
                | ebpf::JNE_REG32
                | ebpf::JSGT_IMM32
                | ebpf::JSGT_REG32
                | ebpf::JSGE_IMM32
                | ebpf::JSGE_REG32
                | ebpf::JSLT_IMM32
                | ebpf::JSLT_REG32
                | ebpf::JSLE_IMM32
                | ebpf::JSLE_REG32
                | ebpf::JSET_IMM32
                | ebpf::JSET_REG32
                | ebpf::EXIT
                | ebpf::TAIL_CALL => {
                    self.prepare_jump_blocks(bcx, insn_ptr, &insn);
                }
                _ => {}
            }

            insn_ptr += 1;
        }

        Ok(())
    }

    fn prepare_jump_blocks(&mut self, bcx: &mut FunctionBuilder, insn_ptr: usize, insn: &Insn) {
        let insn_ptr = insn_ptr as u32;
        let next_pc: u32 = insn_ptr + 1;
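        // Jump offsets are relative to the instruction *after* the branch,
        // hence the extra +1.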
        let target_pc: u32 = (insn_ptr as isize + insn.off as isize + 1)
            .try_into()
            .unwrap();

        // This is the fallthrough block
        let fallthrough_block = *self
            .insn_blocks
            .entry(next_pc)
            .or_insert_with(|| bcx.create_block());

        // Jump Target
        let target_block = *self
            .insn_blocks
            .entry(target_pc)
            .or_insert_with(|| bcx.create_block());

        // Mark the blocks for this instruction
        self.insn_targets
            .insert(insn_ptr, (fallthrough_block, target_block));
    }
}

/// Contains the backing memory for a previously compiled function.
///
/// Currently this will always just contain code for a single function, but
/// in the future we might want to support multiple functions per module.
///
/// Ensures that the backing memory is freed when dropped.
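///
/// A minimal, crate-internal usage sketch (assuming `prog` holds valid eBPF
/// bytecode, `mem` is a mutable byte buffer, and no helpers are registered):
///
/// ```ignore
/// let compiler = CraneliftCompiler::new(HashMap::new());
/// let program = compiler.compile_function(&prog)?;
/// // With an empty mbuff, the prelude hands R1 the memory pointer instead.
/// let ret = program.execute(mem.as_mut_ptr(), mem.len(), core::ptr::null_mut(), 0);
/// ```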
pub struct CraneliftProgram {
    module: ManuallyDrop<JITModule>,

    main_id: FuncId,
}

impl CraneliftProgram {
    pub(crate) fn new(module: JITModule, main_id: FuncId) -> Self {
        Self {
            module: ManuallyDrop::new(module),
            main_id,
        }
    }

    /// We shouldn't allow this function pointer to be exposed outside of this
    /// module, since it's not guaranteed to be valid after the module is dropped.
    pub(crate) fn get_main_function(&self) -> JittedFunction {
        let function_ptr = self.module.get_finalized_function(self.main_id);
        unsafe { mem::transmute(function_ptr) }
    }

    /// Execute this module by calling the main function
    pub fn execute(
        &self,
        mem_ptr: *mut u8,
        mem_len: usize,
        mbuff_ptr: *mut u8,
        mbuff_len: usize,
    ) -> u64 {
        let main = self.get_main_function();

        main(mem_ptr, mem_len, mbuff_ptr, mbuff_len)
    }
}

impl Drop for CraneliftProgram {
    fn drop(&mut self) {
        // We need to have an owned version of `JITModule` to be able to free
        // its memory. Use `ManuallyDrop` to get the owned `JITModule`.
        //
        // We can no longer use `module` after this, but since we are `Drop`
        // it should be safe.
        unsafe {
            let module = ManuallyDrop::take(&mut self.module);
            module.free_memory()
        };
    }
}