1*fae6e9adSlinfeng // SPDX-License-Identifier: (Apache-2.0 OR MIT)
2*fae6e9adSlinfeng // Derived from uBPF <https://github.com/iovisor/ubpf>
3*fae6e9adSlinfeng // Copyright 2015 Big Switch Networks, Inc
4*fae6e9adSlinfeng // (uBPF: JIT algorithm, originally in C)
5*fae6e9adSlinfeng // Copyright 2016 6WIND S.A. <quentin.monnet@6wind.com>
6*fae6e9adSlinfeng // (Translation to Rust, MetaBuff addition)
7*fae6e9adSlinfeng
8*fae6e9adSlinfeng use std::{
9*fae6e9adSlinfeng fmt::{Error as FormatterError, Formatter},
10*fae6e9adSlinfeng io::{Error, ErrorKind},
11*fae6e9adSlinfeng mem,
12*fae6e9adSlinfeng ops::{Index, IndexMut},
13*fae6e9adSlinfeng };
14*fae6e9adSlinfeng
15*fae6e9adSlinfeng use crate::{ebpf, HashMap};
16*fae6e9adSlinfeng
17*fae6e9adSlinfeng extern crate libc;
18*fae6e9adSlinfeng
// Signature of a JIT-compiled eBPF program. Arguments follow the System V
// calling convention and, per the register comments in jit_compile, are:
// (mbuff, mbuff_len, mem, mem_len, mem_offset, mem_end_offset). Returns the
// program's 64-bit result.
type MachineCode = unsafe fn(*mut u8, usize, *mut u8, usize, usize, usize) -> u64;

// Size in bytes of one page of memory allocated for the JIT output buffer.
const PAGE_SIZE: usize = 4096;
// TODO: check how long the page must be to be sure to support an eBPF program of maximum possible
// length
const NUM_PAGES: usize = 1;

// Special values for target_pc in struct Jump. They start just past the
// highest valid instruction index, so they can never collide with a real pc.
const TARGET_OFFSET: isize = ebpf::PROG_MAX_INSNS as isize;
const TARGET_PC_EXIT: isize = TARGET_OFFSET + 1;
29*fae6e9adSlinfeng
// Width in bits of an operand; selects the instruction encoding used by the
// load/store emitters.
#[derive(Copy, Clone)]
enum OperandSize {
    S8 = 8,
    S16 = 16,
    S32 = 32,
    S64 = 64,
}
37*fae6e9adSlinfeng
38*fae6e9adSlinfeng // Registers
// x86-64 register numbers as used in ModRM/REX encoding. Registers 8-15 have
// bit 3 set, which the emitters translate into a REX extension bit.
const RAX: u8 = 0;
const RCX: u8 = 1;
const RDX: u8 = 2;
const RBX: u8 = 3;
const RSP: u8 = 4;
const RBP: u8 = 5;
const RSI: u8 = 6;
const RDI: u8 = 7;
const R8: u8 = 8;
const R9: u8 = 9;
const R10: u8 = 10;
const R11: u8 = 11;
//const R12: u8 = 12;
const R13: u8 = 13;
const R14: u8 = 14;
const R15: u8 = 15;
55*fae6e9adSlinfeng
// Mapping from eBPF register number (index) to the x86-64 register backing it.
const REGISTER_MAP_SIZE: usize = 11;
const REGISTER_MAP: [u8; REGISTER_MAP_SIZE] = [
    RAX, // 0  return value
    RDI, // 1  arg 1
    RSI, // 2  arg 2
    RDX, // 3  arg 3
    R9,  // 4  arg 4
    R8,  // 5  arg 5
    RBX, // 6  callee-saved
    R13, // 7  callee-saved
    R14, // 8  callee-saved
    R15, // 9  callee-saved
    RBP, // 10 stack pointer
    // R10 and R11 are used to store a constant pointer to mem and to compute the offset for
    // LD_ABS_* and LD_IND_* operations, so they are not mapped to any eBPF register.
];
72*fae6e9adSlinfeng
73*fae6e9adSlinfeng // Return the x86 register for the given eBPF register
map_register(r: u8) -> u874*fae6e9adSlinfeng fn map_register(r: u8) -> u8 {
75*fae6e9adSlinfeng assert!(r < REGISTER_MAP_SIZE as u8);
76*fae6e9adSlinfeng REGISTER_MAP[(r % REGISTER_MAP_SIZE as u8) as usize]
77*fae6e9adSlinfeng }
78*fae6e9adSlinfeng
// Write `$data` (of type `$t`) into the JIT buffer at the current offset in
// native (little-endian on x86) byte order, then advance the offset.
// Panics if the value would overrun the buffer.
macro_rules! emit_bytes {
    ( $mem:ident, $data:tt, $t:ty ) => {{
        let size = mem::size_of::<$t>() as usize;
        assert!($mem.offset + size <= $mem.contents.len());
        unsafe {
            // NOTE(review): a *const pointer derived from `contents` is cast
            // to *mut before writing — presumably `contents` is JIT-owned
            // writable memory; confirm soundness (as_mut_ptr would be clearer).
            let mut ptr = $mem.contents.as_ptr().add($mem.offset) as *mut $t;
            // write_unaligned: the offset carries no alignment guarantee.
            ptr.write_unaligned($data);
        }
        $mem.offset += size;
    }};
}
90*fae6e9adSlinfeng
// A forward jump whose 32-bit relative displacement is reserved at emission
// time and patched once the target's location in the output buffer is known.
#[derive(Debug)]
struct Jump {
    offset_loc: usize, // buffer offset where the 4-byte displacement lives
    target_pc: isize,  // eBPF instruction index, or a special TARGET_* value
}
96*fae6e9adSlinfeng
// State carried across a single JIT compilation pass.
#[derive(Debug)]
struct JitCompiler {
    pc_locs: Vec<usize>,                    // buffer offset of each eBPF instruction
    special_targets: HashMap<isize, usize>, // buffer offsets of special targets (see set_anchor)
    jumps: Vec<Jump>,                       // displacements to patch after emission
}
103*fae6e9adSlinfeng
104*fae6e9adSlinfeng impl JitCompiler {
new() -> JitCompiler105*fae6e9adSlinfeng fn new() -> JitCompiler {
106*fae6e9adSlinfeng JitCompiler {
107*fae6e9adSlinfeng pc_locs: vec![],
108*fae6e9adSlinfeng jumps: vec![],
109*fae6e9adSlinfeng special_targets: HashMap::new(),
110*fae6e9adSlinfeng }
111*fae6e9adSlinfeng }
112*fae6e9adSlinfeng
emit1(&self, mem: &mut JitMemory, data: u8)113*fae6e9adSlinfeng fn emit1(&self, mem: &mut JitMemory, data: u8) {
114*fae6e9adSlinfeng emit_bytes!(mem, data, u8);
115*fae6e9adSlinfeng }
116*fae6e9adSlinfeng
emit2(&self, mem: &mut JitMemory, data: u16)117*fae6e9adSlinfeng fn emit2(&self, mem: &mut JitMemory, data: u16) {
118*fae6e9adSlinfeng emit_bytes!(mem, data, u16);
119*fae6e9adSlinfeng }
120*fae6e9adSlinfeng
emit4(&self, mem: &mut JitMemory, data: u32)121*fae6e9adSlinfeng fn emit4(&self, mem: &mut JitMemory, data: u32) {
122*fae6e9adSlinfeng emit_bytes!(mem, data, u32);
123*fae6e9adSlinfeng }
124*fae6e9adSlinfeng
emit8(&self, mem: &mut JitMemory, data: u64)125*fae6e9adSlinfeng fn emit8(&self, mem: &mut JitMemory, data: u64) {
126*fae6e9adSlinfeng emit_bytes!(mem, data, u64);
127*fae6e9adSlinfeng }
128*fae6e9adSlinfeng
emit_modrm(&self, mem: &mut JitMemory, modrm: u8, r: u8, m: u8)129*fae6e9adSlinfeng fn emit_modrm(&self, mem: &mut JitMemory, modrm: u8, r: u8, m: u8) {
130*fae6e9adSlinfeng assert_eq!((modrm | 0xc0), 0xc0);
131*fae6e9adSlinfeng self.emit1(mem, (modrm & 0xc0) | ((r & 0b111) << 3) | (m & 0b111));
132*fae6e9adSlinfeng }
133*fae6e9adSlinfeng
emit_modrm_reg2reg(&self, mem: &mut JitMemory, r: u8, m: u8)134*fae6e9adSlinfeng fn emit_modrm_reg2reg(&self, mem: &mut JitMemory, r: u8, m: u8) {
135*fae6e9adSlinfeng self.emit_modrm(mem, 0xc0, r, m);
136*fae6e9adSlinfeng }
137*fae6e9adSlinfeng
emit_modrm_and_displacement(&self, mem: &mut JitMemory, r: u8, m: u8, d: i32)138*fae6e9adSlinfeng fn emit_modrm_and_displacement(&self, mem: &mut JitMemory, r: u8, m: u8, d: i32) {
139*fae6e9adSlinfeng if d == 0 && (m & 0b111) != RBP {
140*fae6e9adSlinfeng self.emit_modrm(mem, 0x00, r, m);
141*fae6e9adSlinfeng } else if (-128..=127).contains(&d) {
142*fae6e9adSlinfeng self.emit_modrm(mem, 0x40, r, m);
143*fae6e9adSlinfeng self.emit1(mem, d as u8);
144*fae6e9adSlinfeng } else {
145*fae6e9adSlinfeng self.emit_modrm(mem, 0x80, r, m);
146*fae6e9adSlinfeng self.emit4(mem, d as u32);
147*fae6e9adSlinfeng }
148*fae6e9adSlinfeng }
149*fae6e9adSlinfeng
basix_rex_would_set_bits(&self, w: u8, src: u8, dst: u8) -> bool150*fae6e9adSlinfeng fn basix_rex_would_set_bits(&self, w: u8, src: u8, dst: u8) -> bool {
151*fae6e9adSlinfeng w != 0 || (src & 0b1000) != 0 || (dst & 0b1000) != 0
152*fae6e9adSlinfeng }
153*fae6e9adSlinfeng
emit_rex(&self, mem: &mut JitMemory, w: u8, r: u8, x: u8, b: u8)154*fae6e9adSlinfeng fn emit_rex(&self, mem: &mut JitMemory, w: u8, r: u8, x: u8, b: u8) {
155*fae6e9adSlinfeng assert_eq!((w | 1), 1);
156*fae6e9adSlinfeng assert_eq!((r | 1), 1);
157*fae6e9adSlinfeng assert_eq!((x | 1), 1);
158*fae6e9adSlinfeng assert_eq!((b | 1), 1);
159*fae6e9adSlinfeng self.emit1(mem, 0x40 | (w << 3) | (r << 2) | (x << 1) | b);
160*fae6e9adSlinfeng }
161*fae6e9adSlinfeng
162*fae6e9adSlinfeng // Emits a REX prefix with the top bit of src and dst.
163*fae6e9adSlinfeng // Skipped if no bits would be set.
emit_basic_rex(&self, mem: &mut JitMemory, w: u8, src: u8, dst: u8)164*fae6e9adSlinfeng fn emit_basic_rex(&self, mem: &mut JitMemory, w: u8, src: u8, dst: u8) {
165*fae6e9adSlinfeng if self.basix_rex_would_set_bits(w, src, dst) {
166*fae6e9adSlinfeng let is_masked = |val, mask| match val & mask {
167*fae6e9adSlinfeng 0 => 0,
168*fae6e9adSlinfeng _ => 1,
169*fae6e9adSlinfeng };
170*fae6e9adSlinfeng self.emit_rex(mem, w, is_masked(src, 8), 0, is_masked(dst, 8));
171*fae6e9adSlinfeng }
172*fae6e9adSlinfeng }
173*fae6e9adSlinfeng
emit_push(&self, mem: &mut JitMemory, r: u8)174*fae6e9adSlinfeng fn emit_push(&self, mem: &mut JitMemory, r: u8) {
175*fae6e9adSlinfeng self.emit_basic_rex(mem, 0, 0, r);
176*fae6e9adSlinfeng self.emit1(mem, 0x50 | (r & 0b111));
177*fae6e9adSlinfeng }
178*fae6e9adSlinfeng
emit_pop(&self, mem: &mut JitMemory, r: u8)179*fae6e9adSlinfeng fn emit_pop(&self, mem: &mut JitMemory, r: u8) {
180*fae6e9adSlinfeng self.emit_basic_rex(mem, 0, 0, r);
181*fae6e9adSlinfeng self.emit1(mem, 0x58 | (r & 0b111));
182*fae6e9adSlinfeng }
183*fae6e9adSlinfeng
    // Emit a 32-bit ALU instruction: REX prefix (if needed) and ModRM byte.
    // We use the MR encoding when there is a choice.
    // 'src' is often used as an opcode extension rather than a register.
    fn emit_alu32(&self, mem: &mut JitMemory, op: u8, src: u8, dst: u8) {
        self.emit_basic_rex(mem, 0, src, dst);
        self.emit1(mem, op);
        self.emit_modrm_reg2reg(mem, src, dst);
    }
192*fae6e9adSlinfeng
    // 32-bit ALU instruction: REX prefix, ModRM byte, and 32-bit immediate.
    fn emit_alu32_imm32(&self, mem: &mut JitMemory, op: u8, src: u8, dst: u8, imm: i32) {
        self.emit_alu32(mem, op, src, dst);
        self.emit4(mem, imm as u32);
    }
198*fae6e9adSlinfeng
    // 32-bit ALU instruction: REX prefix, ModRM byte, and 8-bit immediate.
    fn emit_alu32_imm8(&self, mem: &mut JitMemory, op: u8, src: u8, dst: u8, imm: i8) {
        self.emit_alu32(mem, op, src, dst);
        self.emit1(mem, imm as u8);
    }
204*fae6e9adSlinfeng
    // Emit a 64-bit ALU instruction: REX.W prefix and ModRM byte.
    // We use the MR encoding when there is a choice.
    // 'src' is often used as an opcode extension rather than a register.
    fn emit_alu64(&self, mem: &mut JitMemory, op: u8, src: u8, dst: u8) {
        self.emit_basic_rex(mem, 1, src, dst);
        self.emit1(mem, op);
        self.emit_modrm_reg2reg(mem, src, dst);
    }
213*fae6e9adSlinfeng
    // 64-bit ALU instruction: REX.W prefix, ModRM byte, and 32-bit immediate.
    fn emit_alu64_imm32(&self, mem: &mut JitMemory, op: u8, src: u8, dst: u8, imm: i32) {
        self.emit_alu64(mem, op, src, dst);
        self.emit4(mem, imm as u32);
    }
219*fae6e9adSlinfeng
    // 64-bit ALU instruction: REX.W prefix, ModRM byte, and 8-bit immediate.
    fn emit_alu64_imm8(&self, mem: &mut JitMemory, op: u8, src: u8, dst: u8, imm: i8) {
        self.emit_alu64(mem, op, src, dst);
        self.emit1(mem, imm as u8);
    }
225*fae6e9adSlinfeng
    // 64-bit register-to-register mov (src into dst).
    fn emit_mov(&self, mem: &mut JitMemory, src: u8, dst: u8) {
        self.emit_alu64(mem, 0x89, src, dst);
    }
230*fae6e9adSlinfeng
emit_cmp_imm32(&self, mem: &mut JitMemory, dst: u8, imm: i32)231*fae6e9adSlinfeng fn emit_cmp_imm32(&self, mem: &mut JitMemory, dst: u8, imm: i32) {
232*fae6e9adSlinfeng self.emit_alu64_imm32(mem, 0x81, 7, dst, imm);
233*fae6e9adSlinfeng }
234*fae6e9adSlinfeng
emit_cmp(&self, mem: &mut JitMemory, src: u8, dst: u8)235*fae6e9adSlinfeng fn emit_cmp(&self, mem: &mut JitMemory, src: u8, dst: u8) {
236*fae6e9adSlinfeng self.emit_alu64(mem, 0x39, src, dst);
237*fae6e9adSlinfeng }
238*fae6e9adSlinfeng
emit_cmp32_imm32(&self, mem: &mut JitMemory, dst: u8, imm: i32)239*fae6e9adSlinfeng fn emit_cmp32_imm32(&self, mem: &mut JitMemory, dst: u8, imm: i32) {
240*fae6e9adSlinfeng self.emit_alu32_imm32(mem, 0x81, 7, dst, imm);
241*fae6e9adSlinfeng }
242*fae6e9adSlinfeng
emit_cmp32(&self, mem: &mut JitMemory, src: u8, dst: u8)243*fae6e9adSlinfeng fn emit_cmp32(&self, mem: &mut JitMemory, src: u8, dst: u8) {
244*fae6e9adSlinfeng self.emit_alu32(mem, 0x39, src, dst);
245*fae6e9adSlinfeng }
246*fae6e9adSlinfeng
247*fae6e9adSlinfeng // Load [src + offset] into dst
emit_load(&self, mem: &mut JitMemory, size: OperandSize, src: u8, dst: u8, offset: i32)248*fae6e9adSlinfeng fn emit_load(&self, mem: &mut JitMemory, size: OperandSize, src: u8, dst: u8, offset: i32) {
249*fae6e9adSlinfeng let data = match size {
250*fae6e9adSlinfeng OperandSize::S64 => 1,
251*fae6e9adSlinfeng _ => 0,
252*fae6e9adSlinfeng };
253*fae6e9adSlinfeng self.emit_basic_rex(mem, data, dst, src);
254*fae6e9adSlinfeng
255*fae6e9adSlinfeng match size {
256*fae6e9adSlinfeng OperandSize::S8 => {
257*fae6e9adSlinfeng // movzx
258*fae6e9adSlinfeng self.emit1(mem, 0x0f);
259*fae6e9adSlinfeng self.emit1(mem, 0xb6);
260*fae6e9adSlinfeng }
261*fae6e9adSlinfeng OperandSize::S16 => {
262*fae6e9adSlinfeng // movzx
263*fae6e9adSlinfeng self.emit1(mem, 0x0f);
264*fae6e9adSlinfeng self.emit1(mem, 0xb7);
265*fae6e9adSlinfeng }
266*fae6e9adSlinfeng OperandSize::S32 | OperandSize::S64 => {
267*fae6e9adSlinfeng // mov
268*fae6e9adSlinfeng self.emit1(mem, 0x8b);
269*fae6e9adSlinfeng }
270*fae6e9adSlinfeng }
271*fae6e9adSlinfeng
272*fae6e9adSlinfeng self.emit_modrm_and_displacement(mem, dst, src, offset);
273*fae6e9adSlinfeng }
274*fae6e9adSlinfeng
275*fae6e9adSlinfeng // Load sign-extended immediate into register
emit_load_imm(&self, mem: &mut JitMemory, dst: u8, imm: i64)276*fae6e9adSlinfeng fn emit_load_imm(&self, mem: &mut JitMemory, dst: u8, imm: i64) {
277*fae6e9adSlinfeng if imm >= i32::MIN as i64 && imm <= i32::MAX as i64 {
278*fae6e9adSlinfeng self.emit_alu64_imm32(mem, 0xc7, 0, dst, imm as i32);
279*fae6e9adSlinfeng } else {
280*fae6e9adSlinfeng // movabs $imm,dst
281*fae6e9adSlinfeng self.emit_basic_rex(mem, 1, 0, dst);
282*fae6e9adSlinfeng self.emit1(mem, 0xb8 | (dst & 0b111));
283*fae6e9adSlinfeng self.emit8(mem, imm as u64);
284*fae6e9adSlinfeng }
285*fae6e9adSlinfeng }
286*fae6e9adSlinfeng
    // Store register src to [dst + offset].
    fn emit_store(&self, mem: &mut JitMemory, size: OperandSize, src: u8, dst: u8, offset: i32) {
        match size {
            OperandSize::S16 => self.emit1(mem, 0x66), // 16-bit override
            _ => {}
        };
        let (is_s8, is_u64, rexw) = match size {
            OperandSize::S8 => (true, false, 0),
            OperandSize::S64 => (false, true, 1),
            _ => (false, false, 0),
        };
        // Unlike emit_basic_rex, a REX prefix is forced for all 8-bit stores
        // (is_s8): with REX present, encodings 4-7 select SPL/BPL/SIL/DIL
        // instead of the legacy AH/CH/DH/BH high-byte registers.
        if is_u64 || (src & 0b1000) != 0 || (dst & 0b1000) != 0 || is_s8 {
            let is_masked = |val, mask| match val & mask {
                0 => 0,
                _ => 1,
            };
            self.emit_rex(mem, rexw, is_masked(src, 8), 0, is_masked(dst, 8));
        }
        match size {
            OperandSize::S8 => self.emit1(mem, 0x88), // mov r/m8, r8
            _ => self.emit1(mem, 0x89),               // mov r/m, r
        };
        self.emit_modrm_and_displacement(mem, src, dst, offset);
    }
311*fae6e9adSlinfeng
312*fae6e9adSlinfeng // Store immediate to [dst + offset]
emit_store_imm32( &self, mem: &mut JitMemory, size: OperandSize, dst: u8, offset: i32, imm: i32, )313*fae6e9adSlinfeng fn emit_store_imm32(
314*fae6e9adSlinfeng &self,
315*fae6e9adSlinfeng mem: &mut JitMemory,
316*fae6e9adSlinfeng size: OperandSize,
317*fae6e9adSlinfeng dst: u8,
318*fae6e9adSlinfeng offset: i32,
319*fae6e9adSlinfeng imm: i32,
320*fae6e9adSlinfeng ) {
321*fae6e9adSlinfeng match size {
322*fae6e9adSlinfeng OperandSize::S16 => self.emit1(mem, 0x66), // 16-bit override
323*fae6e9adSlinfeng _ => {}
324*fae6e9adSlinfeng };
325*fae6e9adSlinfeng match size {
326*fae6e9adSlinfeng OperandSize::S64 => self.emit_basic_rex(mem, 1, 0, dst),
327*fae6e9adSlinfeng _ => self.emit_basic_rex(mem, 0, 0, dst),
328*fae6e9adSlinfeng };
329*fae6e9adSlinfeng match size {
330*fae6e9adSlinfeng OperandSize::S8 => self.emit1(mem, 0xc6),
331*fae6e9adSlinfeng _ => self.emit1(mem, 0xc7),
332*fae6e9adSlinfeng };
333*fae6e9adSlinfeng self.emit_modrm_and_displacement(mem, 0, dst, offset);
334*fae6e9adSlinfeng match size {
335*fae6e9adSlinfeng OperandSize::S8 => self.emit1(mem, imm as u8),
336*fae6e9adSlinfeng OperandSize::S16 => self.emit2(mem, imm as u16),
337*fae6e9adSlinfeng _ => self.emit4(mem, imm as u32),
338*fae6e9adSlinfeng };
339*fae6e9adSlinfeng }
340*fae6e9adSlinfeng
emit_direct_jcc(&self, mem: &mut JitMemory, code: u8, offset: u32)341*fae6e9adSlinfeng fn emit_direct_jcc(&self, mem: &mut JitMemory, code: u8, offset: u32) {
342*fae6e9adSlinfeng self.emit1(mem, 0x0f);
343*fae6e9adSlinfeng self.emit1(mem, code);
344*fae6e9adSlinfeng emit_bytes!(mem, offset, u32);
345*fae6e9adSlinfeng }
346*fae6e9adSlinfeng
emit_call(&self, mem: &mut JitMemory, target: usize)347*fae6e9adSlinfeng fn emit_call(&self, mem: &mut JitMemory, target: usize) {
348*fae6e9adSlinfeng // TODO use direct call when possible
349*fae6e9adSlinfeng self.emit_load_imm(mem, RAX, target as i64);
350*fae6e9adSlinfeng // callq *%rax
351*fae6e9adSlinfeng self.emit1(mem, 0xff);
352*fae6e9adSlinfeng self.emit1(mem, 0xd0);
353*fae6e9adSlinfeng }
354*fae6e9adSlinfeng
emit_jump_offset(&mut self, mem: &mut JitMemory, target_pc: isize)355*fae6e9adSlinfeng fn emit_jump_offset(&mut self, mem: &mut JitMemory, target_pc: isize) {
356*fae6e9adSlinfeng let jump = Jump {
357*fae6e9adSlinfeng offset_loc: mem.offset,
358*fae6e9adSlinfeng target_pc,
359*fae6e9adSlinfeng };
360*fae6e9adSlinfeng self.jumps.push(jump);
361*fae6e9adSlinfeng self.emit4(mem, 0);
362*fae6e9adSlinfeng }
363*fae6e9adSlinfeng
emit_jcc(&mut self, mem: &mut JitMemory, code: u8, target_pc: isize)364*fae6e9adSlinfeng fn emit_jcc(&mut self, mem: &mut JitMemory, code: u8, target_pc: isize) {
365*fae6e9adSlinfeng self.emit1(mem, 0x0f);
366*fae6e9adSlinfeng self.emit1(mem, code);
367*fae6e9adSlinfeng self.emit_jump_offset(mem, target_pc);
368*fae6e9adSlinfeng }
369*fae6e9adSlinfeng
emit_jmp(&mut self, mem: &mut JitMemory, target_pc: isize)370*fae6e9adSlinfeng fn emit_jmp(&mut self, mem: &mut JitMemory, target_pc: isize) {
371*fae6e9adSlinfeng self.emit1(mem, 0xe9);
372*fae6e9adSlinfeng self.emit_jump_offset(mem, target_pc);
373*fae6e9adSlinfeng }
374*fae6e9adSlinfeng
set_anchor(&mut self, mem: &mut JitMemory, target: isize)375*fae6e9adSlinfeng fn set_anchor(&mut self, mem: &mut JitMemory, target: isize) {
376*fae6e9adSlinfeng self.special_targets.insert(target, mem.offset);
377*fae6e9adSlinfeng }
378*fae6e9adSlinfeng
emit_muldivmod( &mut self, mem: &mut JitMemory, pc: u16, opc: u8, src: u8, dst: u8, imm: i32, )379*fae6e9adSlinfeng fn emit_muldivmod(
380*fae6e9adSlinfeng &mut self,
381*fae6e9adSlinfeng mem: &mut JitMemory,
382*fae6e9adSlinfeng pc: u16,
383*fae6e9adSlinfeng opc: u8,
384*fae6e9adSlinfeng src: u8,
385*fae6e9adSlinfeng dst: u8,
386*fae6e9adSlinfeng imm: i32,
387*fae6e9adSlinfeng ) {
388*fae6e9adSlinfeng let mul = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::MUL32_IMM & ebpf::BPF_ALU_OP_MASK);
389*fae6e9adSlinfeng let div = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::DIV32_IMM & ebpf::BPF_ALU_OP_MASK);
390*fae6e9adSlinfeng let modrm = (opc & ebpf::BPF_ALU_OP_MASK) == (ebpf::MOD32_IMM & ebpf::BPF_ALU_OP_MASK);
391*fae6e9adSlinfeng let is64 = (opc & ebpf::BPF_CLS_MASK) == ebpf::BPF_ALU64;
392*fae6e9adSlinfeng let is_reg = (opc & ebpf::BPF_X) == ebpf::BPF_X;
393*fae6e9adSlinfeng
394*fae6e9adSlinfeng if (div || mul) && !is_reg && imm == 0 {
395*fae6e9adSlinfeng // Division by zero returns 0
396*fae6e9adSlinfeng // Set register to 0: xor with itself
397*fae6e9adSlinfeng self.emit_alu32(mem, 0x31, dst, dst);
398*fae6e9adSlinfeng return;
399*fae6e9adSlinfeng }
400*fae6e9adSlinfeng if modrm && !is_reg && imm == 0 {
401*fae6e9adSlinfeng // Modulo remainder of division by zero keeps destination register unchanged
402*fae6e9adSlinfeng return;
403*fae6e9adSlinfeng }
404*fae6e9adSlinfeng if (div || modrm) && is_reg {
405*fae6e9adSlinfeng self.emit_load_imm(mem, RCX, pc as i64);
406*fae6e9adSlinfeng
407*fae6e9adSlinfeng // test src,src
408*fae6e9adSlinfeng if is64 {
409*fae6e9adSlinfeng self.emit_alu64(mem, 0x85, src, src);
410*fae6e9adSlinfeng } else {
411*fae6e9adSlinfeng self.emit_alu32(mem, 0x85, src, src);
412*fae6e9adSlinfeng }
413*fae6e9adSlinfeng
414*fae6e9adSlinfeng if div {
415*fae6e9adSlinfeng // No division by 0: skip next instructions
416*fae6e9adSlinfeng // Jump offset: emit_alu32 adds 2 to 3 bytes, emit_jmp adds 5
417*fae6e9adSlinfeng let offset = match self.basix_rex_would_set_bits(0, dst, dst) {
418*fae6e9adSlinfeng true => 3 + 5,
419*fae6e9adSlinfeng false => 2 + 5,
420*fae6e9adSlinfeng };
421*fae6e9adSlinfeng self.emit_direct_jcc(mem, 0x85, offset);
422*fae6e9adSlinfeng // Division by 0: set dst to 0 then go to next instruction
423*fae6e9adSlinfeng // Set register to 0: xor with itself
424*fae6e9adSlinfeng self.emit_alu32(mem, 0x31, dst, dst);
425*fae6e9adSlinfeng self.emit_jmp(mem, (pc + 1) as isize);
426*fae6e9adSlinfeng }
427*fae6e9adSlinfeng if modrm {
428*fae6e9adSlinfeng // Modulo by zero: keep destination register unchanged
429*fae6e9adSlinfeng self.emit_jcc(mem, 0x84, (pc + 1) as isize);
430*fae6e9adSlinfeng }
431*fae6e9adSlinfeng }
432*fae6e9adSlinfeng
433*fae6e9adSlinfeng if dst != RAX {
434*fae6e9adSlinfeng self.emit_push(mem, RAX);
435*fae6e9adSlinfeng }
436*fae6e9adSlinfeng if dst != RDX {
437*fae6e9adSlinfeng self.emit_push(mem, RDX);
438*fae6e9adSlinfeng }
439*fae6e9adSlinfeng if imm != 0 {
440*fae6e9adSlinfeng self.emit_load_imm(mem, RCX, imm as i64);
441*fae6e9adSlinfeng } else {
442*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
443*fae6e9adSlinfeng }
444*fae6e9adSlinfeng
445*fae6e9adSlinfeng self.emit_mov(mem, dst, RAX);
446*fae6e9adSlinfeng
447*fae6e9adSlinfeng if div || modrm {
448*fae6e9adSlinfeng // Set register to 0: xor %edx,%edx
449*fae6e9adSlinfeng self.emit_alu32(mem, 0x31, RDX, RDX);
450*fae6e9adSlinfeng }
451*fae6e9adSlinfeng
452*fae6e9adSlinfeng if is64 {
453*fae6e9adSlinfeng self.emit_rex(mem, 1, 0, 0, 0);
454*fae6e9adSlinfeng }
455*fae6e9adSlinfeng
456*fae6e9adSlinfeng // mul %ecx or div %ecx
457*fae6e9adSlinfeng self.emit_alu32(mem, 0xf7, if mul { 4 } else { 6 }, RCX);
458*fae6e9adSlinfeng
459*fae6e9adSlinfeng if dst != RDX {
460*fae6e9adSlinfeng if modrm {
461*fae6e9adSlinfeng self.emit_mov(mem, RDX, dst);
462*fae6e9adSlinfeng }
463*fae6e9adSlinfeng self.emit_pop(mem, RDX);
464*fae6e9adSlinfeng }
465*fae6e9adSlinfeng if dst != RAX {
466*fae6e9adSlinfeng if div || mul {
467*fae6e9adSlinfeng self.emit_mov(mem, RAX, dst);
468*fae6e9adSlinfeng }
469*fae6e9adSlinfeng self.emit_pop(mem, RAX);
470*fae6e9adSlinfeng }
471*fae6e9adSlinfeng }
472*fae6e9adSlinfeng
jit_compile( &mut self, mem: &mut JitMemory, prog: &[u8], use_mbuff: bool, update_data_ptr: bool, helpers: &HashMap<u32, ebpf::Helper>, ) -> Result<(), Error>473*fae6e9adSlinfeng fn jit_compile(
474*fae6e9adSlinfeng &mut self,
475*fae6e9adSlinfeng mem: &mut JitMemory,
476*fae6e9adSlinfeng prog: &[u8],
477*fae6e9adSlinfeng use_mbuff: bool,
478*fae6e9adSlinfeng update_data_ptr: bool,
479*fae6e9adSlinfeng helpers: &HashMap<u32, ebpf::Helper>,
480*fae6e9adSlinfeng ) -> Result<(), Error> {
481*fae6e9adSlinfeng self.emit_push(mem, RBP);
482*fae6e9adSlinfeng self.emit_push(mem, RBX);
483*fae6e9adSlinfeng self.emit_push(mem, R13);
484*fae6e9adSlinfeng self.emit_push(mem, R14);
485*fae6e9adSlinfeng self.emit_push(mem, R15);
486*fae6e9adSlinfeng
487*fae6e9adSlinfeng // RDI: mbuff
488*fae6e9adSlinfeng // RSI: mbuff_len
489*fae6e9adSlinfeng // RDX: mem
490*fae6e9adSlinfeng // RCX: mem_len
491*fae6e9adSlinfeng // R8: mem_offset
492*fae6e9adSlinfeng // R9: mem_end_offset
493*fae6e9adSlinfeng
494*fae6e9adSlinfeng // Save mem pointer for use with LD_ABS_* and LD_IND_* instructions
495*fae6e9adSlinfeng self.emit_mov(mem, RDX, R10);
496*fae6e9adSlinfeng
497*fae6e9adSlinfeng match (use_mbuff, update_data_ptr) {
498*fae6e9adSlinfeng (false, _) => {
499*fae6e9adSlinfeng // We do not use any mbuff. Move mem pointer into register 1.
500*fae6e9adSlinfeng if map_register(1) != RDX {
501*fae6e9adSlinfeng self.emit_mov(mem, RDX, map_register(1));
502*fae6e9adSlinfeng }
503*fae6e9adSlinfeng }
504*fae6e9adSlinfeng (true, false) => {
505*fae6e9adSlinfeng // We use a mbuff already pointing to mem and mem_end: move it to register 1.
506*fae6e9adSlinfeng if map_register(1) != RDI {
507*fae6e9adSlinfeng self.emit_mov(mem, RDI, map_register(1));
508*fae6e9adSlinfeng }
509*fae6e9adSlinfeng }
510*fae6e9adSlinfeng (true, true) => {
511*fae6e9adSlinfeng // We have a fixed (simulated) mbuff: update mem and mem_end offset values in it.
512*fae6e9adSlinfeng // Store mem at mbuff + mem_offset. Trash R8.
513*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, RDI, R8); // add mbuff to mem_offset in R8
514*fae6e9adSlinfeng self.emit_store(mem, OperandSize::S64, RDX, R8, 0); // set mem at mbuff + mem_offset
515*fae6e9adSlinfeng // Store mem_end at mbuff + mem_end_offset. Trash R9.
516*fae6e9adSlinfeng self.emit_load(mem, OperandSize::S64, RDX, R8, 0); // load mem into R8
517*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, RCX, R8); // add mem_len to mem (= mem_end)
518*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, RDI, R9); // add mbuff to mem_end_offset
519*fae6e9adSlinfeng self.emit_store(mem, OperandSize::S64, R8, R9, 0); // store mem_end
520*fae6e9adSlinfeng
521*fae6e9adSlinfeng // Move rdi into register 1
522*fae6e9adSlinfeng if map_register(1) != RDI {
523*fae6e9adSlinfeng self.emit_mov(mem, RDI, map_register(1));
524*fae6e9adSlinfeng }
525*fae6e9adSlinfeng }
526*fae6e9adSlinfeng }
527*fae6e9adSlinfeng
528*fae6e9adSlinfeng // Copy stack pointer to R10
529*fae6e9adSlinfeng self.emit_mov(mem, RSP, map_register(10));
530*fae6e9adSlinfeng
531*fae6e9adSlinfeng // Allocate stack space
532*fae6e9adSlinfeng self.emit_alu64_imm32(mem, 0x81, 5, RSP, ebpf::STACK_SIZE as i32);
533*fae6e9adSlinfeng
534*fae6e9adSlinfeng self.pc_locs = vec![0; prog.len() / ebpf::INSN_SIZE + 1];
535*fae6e9adSlinfeng
536*fae6e9adSlinfeng let mut insn_ptr: usize = 0;
537*fae6e9adSlinfeng while insn_ptr * ebpf::INSN_SIZE < prog.len() {
538*fae6e9adSlinfeng let insn = ebpf::get_insn(prog, insn_ptr);
539*fae6e9adSlinfeng
540*fae6e9adSlinfeng self.pc_locs[insn_ptr] = mem.offset;
541*fae6e9adSlinfeng
542*fae6e9adSlinfeng let dst = map_register(insn.dst);
543*fae6e9adSlinfeng let src = map_register(insn.src);
544*fae6e9adSlinfeng let target_pc = insn_ptr as isize + insn.off as isize + 1;
545*fae6e9adSlinfeng
546*fae6e9adSlinfeng match insn.opc {
547*fae6e9adSlinfeng // BPF_LD class
548*fae6e9adSlinfeng // R10 is a constant pointer to mem.
549*fae6e9adSlinfeng ebpf::LD_ABS_B => self.emit_load(mem, OperandSize::S8, R10, RAX, insn.imm),
550*fae6e9adSlinfeng ebpf::LD_ABS_H => self.emit_load(mem, OperandSize::S16, R10, RAX, insn.imm),
551*fae6e9adSlinfeng ebpf::LD_ABS_W => self.emit_load(mem, OperandSize::S32, R10, RAX, insn.imm),
552*fae6e9adSlinfeng ebpf::LD_ABS_DW => self.emit_load(mem, OperandSize::S64, R10, RAX, insn.imm),
553*fae6e9adSlinfeng ebpf::LD_IND_B => {
554*fae6e9adSlinfeng self.emit_mov(mem, R10, R11); // load mem into R11
555*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, src, R11); // add src to R11
556*fae6e9adSlinfeng self.emit_load(mem, OperandSize::S8, R11, RAX, insn.imm); // ld R0, mem[src+imm]
557*fae6e9adSlinfeng }
558*fae6e9adSlinfeng ebpf::LD_IND_H => {
559*fae6e9adSlinfeng self.emit_mov(mem, R10, R11); // load mem into R11
560*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, src, R11); // add src to R11
561*fae6e9adSlinfeng self.emit_load(mem, OperandSize::S16, R11, RAX, insn.imm); // ld R0, mem[src+imm]
562*fae6e9adSlinfeng }
563*fae6e9adSlinfeng ebpf::LD_IND_W => {
564*fae6e9adSlinfeng self.emit_mov(mem, R10, R11); // load mem into R11
565*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, src, R11); // add src to R11
566*fae6e9adSlinfeng self.emit_load(mem, OperandSize::S32, R11, RAX, insn.imm); // ld R0, mem[src+imm]
567*fae6e9adSlinfeng }
568*fae6e9adSlinfeng ebpf::LD_IND_DW => {
569*fae6e9adSlinfeng self.emit_mov(mem, R10, R11); // load mem into R11
570*fae6e9adSlinfeng self.emit_alu64(mem, 0x01, src, R11); // add src to R11
571*fae6e9adSlinfeng self.emit_load(mem, OperandSize::S64, R11, RAX, insn.imm); // ld R0, mem[src+imm]
572*fae6e9adSlinfeng }
573*fae6e9adSlinfeng
574*fae6e9adSlinfeng ebpf::LD_DW_IMM => {
575*fae6e9adSlinfeng insn_ptr += 1;
576*fae6e9adSlinfeng let second_part = ebpf::get_insn(prog, insn_ptr).imm as u64;
577*fae6e9adSlinfeng let imm = (insn.imm as u32) as u64 | second_part.wrapping_shl(32);
578*fae6e9adSlinfeng self.emit_load_imm(mem, dst, imm as i64);
579*fae6e9adSlinfeng }
580*fae6e9adSlinfeng
581*fae6e9adSlinfeng // BPF_LDX class
582*fae6e9adSlinfeng ebpf::LD_B_REG => self.emit_load(mem, OperandSize::S8, src, dst, insn.off as i32),
583*fae6e9adSlinfeng ebpf::LD_H_REG => self.emit_load(mem, OperandSize::S16, src, dst, insn.off as i32),
584*fae6e9adSlinfeng ebpf::LD_W_REG => self.emit_load(mem, OperandSize::S32, src, dst, insn.off as i32),
585*fae6e9adSlinfeng ebpf::LD_DW_REG => self.emit_load(mem, OperandSize::S64, src, dst, insn.off as i32),
586*fae6e9adSlinfeng
587*fae6e9adSlinfeng // BPF_ST class
588*fae6e9adSlinfeng ebpf::ST_B_IMM => {
589*fae6e9adSlinfeng self.emit_store_imm32(mem, OperandSize::S8, dst, insn.off as i32, insn.imm)
590*fae6e9adSlinfeng }
591*fae6e9adSlinfeng ebpf::ST_H_IMM => {
592*fae6e9adSlinfeng self.emit_store_imm32(mem, OperandSize::S16, dst, insn.off as i32, insn.imm)
593*fae6e9adSlinfeng }
594*fae6e9adSlinfeng ebpf::ST_W_IMM => {
595*fae6e9adSlinfeng self.emit_store_imm32(mem, OperandSize::S32, dst, insn.off as i32, insn.imm)
596*fae6e9adSlinfeng }
597*fae6e9adSlinfeng ebpf::ST_DW_IMM => {
598*fae6e9adSlinfeng self.emit_store_imm32(mem, OperandSize::S64, dst, insn.off as i32, insn.imm)
599*fae6e9adSlinfeng }
600*fae6e9adSlinfeng
601*fae6e9adSlinfeng // BPF_STX class
602*fae6e9adSlinfeng ebpf::ST_B_REG => self.emit_store(mem, OperandSize::S8, src, dst, insn.off as i32),
603*fae6e9adSlinfeng ebpf::ST_H_REG => self.emit_store(mem, OperandSize::S16, src, dst, insn.off as i32),
604*fae6e9adSlinfeng ebpf::ST_W_REG => self.emit_store(mem, OperandSize::S32, src, dst, insn.off as i32),
605*fae6e9adSlinfeng ebpf::ST_DW_REG => {
606*fae6e9adSlinfeng self.emit_store(mem, OperandSize::S64, src, dst, insn.off as i32)
607*fae6e9adSlinfeng }
608*fae6e9adSlinfeng ebpf::ST_W_XADD => unimplemented!(),
609*fae6e9adSlinfeng ebpf::ST_DW_XADD => unimplemented!(),
610*fae6e9adSlinfeng
611*fae6e9adSlinfeng // BPF_ALU class
612*fae6e9adSlinfeng ebpf::ADD32_IMM => self.emit_alu32_imm32(mem, 0x81, 0, dst, insn.imm),
613*fae6e9adSlinfeng ebpf::ADD32_REG => self.emit_alu32(mem, 0x01, src, dst),
614*fae6e9adSlinfeng ebpf::SUB32_IMM => self.emit_alu32_imm32(mem, 0x81, 5, dst, insn.imm),
615*fae6e9adSlinfeng ebpf::SUB32_REG => self.emit_alu32(mem, 0x29, src, dst),
616*fae6e9adSlinfeng ebpf::MUL32_IMM
617*fae6e9adSlinfeng | ebpf::MUL32_REG
618*fae6e9adSlinfeng | ebpf::DIV32_IMM
619*fae6e9adSlinfeng | ebpf::DIV32_REG
620*fae6e9adSlinfeng | ebpf::MOD32_IMM
621*fae6e9adSlinfeng | ebpf::MOD32_REG => {
622*fae6e9adSlinfeng self.emit_muldivmod(mem, insn_ptr as u16, insn.opc, src, dst, insn.imm)
623*fae6e9adSlinfeng }
624*fae6e9adSlinfeng ebpf::OR32_IMM => self.emit_alu32_imm32(mem, 0x81, 1, dst, insn.imm),
625*fae6e9adSlinfeng ebpf::OR32_REG => self.emit_alu32(mem, 0x09, src, dst),
626*fae6e9adSlinfeng ebpf::AND32_IMM => self.emit_alu32_imm32(mem, 0x81, 4, dst, insn.imm),
627*fae6e9adSlinfeng ebpf::AND32_REG => self.emit_alu32(mem, 0x21, src, dst),
628*fae6e9adSlinfeng ebpf::LSH32_IMM => self.emit_alu32_imm8(mem, 0xc1, 4, dst, insn.imm as i8),
629*fae6e9adSlinfeng ebpf::LSH32_REG => {
630*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
631*fae6e9adSlinfeng self.emit_alu32(mem, 0xd3, 4, dst);
632*fae6e9adSlinfeng }
633*fae6e9adSlinfeng ebpf::RSH32_IMM => self.emit_alu32_imm8(mem, 0xc1, 5, dst, insn.imm as i8),
634*fae6e9adSlinfeng ebpf::RSH32_REG => {
635*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
636*fae6e9adSlinfeng self.emit_alu32(mem, 0xd3, 5, dst);
637*fae6e9adSlinfeng }
638*fae6e9adSlinfeng ebpf::NEG32 => self.emit_alu32(mem, 0xf7, 3, dst),
639*fae6e9adSlinfeng ebpf::XOR32_IMM => self.emit_alu32_imm32(mem, 0x81, 6, dst, insn.imm),
640*fae6e9adSlinfeng ebpf::XOR32_REG => self.emit_alu32(mem, 0x31, src, dst),
641*fae6e9adSlinfeng ebpf::MOV32_IMM => self.emit_alu32_imm32(mem, 0xc7, 0, dst, insn.imm),
642*fae6e9adSlinfeng ebpf::MOV32_REG => self.emit_mov(mem, src, dst),
643*fae6e9adSlinfeng ebpf::ARSH32_IMM => self.emit_alu32_imm8(mem, 0xc1, 7, dst, insn.imm as i8),
644*fae6e9adSlinfeng ebpf::ARSH32_REG => {
645*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
646*fae6e9adSlinfeng self.emit_alu32(mem, 0xd3, 7, dst);
647*fae6e9adSlinfeng }
648*fae6e9adSlinfeng ebpf::LE => {} // No-op
649*fae6e9adSlinfeng ebpf::BE => {
650*fae6e9adSlinfeng match insn.imm {
651*fae6e9adSlinfeng 16 => {
652*fae6e9adSlinfeng // rol
653*fae6e9adSlinfeng self.emit1(mem, 0x66); // 16-bit override
654*fae6e9adSlinfeng self.emit_alu32_imm8(mem, 0xc1, 0, dst, 8);
655*fae6e9adSlinfeng // and
656*fae6e9adSlinfeng self.emit_alu32_imm32(mem, 0x81, 4, dst, 0xffff);
657*fae6e9adSlinfeng }
658*fae6e9adSlinfeng 32 | 64 => {
659*fae6e9adSlinfeng // bswap
660*fae6e9adSlinfeng let bit = match insn.imm {
661*fae6e9adSlinfeng 64 => 1,
662*fae6e9adSlinfeng _ => 0,
663*fae6e9adSlinfeng };
664*fae6e9adSlinfeng self.emit_basic_rex(mem, bit, 0, dst);
665*fae6e9adSlinfeng self.emit1(mem, 0x0f);
666*fae6e9adSlinfeng self.emit1(mem, 0xc8 | (dst & 0b111));
667*fae6e9adSlinfeng }
668*fae6e9adSlinfeng _ => unreachable!(), // Should have been caught by verifier
669*fae6e9adSlinfeng }
670*fae6e9adSlinfeng }
671*fae6e9adSlinfeng
672*fae6e9adSlinfeng // BPF_ALU64 class
673*fae6e9adSlinfeng ebpf::ADD64_IMM => self.emit_alu64_imm32(mem, 0x81, 0, dst, insn.imm),
674*fae6e9adSlinfeng ebpf::ADD64_REG => self.emit_alu64(mem, 0x01, src, dst),
675*fae6e9adSlinfeng ebpf::SUB64_IMM => self.emit_alu64_imm32(mem, 0x81, 5, dst, insn.imm),
676*fae6e9adSlinfeng ebpf::SUB64_REG => self.emit_alu64(mem, 0x29, src, dst),
677*fae6e9adSlinfeng ebpf::MUL64_IMM
678*fae6e9adSlinfeng | ebpf::MUL64_REG
679*fae6e9adSlinfeng | ebpf::DIV64_IMM
680*fae6e9adSlinfeng | ebpf::DIV64_REG
681*fae6e9adSlinfeng | ebpf::MOD64_IMM
682*fae6e9adSlinfeng | ebpf::MOD64_REG => {
683*fae6e9adSlinfeng self.emit_muldivmod(mem, insn_ptr as u16, insn.opc, src, dst, insn.imm)
684*fae6e9adSlinfeng }
685*fae6e9adSlinfeng ebpf::OR64_IMM => self.emit_alu64_imm32(mem, 0x81, 1, dst, insn.imm),
686*fae6e9adSlinfeng ebpf::OR64_REG => self.emit_alu64(mem, 0x09, src, dst),
687*fae6e9adSlinfeng ebpf::AND64_IMM => self.emit_alu64_imm32(mem, 0x81, 4, dst, insn.imm),
688*fae6e9adSlinfeng ebpf::AND64_REG => self.emit_alu64(mem, 0x21, src, dst),
689*fae6e9adSlinfeng ebpf::LSH64_IMM => self.emit_alu64_imm8(mem, 0xc1, 4, dst, insn.imm as i8),
690*fae6e9adSlinfeng ebpf::LSH64_REG => {
691*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
692*fae6e9adSlinfeng self.emit_alu64(mem, 0xd3, 4, dst);
693*fae6e9adSlinfeng }
694*fae6e9adSlinfeng ebpf::RSH64_IMM => self.emit_alu64_imm8(mem, 0xc1, 5, dst, insn.imm as i8),
695*fae6e9adSlinfeng ebpf::RSH64_REG => {
696*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
697*fae6e9adSlinfeng self.emit_alu64(mem, 0xd3, 5, dst);
698*fae6e9adSlinfeng }
699*fae6e9adSlinfeng ebpf::NEG64 => self.emit_alu64(mem, 0xf7, 3, dst),
700*fae6e9adSlinfeng ebpf::XOR64_IMM => self.emit_alu64_imm32(mem, 0x81, 6, dst, insn.imm),
701*fae6e9adSlinfeng ebpf::XOR64_REG => self.emit_alu64(mem, 0x31, src, dst),
702*fae6e9adSlinfeng ebpf::MOV64_IMM => self.emit_load_imm(mem, dst, insn.imm as i64),
703*fae6e9adSlinfeng ebpf::MOV64_REG => self.emit_mov(mem, src, dst),
704*fae6e9adSlinfeng ebpf::ARSH64_IMM => self.emit_alu64_imm8(mem, 0xc1, 7, dst, insn.imm as i8),
705*fae6e9adSlinfeng ebpf::ARSH64_REG => {
706*fae6e9adSlinfeng self.emit_mov(mem, src, RCX);
707*fae6e9adSlinfeng self.emit_alu64(mem, 0xd3, 7, dst);
708*fae6e9adSlinfeng }
709*fae6e9adSlinfeng
710*fae6e9adSlinfeng // BPF_JMP class
711*fae6e9adSlinfeng ebpf::JA => self.emit_jmp(mem, target_pc),
712*fae6e9adSlinfeng ebpf::JEQ_IMM => {
713*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
714*fae6e9adSlinfeng self.emit_jcc(mem, 0x84, target_pc);
715*fae6e9adSlinfeng }
716*fae6e9adSlinfeng ebpf::JEQ_REG => {
717*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
718*fae6e9adSlinfeng self.emit_jcc(mem, 0x84, target_pc);
719*fae6e9adSlinfeng }
720*fae6e9adSlinfeng ebpf::JGT_IMM => {
721*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
722*fae6e9adSlinfeng self.emit_jcc(mem, 0x87, target_pc);
723*fae6e9adSlinfeng }
724*fae6e9adSlinfeng ebpf::JGT_REG => {
725*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
726*fae6e9adSlinfeng self.emit_jcc(mem, 0x87, target_pc);
727*fae6e9adSlinfeng }
728*fae6e9adSlinfeng ebpf::JGE_IMM => {
729*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
730*fae6e9adSlinfeng self.emit_jcc(mem, 0x83, target_pc);
731*fae6e9adSlinfeng }
732*fae6e9adSlinfeng ebpf::JGE_REG => {
733*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
734*fae6e9adSlinfeng self.emit_jcc(mem, 0x83, target_pc);
735*fae6e9adSlinfeng }
736*fae6e9adSlinfeng ebpf::JLT_IMM => {
737*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
738*fae6e9adSlinfeng self.emit_jcc(mem, 0x82, target_pc);
739*fae6e9adSlinfeng }
740*fae6e9adSlinfeng ebpf::JLT_REG => {
741*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
742*fae6e9adSlinfeng self.emit_jcc(mem, 0x82, target_pc);
743*fae6e9adSlinfeng }
744*fae6e9adSlinfeng ebpf::JLE_IMM => {
745*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
746*fae6e9adSlinfeng self.emit_jcc(mem, 0x86, target_pc);
747*fae6e9adSlinfeng }
748*fae6e9adSlinfeng ebpf::JLE_REG => {
749*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
750*fae6e9adSlinfeng self.emit_jcc(mem, 0x86, target_pc);
751*fae6e9adSlinfeng }
752*fae6e9adSlinfeng ebpf::JSET_IMM => {
753*fae6e9adSlinfeng self.emit_alu64_imm32(mem, 0xf7, 0, dst, insn.imm);
754*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
755*fae6e9adSlinfeng }
756*fae6e9adSlinfeng ebpf::JSET_REG => {
757*fae6e9adSlinfeng self.emit_alu64(mem, 0x85, src, dst);
758*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
759*fae6e9adSlinfeng }
760*fae6e9adSlinfeng ebpf::JNE_IMM => {
761*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
762*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
763*fae6e9adSlinfeng }
764*fae6e9adSlinfeng ebpf::JNE_REG => {
765*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
766*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
767*fae6e9adSlinfeng }
768*fae6e9adSlinfeng ebpf::JSGT_IMM => {
769*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
770*fae6e9adSlinfeng self.emit_jcc(mem, 0x8f, target_pc);
771*fae6e9adSlinfeng }
772*fae6e9adSlinfeng ebpf::JSGT_REG => {
773*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
774*fae6e9adSlinfeng self.emit_jcc(mem, 0x8f, target_pc);
775*fae6e9adSlinfeng }
776*fae6e9adSlinfeng ebpf::JSGE_IMM => {
777*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
778*fae6e9adSlinfeng self.emit_jcc(mem, 0x8d, target_pc);
779*fae6e9adSlinfeng }
780*fae6e9adSlinfeng ebpf::JSGE_REG => {
781*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
782*fae6e9adSlinfeng self.emit_jcc(mem, 0x8d, target_pc);
783*fae6e9adSlinfeng }
784*fae6e9adSlinfeng ebpf::JSLT_IMM => {
785*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
786*fae6e9adSlinfeng self.emit_jcc(mem, 0x8c, target_pc);
787*fae6e9adSlinfeng }
788*fae6e9adSlinfeng ebpf::JSLT_REG => {
789*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
790*fae6e9adSlinfeng self.emit_jcc(mem, 0x8c, target_pc);
791*fae6e9adSlinfeng }
792*fae6e9adSlinfeng ebpf::JSLE_IMM => {
793*fae6e9adSlinfeng self.emit_cmp_imm32(mem, dst, insn.imm);
794*fae6e9adSlinfeng self.emit_jcc(mem, 0x8e, target_pc);
795*fae6e9adSlinfeng }
796*fae6e9adSlinfeng ebpf::JSLE_REG => {
797*fae6e9adSlinfeng self.emit_cmp(mem, src, dst);
798*fae6e9adSlinfeng self.emit_jcc(mem, 0x8e, target_pc);
799*fae6e9adSlinfeng }
800*fae6e9adSlinfeng
801*fae6e9adSlinfeng // BPF_JMP32 class
802*fae6e9adSlinfeng ebpf::JEQ_IMM32 => {
803*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
804*fae6e9adSlinfeng self.emit_jcc(mem, 0x84, target_pc);
805*fae6e9adSlinfeng }
806*fae6e9adSlinfeng ebpf::JEQ_REG32 => {
807*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
808*fae6e9adSlinfeng self.emit_jcc(mem, 0x84, target_pc);
809*fae6e9adSlinfeng }
810*fae6e9adSlinfeng ebpf::JGT_IMM32 => {
811*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
812*fae6e9adSlinfeng self.emit_jcc(mem, 0x87, target_pc);
813*fae6e9adSlinfeng }
814*fae6e9adSlinfeng ebpf::JGT_REG32 => {
815*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
816*fae6e9adSlinfeng self.emit_jcc(mem, 0x87, target_pc);
817*fae6e9adSlinfeng }
818*fae6e9adSlinfeng ebpf::JGE_IMM32 => {
819*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
820*fae6e9adSlinfeng self.emit_jcc(mem, 0x83, target_pc);
821*fae6e9adSlinfeng }
822*fae6e9adSlinfeng ebpf::JGE_REG32 => {
823*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
824*fae6e9adSlinfeng self.emit_jcc(mem, 0x83, target_pc);
825*fae6e9adSlinfeng }
826*fae6e9adSlinfeng ebpf::JLT_IMM32 => {
827*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
828*fae6e9adSlinfeng self.emit_jcc(mem, 0x82, target_pc);
829*fae6e9adSlinfeng }
830*fae6e9adSlinfeng ebpf::JLT_REG32 => {
831*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
832*fae6e9adSlinfeng self.emit_jcc(mem, 0x82, target_pc);
833*fae6e9adSlinfeng }
834*fae6e9adSlinfeng ebpf::JLE_IMM32 => {
835*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
836*fae6e9adSlinfeng self.emit_jcc(mem, 0x86, target_pc);
837*fae6e9adSlinfeng }
838*fae6e9adSlinfeng ebpf::JLE_REG32 => {
839*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
840*fae6e9adSlinfeng self.emit_jcc(mem, 0x86, target_pc);
841*fae6e9adSlinfeng }
842*fae6e9adSlinfeng ebpf::JSET_IMM32 => {
843*fae6e9adSlinfeng self.emit_alu32_imm32(mem, 0xf7, 0, dst, insn.imm);
844*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
845*fae6e9adSlinfeng }
846*fae6e9adSlinfeng ebpf::JSET_REG32 => {
847*fae6e9adSlinfeng self.emit_alu32(mem, 0x85, src, dst);
848*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
849*fae6e9adSlinfeng }
850*fae6e9adSlinfeng ebpf::JNE_IMM32 => {
851*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
852*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
853*fae6e9adSlinfeng }
854*fae6e9adSlinfeng ebpf::JNE_REG32 => {
855*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
856*fae6e9adSlinfeng self.emit_jcc(mem, 0x85, target_pc);
857*fae6e9adSlinfeng }
858*fae6e9adSlinfeng ebpf::JSGT_IMM32 => {
859*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
860*fae6e9adSlinfeng self.emit_jcc(mem, 0x8f, target_pc);
861*fae6e9adSlinfeng }
862*fae6e9adSlinfeng ebpf::JSGT_REG32 => {
863*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
864*fae6e9adSlinfeng self.emit_jcc(mem, 0x8f, target_pc);
865*fae6e9adSlinfeng }
866*fae6e9adSlinfeng ebpf::JSGE_IMM32 => {
867*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
868*fae6e9adSlinfeng self.emit_jcc(mem, 0x8d, target_pc);
869*fae6e9adSlinfeng }
870*fae6e9adSlinfeng ebpf::JSGE_REG32 => {
871*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
872*fae6e9adSlinfeng self.emit_jcc(mem, 0x8d, target_pc);
873*fae6e9adSlinfeng }
874*fae6e9adSlinfeng ebpf::JSLT_IMM32 => {
875*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
876*fae6e9adSlinfeng self.emit_jcc(mem, 0x8c, target_pc);
877*fae6e9adSlinfeng }
878*fae6e9adSlinfeng ebpf::JSLT_REG32 => {
879*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
880*fae6e9adSlinfeng self.emit_jcc(mem, 0x8c, target_pc);
881*fae6e9adSlinfeng }
882*fae6e9adSlinfeng ebpf::JSLE_IMM32 => {
883*fae6e9adSlinfeng self.emit_cmp32_imm32(mem, dst, insn.imm);
884*fae6e9adSlinfeng self.emit_jcc(mem, 0x8e, target_pc);
885*fae6e9adSlinfeng }
886*fae6e9adSlinfeng ebpf::JSLE_REG32 => {
887*fae6e9adSlinfeng self.emit_cmp32(mem, src, dst);
888*fae6e9adSlinfeng self.emit_jcc(mem, 0x8e, target_pc);
889*fae6e9adSlinfeng }
890*fae6e9adSlinfeng
891*fae6e9adSlinfeng ebpf::CALL => {
892*fae6e9adSlinfeng // For JIT, helpers in use MUST be registered at compile time. They can be
893*fae6e9adSlinfeng // updated later, but not created after compiling (we need the address of the
894*fae6e9adSlinfeng // helper function in the JIT-compiled program).
895*fae6e9adSlinfeng if let Some(helper) = helpers.get(&(insn.imm as u32)) {
896*fae6e9adSlinfeng // We reserve RCX for shifts
897*fae6e9adSlinfeng self.emit_mov(mem, R9, RCX);
898*fae6e9adSlinfeng self.emit_call(mem, *helper as usize);
899*fae6e9adSlinfeng } else {
900*fae6e9adSlinfeng Err(Error::new(
901*fae6e9adSlinfeng ErrorKind::Other,
902*fae6e9adSlinfeng format!(
903*fae6e9adSlinfeng "[JIT] Error: unknown helper function (id: {:#x})",
904*fae6e9adSlinfeng insn.imm as u32
905*fae6e9adSlinfeng ),
906*fae6e9adSlinfeng ))?;
907*fae6e9adSlinfeng };
908*fae6e9adSlinfeng }
909*fae6e9adSlinfeng ebpf::TAIL_CALL => {
910*fae6e9adSlinfeng unimplemented!()
911*fae6e9adSlinfeng }
912*fae6e9adSlinfeng ebpf::EXIT => {
913*fae6e9adSlinfeng if insn_ptr != prog.len() / ebpf::INSN_SIZE - 1 {
914*fae6e9adSlinfeng self.emit_jmp(mem, TARGET_PC_EXIT);
915*fae6e9adSlinfeng };
916*fae6e9adSlinfeng }
917*fae6e9adSlinfeng
918*fae6e9adSlinfeng _ => {
919*fae6e9adSlinfeng Err(Error::new(
920*fae6e9adSlinfeng ErrorKind::Other,
921*fae6e9adSlinfeng format!(
922*fae6e9adSlinfeng "[JIT] Error: unknown eBPF opcode {:#2x} (insn #{insn_ptr:?})",
923*fae6e9adSlinfeng insn.opc
924*fae6e9adSlinfeng ),
925*fae6e9adSlinfeng ))?;
926*fae6e9adSlinfeng }
927*fae6e9adSlinfeng }
928*fae6e9adSlinfeng
929*fae6e9adSlinfeng insn_ptr += 1;
930*fae6e9adSlinfeng }
931*fae6e9adSlinfeng
932*fae6e9adSlinfeng // Epilogue
933*fae6e9adSlinfeng self.set_anchor(mem, TARGET_PC_EXIT);
934*fae6e9adSlinfeng
935*fae6e9adSlinfeng // Move register 0 into rax
936*fae6e9adSlinfeng if map_register(0) != RAX {
937*fae6e9adSlinfeng self.emit_mov(mem, map_register(0), RAX);
938*fae6e9adSlinfeng }
939*fae6e9adSlinfeng
940*fae6e9adSlinfeng // Deallocate stack space
941*fae6e9adSlinfeng self.emit_alu64_imm32(mem, 0x81, 0, RSP, ebpf::STACK_SIZE as i32);
942*fae6e9adSlinfeng
943*fae6e9adSlinfeng self.emit_pop(mem, R15);
944*fae6e9adSlinfeng self.emit_pop(mem, R14);
945*fae6e9adSlinfeng self.emit_pop(mem, R13);
946*fae6e9adSlinfeng self.emit_pop(mem, RBX);
947*fae6e9adSlinfeng self.emit_pop(mem, RBP);
948*fae6e9adSlinfeng
949*fae6e9adSlinfeng self.emit1(mem, 0xc3); // ret
950*fae6e9adSlinfeng
951*fae6e9adSlinfeng Ok(())
952*fae6e9adSlinfeng }
953*fae6e9adSlinfeng
resolve_jumps(&mut self, mem: &mut JitMemory) -> Result<(), Error>954*fae6e9adSlinfeng fn resolve_jumps(&mut self, mem: &mut JitMemory) -> Result<(), Error> {
955*fae6e9adSlinfeng for jump in &self.jumps {
956*fae6e9adSlinfeng let target_loc = match self.special_targets.get(&jump.target_pc) {
957*fae6e9adSlinfeng Some(target) => *target,
958*fae6e9adSlinfeng None => self.pc_locs[jump.target_pc as usize],
959*fae6e9adSlinfeng };
960*fae6e9adSlinfeng
961*fae6e9adSlinfeng // Assumes jump offset is at end of instruction
962*fae6e9adSlinfeng unsafe {
963*fae6e9adSlinfeng let offset_loc = jump.offset_loc as i32 + std::mem::size_of::<i32>() as i32;
964*fae6e9adSlinfeng let rel = &(target_loc as i32 - offset_loc) as *const i32;
965*fae6e9adSlinfeng
966*fae6e9adSlinfeng let offset_ptr = mem.contents.as_ptr().add(jump.offset_loc);
967*fae6e9adSlinfeng
968*fae6e9adSlinfeng libc::memcpy(
969*fae6e9adSlinfeng offset_ptr as *mut libc::c_void,
970*fae6e9adSlinfeng rel as *const libc::c_void,
971*fae6e9adSlinfeng std::mem::size_of::<i32>(),
972*fae6e9adSlinfeng );
973*fae6e9adSlinfeng }
974*fae6e9adSlinfeng }
975*fae6e9adSlinfeng Ok(())
976*fae6e9adSlinfeng }
977*fae6e9adSlinfeng } // impl JitCompiler
978*fae6e9adSlinfeng
979*fae6e9adSlinfeng pub struct JitMemory<'a> {
980*fae6e9adSlinfeng contents: &'a mut [u8],
981*fae6e9adSlinfeng offset: usize,
982*fae6e9adSlinfeng }
983*fae6e9adSlinfeng
984*fae6e9adSlinfeng impl<'a> JitMemory<'a> {
new( prog: &[u8], helpers: &HashMap<u32, ebpf::Helper>, use_mbuff: bool, update_data_ptr: bool, ) -> Result<JitMemory<'a>, Error>985*fae6e9adSlinfeng pub fn new(
986*fae6e9adSlinfeng prog: &[u8],
987*fae6e9adSlinfeng helpers: &HashMap<u32, ebpf::Helper>,
988*fae6e9adSlinfeng use_mbuff: bool,
989*fae6e9adSlinfeng update_data_ptr: bool,
990*fae6e9adSlinfeng ) -> Result<JitMemory<'a>, Error> {
991*fae6e9adSlinfeng let contents: &mut [u8];
992*fae6e9adSlinfeng let mut raw: mem::MaybeUninit<*mut libc::c_void> = mem::MaybeUninit::uninit();
993*fae6e9adSlinfeng unsafe {
994*fae6e9adSlinfeng let size = NUM_PAGES * PAGE_SIZE;
995*fae6e9adSlinfeng libc::posix_memalign(raw.as_mut_ptr(), PAGE_SIZE, size);
996*fae6e9adSlinfeng libc::mprotect(
997*fae6e9adSlinfeng *raw.as_mut_ptr(),
998*fae6e9adSlinfeng size,
999*fae6e9adSlinfeng libc::PROT_EXEC | libc::PROT_READ | libc::PROT_WRITE,
1000*fae6e9adSlinfeng );
1001*fae6e9adSlinfeng std::ptr::write_bytes(*raw.as_mut_ptr(), 0xc3, size); // for now, prepopulate with 'RET' calls
1002*fae6e9adSlinfeng contents =
1003*fae6e9adSlinfeng std::slice::from_raw_parts_mut(*raw.as_mut_ptr() as *mut u8, NUM_PAGES * PAGE_SIZE);
1004*fae6e9adSlinfeng raw.assume_init();
1005*fae6e9adSlinfeng }
1006*fae6e9adSlinfeng
1007*fae6e9adSlinfeng let mut mem = JitMemory {
1008*fae6e9adSlinfeng contents,
1009*fae6e9adSlinfeng offset: 0,
1010*fae6e9adSlinfeng };
1011*fae6e9adSlinfeng
1012*fae6e9adSlinfeng let mut jit = JitCompiler::new();
1013*fae6e9adSlinfeng jit.jit_compile(&mut mem, prog, use_mbuff, update_data_ptr, helpers)?;
1014*fae6e9adSlinfeng jit.resolve_jumps(&mut mem)?;
1015*fae6e9adSlinfeng
1016*fae6e9adSlinfeng Ok(mem)
1017*fae6e9adSlinfeng }
1018*fae6e9adSlinfeng
get_prog(&self) -> MachineCode1019*fae6e9adSlinfeng pub fn get_prog(&self) -> MachineCode {
1020*fae6e9adSlinfeng unsafe { mem::transmute(self.contents.as_ptr()) }
1021*fae6e9adSlinfeng }
1022*fae6e9adSlinfeng }
1023*fae6e9adSlinfeng
1024*fae6e9adSlinfeng impl<'a> Index<usize> for JitMemory<'a> {
1025*fae6e9adSlinfeng type Output = u8;
1026*fae6e9adSlinfeng
index(&self, _index: usize) -> &u81027*fae6e9adSlinfeng fn index(&self, _index: usize) -> &u8 {
1028*fae6e9adSlinfeng &self.contents[_index]
1029*fae6e9adSlinfeng }
1030*fae6e9adSlinfeng }
1031*fae6e9adSlinfeng
1032*fae6e9adSlinfeng impl<'a> IndexMut<usize> for JitMemory<'a> {
index_mut(&mut self, _index: usize) -> &mut u81033*fae6e9adSlinfeng fn index_mut(&mut self, _index: usize) -> &mut u8 {
1034*fae6e9adSlinfeng &mut self.contents[_index]
1035*fae6e9adSlinfeng }
1036*fae6e9adSlinfeng }
1037*fae6e9adSlinfeng
1038*fae6e9adSlinfeng impl<'a> Drop for JitMemory<'a> {
drop(&mut self)1039*fae6e9adSlinfeng fn drop(&mut self) {
1040*fae6e9adSlinfeng unsafe {
1041*fae6e9adSlinfeng libc::free(self.contents.as_mut_ptr() as *mut libc::c_void);
1042*fae6e9adSlinfeng }
1043*fae6e9adSlinfeng }
1044*fae6e9adSlinfeng }
1045*fae6e9adSlinfeng
1046*fae6e9adSlinfeng impl<'a> std::fmt::Debug for JitMemory<'a> {
fmt(&self, fmt: &mut Formatter) -> Result<(), FormatterError>1047*fae6e9adSlinfeng fn fmt(&self, fmt: &mut Formatter) -> Result<(), FormatterError> {
1048*fae6e9adSlinfeng fmt.write_str("JIT contents: [")?;
1049*fae6e9adSlinfeng fmt.write_str(" ] | ")?;
1050*fae6e9adSlinfeng fmt.debug_struct("JIT memory")
1051*fae6e9adSlinfeng .field("offset", &self.offset)
1052*fae6e9adSlinfeng .finish()
1053*fae6e9adSlinfeng }
1054*fae6e9adSlinfeng }
1055