1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * BPF JIT compiler
4 *
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
18
19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
20 {
21 if (len == 1)
22 *ptr = bytes;
23 else if (len == 2)
24 *(u16 *)ptr = bytes;
25 else {
26 *(u32 *)ptr = bytes;
27 barrier();
28 }
29 return ptr + len;
30 }
31
32 #define EMIT(bytes, len) \
33 do { prog = emit_code(prog, bytes, len); } while (0)
34
35 #define EMIT1(b1) EMIT(b1, 1)
36 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
37 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
38 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
39
40 #define EMIT1_off32(b1, off) \
41 do { EMIT1(b1); EMIT(off, 4); } while (0)
42 #define EMIT2_off32(b1, b2, off) \
43 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
44 #define EMIT3_off32(b1, b2, b3, off) \
45 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
46 #define EMIT4_off32(b1, b2, b3, b4, off) \
47 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
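/*
 * Example: EMIT3(0x48, 0x89, 0xE5) stores the three bytes 48 89 e5
 * ("mov rbp, rsp") at 'prog' and advances it by three. emit_code()
 * writes a full u32 for the 3-byte case but only advances by len, so
 * the scratch byte is overwritten by the next EMIT. The _off32 variants
 * append a 32-bit immediate or displacement after the opcode bytes.
 */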
48
49 #ifdef CONFIG_X86_KERNEL_IBT
50 #define EMIT_ENDBR() EMIT(gen_endbr(), 4)
51 #else
52 #define EMIT_ENDBR()
53 #endif
54
55 static bool is_imm8(int value)
56 {
57 return value <= 127 && value >= -128;
58 }
59
60 static bool is_simm32(s64 value)
61 {
62 return value == (s64)(s32)value;
63 }
64
65 static bool is_uimm32(u64 value)
66 {
67 return value == (u64)(u32)value;
68 }
69
70 /* mov dst, src */
71 #define EMIT_mov(DST, SRC) \
72 do { \
73 if (DST != SRC) \
74 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
75 } while (0)
76
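/*
 * Width of the immediate emitted for a BPF_ST store of the given size.
 * BPF_DW deliberately maps to 4: the REX.W + C7 mov encoding takes a
 * sign-extended imm32, so even a 64-bit store carries only 4 immediate
 * bytes.
 */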
77 static int bpf_size_to_x86_bytes(int bpf_size)
78 {
79 if (bpf_size == BPF_W)
80 return 4;
81 else if (bpf_size == BPF_H)
82 return 2;
83 else if (bpf_size == BPF_B)
84 return 1;
85 else if (bpf_size == BPF_DW)
86 return 4; /* imm32 */
87 else
88 return 0;
89 }
90
91 /*
92 * List of x86 cond jump opcodes (. + s8)
93 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
94 */
95 #define X86_JB 0x72
96 #define X86_JAE 0x73
97 #define X86_JE 0x74
98 #define X86_JNE 0x75
99 #define X86_JBE 0x76
100 #define X86_JA 0x77
101 #define X86_JL 0x7C
102 #define X86_JGE 0x7D
103 #define X86_JLE 0x7E
104 #define X86_JG 0x7F
105
106 /* Pick a register outside of BPF range for JIT internal work */
107 #define AUX_REG (MAX_BPF_JIT_REG + 1)
108 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
109
110 /*
111 * The following table maps BPF registers to x86-64 registers.
112 *
113 * x86-64 register R12 is unused, since if used as base address
114 * register in load/store instructions, it always needs an
115 * extra byte of encoding and is callee saved.
116 *
117 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
118 * trampoline. x86-64 register R10 is used for blinding (if enabled).
119 */
120 static const int reg2hex[] = {
121 [BPF_REG_0] = 0, /* RAX */
122 [BPF_REG_1] = 7, /* RDI */
123 [BPF_REG_2] = 6, /* RSI */
124 [BPF_REG_3] = 2, /* RDX */
125 [BPF_REG_4] = 1, /* RCX */
126 [BPF_REG_5] = 0, /* R8 */
127 [BPF_REG_6] = 3, /* RBX callee saved */
128 [BPF_REG_7] = 5, /* R13 callee saved */
129 [BPF_REG_8] = 6, /* R14 callee saved */
130 [BPF_REG_9] = 7, /* R15 callee saved */
131 [BPF_REG_FP] = 5, /* RBP readonly */
132 [BPF_REG_AX] = 2, /* R10 temp register */
133 [AUX_REG] = 3, /* R11 temp register */
134 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
135 };
136
137 static const int reg2pt_regs[] = {
138 [BPF_REG_0] = offsetof(struct pt_regs, ax),
139 [BPF_REG_1] = offsetof(struct pt_regs, di),
140 [BPF_REG_2] = offsetof(struct pt_regs, si),
141 [BPF_REG_3] = offsetof(struct pt_regs, dx),
142 [BPF_REG_4] = offsetof(struct pt_regs, cx),
143 [BPF_REG_5] = offsetof(struct pt_regs, r8),
144 [BPF_REG_6] = offsetof(struct pt_regs, bx),
145 [BPF_REG_7] = offsetof(struct pt_regs, r13),
146 [BPF_REG_8] = offsetof(struct pt_regs, r14),
147 [BPF_REG_9] = offsetof(struct pt_regs, r15),
148 };
149
150 /*
151 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
152 * which need extra byte of encoding.
153 * rax,rcx,...,rbp have simpler encoding
154 */
155 static bool is_ereg(u32 reg)
156 {
157 return (1 << reg) & (BIT(BPF_REG_5) |
158 BIT(AUX_REG) |
159 BIT(BPF_REG_7) |
160 BIT(BPF_REG_8) |
161 BIT(BPF_REG_9) |
162 BIT(X86_REG_R9) |
163 BIT(BPF_REG_AX));
164 }
165
166 /*
167 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
168 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
169 * of encoding. al,cl,dl,bl have simpler encoding.
170 */
171 static bool is_ereg_8l(u32 reg)
172 {
173 return is_ereg(reg) ||
174 (1 << reg) & (BIT(BPF_REG_1) |
175 BIT(BPF_REG_2) |
176 BIT(BPF_REG_FP));
177 }
178
179 static bool is_axreg(u32 reg)
180 {
181 return reg == BPF_REG_0;
182 }
183
184 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
185 static u8 add_1mod(u8 byte, u32 reg)
186 {
187 if (is_ereg(reg))
188 byte |= 1;
189 return byte;
190 }
191
192 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
193 {
194 if (is_ereg(r1))
195 byte |= 1;
196 if (is_ereg(r2))
197 byte |= 4;
198 return byte;
199 }
200
201 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
202 static u8 add_1reg(u8 byte, u32 dst_reg)
203 {
204 return byte + reg2hex[dst_reg];
205 }
206
207 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
208 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
209 {
210 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
211 }
212
213 /* Some 1-byte opcodes for binary ALU operations */
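/*
 * Note that the BPF_LSH/BPF_RSH/BPF_ARSH entries are not opcodes but the
 * ModRM bytes (/4, /5 and /7 with mod=11) used together with the
 * 0xC1/0xD1/0xD3 shift opcodes further below.
 */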
214 static u8 simple_alu_opcodes[] = {
215 [BPF_ADD] = 0x01,
216 [BPF_SUB] = 0x29,
217 [BPF_AND] = 0x21,
218 [BPF_OR] = 0x09,
219 [BPF_XOR] = 0x31,
220 [BPF_LSH] = 0xE0,
221 [BPF_RSH] = 0xE8,
222 [BPF_ARSH] = 0xF8,
223 };
224
225 static void jit_fill_hole(void *area, unsigned int size)
226 {
227 /* Fill whole space with INT3 instructions */
228 memset(area, 0xcc, size);
229 }
230
231 int bpf_arch_text_invalidate(void *dst, size_t len)
232 {
233 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
234 }
235
236 struct jit_context {
237 int cleanup_addr; /* Epilogue code offset */
238
239 /*
240 * Program specific offsets of labels in the code; these rely on the
241 * JIT doing at least 2 passes, recording the position on the first
242 * pass, only to generate the correct offset on the second pass.
243 */
244 int tail_call_direct_label;
245 int tail_call_indirect_label;
246 };
247
248 /* Maximum number of bytes emitted while JITing one eBPF insn */
249 #define BPF_MAX_INSN_SIZE 128
250 #define BPF_INSN_SAFETY 64
251
252 /* Number of bytes emit_patch() needs to generate instructions */
253 #define X86_PATCH_SIZE 5
254 /* Number of bytes that will be skipped on tailcall */
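/*
 * That is the 5-byte patchable nop, the 2-byte xor eax,eax (or nop2),
 * the 1-byte push rbp and the 3-byte mov rbp,rsp emitted by
 * emit_prologue(), plus the leading ENDBR on CONFIG_X86_KERNEL_IBT
 * kernels.
 */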
255 #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
256
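/*
 * callee_regs_used[0..3] track whether BPF_REG_6..BPF_REG_9 are used,
 * i.e. whether rbx, r13, r14 and r15 need to be saved and restored
 * (see detect_reg_usage()).
 */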
257 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
258 {
259 u8 *prog = *pprog;
260
261 if (callee_regs_used[0])
262 EMIT1(0x53); /* push rbx */
263 if (callee_regs_used[1])
264 EMIT2(0x41, 0x55); /* push r13 */
265 if (callee_regs_used[2])
266 EMIT2(0x41, 0x56); /* push r14 */
267 if (callee_regs_used[3])
268 EMIT2(0x41, 0x57); /* push r15 */
269 *pprog = prog;
270 }
271
272 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
273 {
274 u8 *prog = *pprog;
275
276 if (callee_regs_used[3])
277 EMIT2(0x41, 0x5F); /* pop r15 */
278 if (callee_regs_used[2])
279 EMIT2(0x41, 0x5E); /* pop r14 */
280 if (callee_regs_used[1])
281 EMIT2(0x41, 0x5D); /* pop r13 */
282 if (callee_regs_used[0])
283 EMIT1(0x5B); /* pop rbx */
284 *pprog = prog;
285 }
286
287 /*
288 * Emit x86-64 prologue code for BPF program.
289 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
290 * while jumping to another program
291 */
292 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
293 bool tail_call_reachable, bool is_subprog)
294 {
295 u8 *prog = *pprog;
296
297 /* BPF trampoline can be made to work without these nops,
298 * but let's waste 5 bytes for now and optimize later
299 */
300 EMIT_ENDBR();
301 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
302 prog += X86_PATCH_SIZE;
303 if (!ebpf_from_cbpf) {
304 if (tail_call_reachable && !is_subprog)
305 EMIT2(0x31, 0xC0); /* xor eax, eax */
306 else
307 EMIT2(0x66, 0x90); /* nop2 */
308 }
309 EMIT1(0x55); /* push rbp */
310 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
311
312 /* X86_TAIL_CALL_OFFSET is here */
313 EMIT_ENDBR();
314
315 /* sub rsp, rounded_stack_depth */
316 if (stack_depth)
317 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
318 if (tail_call_reachable)
319 EMIT1(0x50); /* push rax */
320 *pprog = prog;
321 }
322
323 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
324 {
325 u8 *prog = *pprog;
326 s64 offset;
327
328 offset = func - (ip + X86_PATCH_SIZE);
329 if (!is_simm32(offset)) {
330 pr_err("Target call %p is out of range\n", func);
331 return -ERANGE;
332 }
333 EMIT1_off32(opcode, offset);
334 *pprog = prog;
335 return 0;
336 }
337
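/* 0xE8 is the 5-byte "call rel32" opcode, 0xE9 the 5-byte "jmp rel32" opcode */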
338 static int emit_call(u8 **pprog, void *func, void *ip)
339 {
340 return emit_patch(pprog, func, ip, 0xE8);
341 }
342
343 static int emit_jump(u8 **pprog, void *func, void *ip)
344 {
345 return emit_patch(pprog, func, ip, 0xE9);
346 }
347
348 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
349 void *old_addr, void *new_addr)
350 {
351 const u8 *nop_insn = x86_nops[5];
352 u8 old_insn[X86_PATCH_SIZE];
353 u8 new_insn[X86_PATCH_SIZE];
354 u8 *prog;
355 int ret;
356
357 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
358 if (old_addr) {
359 prog = old_insn;
360 ret = t == BPF_MOD_CALL ?
361 emit_call(&prog, old_addr, ip) :
362 emit_jump(&prog, old_addr, ip);
363 if (ret)
364 return ret;
365 }
366
367 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
368 if (new_addr) {
369 prog = new_insn;
370 ret = t == BPF_MOD_CALL ?
371 emit_call(&prog, new_addr, ip) :
372 emit_jump(&prog, new_addr, ip);
373 if (ret)
374 return ret;
375 }
376
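/*
 * Patching path below: return -EBUSY if the live text does not match the
 * expected old insn, 0 after a successful poke, and 1 if the new insn was
 * already in place so nothing had to be patched.
 */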
377 ret = -EBUSY;
378 mutex_lock(&text_mutex);
379 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
380 goto out;
381 ret = 1;
382 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
383 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
384 ret = 0;
385 }
386 out:
387 mutex_unlock(&text_mutex);
388 return ret;
389 }
390
391 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
392 void *old_addr, void *new_addr)
393 {
394 if (!is_kernel_text((long)ip) &&
395 !is_bpf_text_address((long)ip))
396 /* BPF poking in modules is not supported */
397 return -EINVAL;
398
399 /*
400 * See emit_prologue(), for IBT builds the trampoline hook is preceded
401 * with an ENDBR instruction.
402 */
403 if (is_endbr(*(u32 *)ip))
404 ip += ENDBR_INSN_SIZE;
405
406 return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
407 }
408
409 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
410
411 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
412 {
413 u8 *prog = *pprog;
414
415 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
416 EMIT_LFENCE();
417 EMIT2(0xFF, 0xE0 + reg);
418 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
419 OPTIMIZER_HIDE_VAR(reg);
420 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
421 } else {
422 EMIT2(0xFF, 0xE0 + reg);
423 }
424
425 *pprog = prog;
426 }
427
428 static void emit_return(u8 **pprog, u8 *ip)
429 {
430 u8 *prog = *pprog;
431
432 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
433 emit_jump(&prog, &__x86_return_thunk, ip);
434 } else {
435 EMIT1(0xC3); /* ret */
436 if (IS_ENABLED(CONFIG_SLS))
437 EMIT1(0xCC); /* int3 */
438 }
439
440 *pprog = prog;
441 }
442
443 /*
444 * Generate the following code:
445 *
446 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
447 * if (index >= array->map.max_entries)
448 * goto out;
449 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
450 * goto out;
451 * prog = array->ptrs[index];
452 * if (prog == NULL)
453 * goto out;
454 * goto *(prog->bpf_func + prologue_size);
455 * out:
456 */
457 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
458 u32 stack_depth, u8 *ip,
459 struct jit_context *ctx)
460 {
461 int tcc_off = -4 - round_up(stack_depth, 8);
462 u8 *prog = *pprog, *start = *pprog;
463 int offset;
464
465 /*
466 * rdi - pointer to ctx
467 * rsi - pointer to bpf_array
468 * rdx - index in bpf_array
469 */
470
471 /*
472 * if (index >= array->map.max_entries)
473 * goto out;
474 */
475 EMIT2(0x89, 0xD2); /* mov edx, edx */
476 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
477 offsetof(struct bpf_array, map.max_entries));
478
479 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
480 EMIT2(X86_JBE, offset); /* jbe out */
481
482 /*
483 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
484 * goto out;
485 */
486 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
487 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
488
489 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
490 EMIT2(X86_JAE, offset); /* jae out */
491 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
492 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
493
494 /* prog = array->ptrs[index]; */
495 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
496 offsetof(struct bpf_array, ptrs));
497
498 /*
499 * if (prog == NULL)
500 * goto out;
501 */
502 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
503
504 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
505 EMIT2(X86_JE, offset); /* je out */
506
507 pop_callee_regs(&prog, callee_regs_used);
508
509 EMIT1(0x58); /* pop rax */
510 if (stack_depth)
511 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
512 round_up(stack_depth, 8));
513
514 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
515 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
516 offsetof(struct bpf_prog, bpf_func));
517 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
518 X86_TAIL_CALL_OFFSET);
519 /*
520 * Now we're ready to jump into next BPF program
521 * rdi == ctx (1st arg)
522 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
523 */
524 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
525
526 /* out: */
527 ctx->tail_call_indirect_label = prog - start;
528 *pprog = prog;
529 }
530
531 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
532 u8 **pprog, u8 *ip,
533 bool *callee_regs_used, u32 stack_depth,
534 struct jit_context *ctx)
535 {
536 int tcc_off = -4 - round_up(stack_depth, 8);
537 u8 *prog = *pprog, *start = *pprog;
538 int offset;
539
540 /*
541 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
542 * goto out;
543 */
544 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
545 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
546
547 offset = ctx->tail_call_direct_label - (prog + 2 - start);
548 EMIT2(X86_JAE, offset); /* jae out */
549 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
550 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
551
552 poke->tailcall_bypass = ip + (prog - start);
553 poke->adj_off = X86_TAIL_CALL_OFFSET;
554 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
555 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
556
557 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
558 poke->tailcall_bypass);
559
560 pop_callee_regs(&prog, callee_regs_used);
561 EMIT1(0x58); /* pop rax */
562 if (stack_depth)
563 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
564
565 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
566 prog += X86_PATCH_SIZE;
567
568 /* out: */
569 ctx->tail_call_direct_label = prog - start;
570
571 *pprog = prog;
572 }
573
574 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
575 {
576 struct bpf_jit_poke_descriptor *poke;
577 struct bpf_array *array;
578 struct bpf_prog *target;
579 int i, ret;
580
581 for (i = 0; i < prog->aux->size_poke_tab; i++) {
582 poke = &prog->aux->poke_tab[i];
583 if (poke->aux && poke->aux != prog->aux)
584 continue;
585
586 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
587
588 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
589 continue;
590
591 array = container_of(poke->tail_call.map, struct bpf_array, map);
592 mutex_lock(&array->aux->poke_mutex);
593 target = array->ptrs[poke->tail_call.key];
594 if (target) {
595 ret = __bpf_arch_text_poke(poke->tailcall_target,
596 BPF_MOD_JUMP, NULL,
597 (u8 *)target->bpf_func +
598 poke->adj_off);
599 BUG_ON(ret < 0);
600 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
601 BPF_MOD_JUMP,
602 (u8 *)poke->tailcall_target +
603 X86_PATCH_SIZE, NULL);
604 BUG_ON(ret < 0);
605 }
606 WRITE_ONCE(poke->tailcall_target_stable, true);
607 mutex_unlock(&array->aux->poke_mutex);
608 }
609 }
610
611 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
612 u32 dst_reg, const u32 imm32)
613 {
614 u8 *prog = *pprog;
615 u8 b1, b2, b3;
616
617 /*
618 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
619 * (which zero-extends imm32) to save 2 bytes.
620 */
621 if (sign_propagate && (s32)imm32 < 0) {
622 /* 'mov %rax, imm32' sign extends imm32 */
623 b1 = add_1mod(0x48, dst_reg);
624 b2 = 0xC7;
625 b3 = 0xC0;
626 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
627 goto done;
628 }
629
630 /*
631 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
632 * to save 3 bytes.
633 */
634 if (imm32 == 0) {
635 if (is_ereg(dst_reg))
636 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
637 b2 = 0x31; /* xor */
638 b3 = 0xC0;
639 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
640 goto done;
641 }
642
643 /* mov %eax, imm32 */
644 if (is_ereg(dst_reg))
645 EMIT1(add_1mod(0x40, dst_reg));
646 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
647 done:
648 *pprog = prog;
649 }
650
651 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
652 const u32 imm32_hi, const u32 imm32_lo)
653 {
654 u8 *prog = *pprog;
655
656 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
657 /*
658 * For emitting plain u32, where sign bit must not be
659 * propagated LLVM tends to load imm64 over mov32
660 * directly, so save a couple of bytes by just doing
661 * 'mov %eax, imm32' instead.
662 */
663 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
664 } else {
665 /* movabsq %rax, imm64 */
666 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
667 EMIT(imm32_lo, 4);
668 EMIT(imm32_hi, 4);
669 }
670
671 *pprog = prog;
672 }
673
674 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
675 {
676 u8 *prog = *pprog;
677
678 if (is64) {
679 /* mov dst, src */
680 EMIT_mov(dst_reg, src_reg);
681 } else {
682 /* mov32 dst, src */
683 if (is_ereg(dst_reg) || is_ereg(src_reg))
684 EMIT1(add_2mod(0x40, dst_reg, src_reg));
685 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
686 }
687
688 *pprog = prog;
689 }
690
691 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
692 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
693 {
694 u8 *prog = *pprog;
695
696 if (is_imm8(off)) {
697 /* 1-byte signed displacement.
698 *
699 * If off == 0 we could skip this and save one extra byte, but
700 * special case of x86 R13 which always needs an offset is not
701 * worth the hassle
702 */
703 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
704 } else {
705 /* 4-byte signed displacement */
706 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
707 }
708 *pprog = prog;
709 }
710
711 /*
712 * Emit a REX byte if it will be necessary to address these registers
713 */
714 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
715 {
716 u8 *prog = *pprog;
717
718 if (is64)
719 EMIT1(add_2mod(0x48, dst_reg, src_reg));
720 else if (is_ereg(dst_reg) || is_ereg(src_reg))
721 EMIT1(add_2mod(0x40, dst_reg, src_reg));
722 *pprog = prog;
723 }
724
725 /*
726 * Similar version of maybe_emit_mod() for a single register
727 */
728 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
729 {
730 u8 *prog = *pprog;
731
732 if (is64)
733 EMIT1(add_1mod(0x48, reg));
734 else if (is_ereg(reg))
735 EMIT1(add_1mod(0x40, reg));
736 *pprog = prog;
737 }
738
739 /* LDX: dst_reg = *(u8*)(src_reg + off) */
740 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
741 {
742 u8 *prog = *pprog;
743
744 switch (size) {
745 case BPF_B:
746 /* Emit 'movzx rax, byte ptr [rax + off]' */
747 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
748 break;
749 case BPF_H:
750 /* Emit 'movzx rax, word ptr [rax + off]' */
751 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
752 break;
753 case BPF_W:
754 /* Emit 'mov eax, dword ptr [rax+0x14]' */
755 if (is_ereg(dst_reg) || is_ereg(src_reg))
756 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
757 else
758 EMIT1(0x8B);
759 break;
760 case BPF_DW:
761 /* Emit 'mov rax, qword ptr [rax+0x14]' */
762 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
763 break;
764 }
765 emit_insn_suffix(&prog, src_reg, dst_reg, off);
766 *pprog = prog;
767 }
768
769 /* STX: *(u8*)(dst_reg + off) = src_reg */
770 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
771 {
772 u8 *prog = *pprog;
773
774 switch (size) {
775 case BPF_B:
776 /* Emit 'mov byte ptr [rax + off], al' */
777 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
778 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
779 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
780 else
781 EMIT1(0x88);
782 break;
783 case BPF_H:
784 if (is_ereg(dst_reg) || is_ereg(src_reg))
785 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
786 else
787 EMIT2(0x66, 0x89);
788 break;
789 case BPF_W:
790 if (is_ereg(dst_reg) || is_ereg(src_reg))
791 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
792 else
793 EMIT1(0x89);
794 break;
795 case BPF_DW:
796 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
797 break;
798 }
799 emit_insn_suffix(&prog, dst_reg, src_reg, off);
800 *pprog = prog;
801 }
802
803 static int emit_atomic(u8 **pprog, u8 atomic_op,
804 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
805 {
806 u8 *prog = *pprog;
807
808 EMIT1(0xF0); /* lock prefix */
809
810 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
811
812 /* emit opcode */
813 switch (atomic_op) {
814 case BPF_ADD:
815 case BPF_AND:
816 case BPF_OR:
817 case BPF_XOR:
818 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
819 EMIT1(simple_alu_opcodes[atomic_op]);
820 break;
821 case BPF_ADD | BPF_FETCH:
822 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
823 EMIT2(0x0F, 0xC1);
824 break;
825 case BPF_XCHG:
826 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
827 EMIT1(0x87);
828 break;
829 case BPF_CMPXCHG:
830 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
831 EMIT2(0x0F, 0xB1);
832 break;
833 default:
834 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
835 return -EFAULT;
836 }
837
838 emit_insn_suffix(&prog, dst_reg, src_reg, off);
839
840 *pprog = prog;
841 return 0;
842 }
843
844 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
845 {
846 u32 reg = x->fixup >> 8;
847
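/*
 * The fixup word was built in do_jit(): bits 0-7 hold the length of the
 * faulting x86 insn, the upper bits hold the pt_regs offset of the
 * destination register to clear.
 */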
848 /* jump over faulting load and clear dest register */
849 *(unsigned long *)((void *)regs + reg) = 0;
850 regs->ip += x->fixup & 0xff;
851 return true;
852 }
853
854 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
855 bool *regs_used, bool *tail_call_seen)
856 {
857 int i;
858
859 for (i = 1; i <= insn_cnt; i++, insn++) {
860 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
861 *tail_call_seen = true;
862 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
863 regs_used[0] = true;
864 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
865 regs_used[1] = true;
866 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
867 regs_used[2] = true;
868 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
869 regs_used[3] = true;
870 }
871 }
872
873 static void emit_nops(u8 **pprog, int len)
874 {
875 u8 *prog = *pprog;
876 int i, noplen;
877
878 while (len > 0) {
879 noplen = len;
880
881 if (noplen > ASM_NOP_MAX)
882 noplen = ASM_NOP_MAX;
883
884 for (i = 0; i < noplen; i++)
885 EMIT1(x86_nops[noplen][i]);
886 len -= noplen;
887 }
888
889 *pprog = prog;
890 }
891
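/*
 * Bytes insn 'i' occupied in the previous pass (addrs[i] - addrs[i - 1])
 * minus the bytes emitted for it so far in this pass (prog - temp); used
 * to size the nop padding that keeps the image from shrinking between
 * passes when jmp_padding is enabled.
 */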
892 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
893
894 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
895 int oldproglen, struct jit_context *ctx, bool jmp_padding)
896 {
897 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
898 struct bpf_insn *insn = bpf_prog->insnsi;
899 bool callee_regs_used[4] = {};
900 int insn_cnt = bpf_prog->len;
901 bool tail_call_seen = false;
902 bool seen_exit = false;
903 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
904 int i, excnt = 0;
905 int ilen, proglen = 0;
906 u8 *prog = temp;
907 int err;
908
909 detect_reg_usage(insn, insn_cnt, callee_regs_used,
910 &tail_call_seen);
911
912 /* tail call's presence in current prog implies it is reachable */
913 tail_call_reachable |= tail_call_seen;
914
915 emit_prologue(&prog, bpf_prog->aux->stack_depth,
916 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
917 bpf_prog->aux->func_idx != 0);
918 push_callee_regs(&prog, callee_regs_used);
919
920 ilen = prog - temp;
921 if (rw_image)
922 memcpy(rw_image + proglen, temp, ilen);
923 proglen += ilen;
924 addrs[0] = proglen;
925 prog = temp;
926
927 for (i = 1; i <= insn_cnt; i++, insn++) {
928 const s32 imm32 = insn->imm;
929 u32 dst_reg = insn->dst_reg;
930 u32 src_reg = insn->src_reg;
931 u8 b2 = 0, b3 = 0;
932 u8 *start_of_ldx;
933 s64 jmp_offset;
934 u8 jmp_cond;
935 u8 *func;
936 int nops;
937
938 switch (insn->code) {
939 /* ALU */
940 case BPF_ALU | BPF_ADD | BPF_X:
941 case BPF_ALU | BPF_SUB | BPF_X:
942 case BPF_ALU | BPF_AND | BPF_X:
943 case BPF_ALU | BPF_OR | BPF_X:
944 case BPF_ALU | BPF_XOR | BPF_X:
945 case BPF_ALU64 | BPF_ADD | BPF_X:
946 case BPF_ALU64 | BPF_SUB | BPF_X:
947 case BPF_ALU64 | BPF_AND | BPF_X:
948 case BPF_ALU64 | BPF_OR | BPF_X:
949 case BPF_ALU64 | BPF_XOR | BPF_X:
950 maybe_emit_mod(&prog, dst_reg, src_reg,
951 BPF_CLASS(insn->code) == BPF_ALU64);
952 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
953 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
954 break;
955
956 case BPF_ALU64 | BPF_MOV | BPF_X:
957 case BPF_ALU | BPF_MOV | BPF_X:
958 emit_mov_reg(&prog,
959 BPF_CLASS(insn->code) == BPF_ALU64,
960 dst_reg, src_reg);
961 break;
962
963 /* neg dst */
964 case BPF_ALU | BPF_NEG:
965 case BPF_ALU64 | BPF_NEG:
966 maybe_emit_1mod(&prog, dst_reg,
967 BPF_CLASS(insn->code) == BPF_ALU64);
968 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
969 break;
970
971 case BPF_ALU | BPF_ADD | BPF_K:
972 case BPF_ALU | BPF_SUB | BPF_K:
973 case BPF_ALU | BPF_AND | BPF_K:
974 case BPF_ALU | BPF_OR | BPF_K:
975 case BPF_ALU | BPF_XOR | BPF_K:
976 case BPF_ALU64 | BPF_ADD | BPF_K:
977 case BPF_ALU64 | BPF_SUB | BPF_K:
978 case BPF_ALU64 | BPF_AND | BPF_K:
979 case BPF_ALU64 | BPF_OR | BPF_K:
980 case BPF_ALU64 | BPF_XOR | BPF_K:
981 maybe_emit_1mod(&prog, dst_reg,
982 BPF_CLASS(insn->code) == BPF_ALU64);
983
984 /*
985 * b3 holds 'normal' opcode, b2 short form only valid
986 * in case dst is eax/rax.
987 */
988 switch (BPF_OP(insn->code)) {
989 case BPF_ADD:
990 b3 = 0xC0;
991 b2 = 0x05;
992 break;
993 case BPF_SUB:
994 b3 = 0xE8;
995 b2 = 0x2D;
996 break;
997 case BPF_AND:
998 b3 = 0xE0;
999 b2 = 0x25;
1000 break;
1001 case BPF_OR:
1002 b3 = 0xC8;
1003 b2 = 0x0D;
1004 break;
1005 case BPF_XOR:
1006 b3 = 0xF0;
1007 b2 = 0x35;
1008 break;
1009 }
1010
1011 if (is_imm8(imm32))
1012 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1013 else if (is_axreg(dst_reg))
1014 EMIT1_off32(b2, imm32);
1015 else
1016 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1017 break;
1018
1019 case BPF_ALU64 | BPF_MOV | BPF_K:
1020 case BPF_ALU | BPF_MOV | BPF_K:
1021 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1022 dst_reg, imm32);
1023 break;
1024
1025 case BPF_LD | BPF_IMM | BPF_DW:
1026 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1027 insn++;
1028 i++;
1029 break;
1030
1031 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1032 case BPF_ALU | BPF_MOD | BPF_X:
1033 case BPF_ALU | BPF_DIV | BPF_X:
1034 case BPF_ALU | BPF_MOD | BPF_K:
1035 case BPF_ALU | BPF_DIV | BPF_K:
1036 case BPF_ALU64 | BPF_MOD | BPF_X:
1037 case BPF_ALU64 | BPF_DIV | BPF_X:
1038 case BPF_ALU64 | BPF_MOD | BPF_K:
1039 case BPF_ALU64 | BPF_DIV | BPF_K: {
1040 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1041
1042 if (dst_reg != BPF_REG_0)
1043 EMIT1(0x50); /* push rax */
1044 if (dst_reg != BPF_REG_3)
1045 EMIT1(0x52); /* push rdx */
1046
1047 if (BPF_SRC(insn->code) == BPF_X) {
1048 if (src_reg == BPF_REG_0 ||
1049 src_reg == BPF_REG_3) {
1050 /* mov r11, src_reg */
1051 EMIT_mov(AUX_REG, src_reg);
1052 src_reg = AUX_REG;
1053 }
1054 } else {
1055 /* mov r11, imm32 */
1056 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1057 src_reg = AUX_REG;
1058 }
1059
1060 if (dst_reg != BPF_REG_0)
1061 /* mov rax, dst_reg */
1062 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1063
1064 /*
1065 * xor edx, edx
1066 * equivalent to 'xor rdx, rdx', but one byte less
1067 */
1068 EMIT2(0x31, 0xd2);
1069
1070 /* div src_reg */
1071 maybe_emit_1mod(&prog, src_reg, is64);
1072 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1073
1074 if (BPF_OP(insn->code) == BPF_MOD &&
1075 dst_reg != BPF_REG_3)
1076 /* mov dst_reg, rdx */
1077 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1078 else if (BPF_OP(insn->code) == BPF_DIV &&
1079 dst_reg != BPF_REG_0)
1080 /* mov dst_reg, rax */
1081 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1082
1083 if (dst_reg != BPF_REG_3)
1084 EMIT1(0x5A); /* pop rdx */
1085 if (dst_reg != BPF_REG_0)
1086 EMIT1(0x58); /* pop rax */
1087 break;
1088 }
1089
1090 case BPF_ALU | BPF_MUL | BPF_K:
1091 case BPF_ALU64 | BPF_MUL | BPF_K:
1092 maybe_emit_mod(&prog, dst_reg, dst_reg,
1093 BPF_CLASS(insn->code) == BPF_ALU64);
1094
1095 if (is_imm8(imm32))
1096 /* imul dst_reg, dst_reg, imm8 */
1097 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1098 imm32);
1099 else
1100 /* imul dst_reg, dst_reg, imm32 */
1101 EMIT2_off32(0x69,
1102 add_2reg(0xC0, dst_reg, dst_reg),
1103 imm32);
1104 break;
1105
1106 case BPF_ALU | BPF_MUL | BPF_X:
1107 case BPF_ALU64 | BPF_MUL | BPF_X:
1108 maybe_emit_mod(&prog, src_reg, dst_reg,
1109 BPF_CLASS(insn->code) == BPF_ALU64);
1110
1111 /* imul dst_reg, src_reg */
1112 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1113 break;
1114
1115 /* Shifts */
1116 case BPF_ALU | BPF_LSH | BPF_K:
1117 case BPF_ALU | BPF_RSH | BPF_K:
1118 case BPF_ALU | BPF_ARSH | BPF_K:
1119 case BPF_ALU64 | BPF_LSH | BPF_K:
1120 case BPF_ALU64 | BPF_RSH | BPF_K:
1121 case BPF_ALU64 | BPF_ARSH | BPF_K:
1122 maybe_emit_1mod(&prog, dst_reg,
1123 BPF_CLASS(insn->code) == BPF_ALU64);
1124
1125 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1126 if (imm32 == 1)
1127 EMIT2(0xD1, add_1reg(b3, dst_reg));
1128 else
1129 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1130 break;
1131
1132 case BPF_ALU | BPF_LSH | BPF_X:
1133 case BPF_ALU | BPF_RSH | BPF_X:
1134 case BPF_ALU | BPF_ARSH | BPF_X:
1135 case BPF_ALU64 | BPF_LSH | BPF_X:
1136 case BPF_ALU64 | BPF_RSH | BPF_X:
1137 case BPF_ALU64 | BPF_ARSH | BPF_X:
1138
1139 /* Check for bad case when dst_reg == rcx */
1140 if (dst_reg == BPF_REG_4) {
1141 /* mov r11, dst_reg */
1142 EMIT_mov(AUX_REG, dst_reg);
1143 dst_reg = AUX_REG;
1144 }
1145
1146 if (src_reg != BPF_REG_4) { /* common case */
1147 EMIT1(0x51); /* push rcx */
1148
1149 /* mov rcx, src_reg */
1150 EMIT_mov(BPF_REG_4, src_reg);
1151 }
1152
1153 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1154 maybe_emit_1mod(&prog, dst_reg,
1155 BPF_CLASS(insn->code) == BPF_ALU64);
1156
1157 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1158 EMIT2(0xD3, add_1reg(b3, dst_reg));
1159
1160 if (src_reg != BPF_REG_4)
1161 EMIT1(0x59); /* pop rcx */
1162
1163 if (insn->dst_reg == BPF_REG_4)
1164 /* mov dst_reg, r11 */
1165 EMIT_mov(insn->dst_reg, AUX_REG);
1166 break;
1167
1168 case BPF_ALU | BPF_END | BPF_FROM_BE:
1169 switch (imm32) {
1170 case 16:
1171 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1172 EMIT1(0x66);
1173 if (is_ereg(dst_reg))
1174 EMIT1(0x41);
1175 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1176
1177 /* Emit 'movzwl eax, ax' */
1178 if (is_ereg(dst_reg))
1179 EMIT3(0x45, 0x0F, 0xB7);
1180 else
1181 EMIT2(0x0F, 0xB7);
1182 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1183 break;
1184 case 32:
1185 /* Emit 'bswap eax' to swap lower 4 bytes */
1186 if (is_ereg(dst_reg))
1187 EMIT2(0x41, 0x0F);
1188 else
1189 EMIT1(0x0F);
1190 EMIT1(add_1reg(0xC8, dst_reg));
1191 break;
1192 case 64:
1193 /* Emit 'bswap rax' to swap 8 bytes */
1194 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1195 add_1reg(0xC8, dst_reg));
1196 break;
1197 }
1198 break;
1199
1200 case BPF_ALU | BPF_END | BPF_FROM_LE:
1201 switch (imm32) {
1202 case 16:
1203 /*
1204 * Emit 'movzwl eax, ax' to zero extend 16-bit
1205 * into 64 bit
1206 */
1207 if (is_ereg(dst_reg))
1208 EMIT3(0x45, 0x0F, 0xB7);
1209 else
1210 EMIT2(0x0F, 0xB7);
1211 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1212 break;
1213 case 32:
1214 /* Emit 'mov eax, eax' to clear upper 32-bits */
1215 if (is_ereg(dst_reg))
1216 EMIT1(0x45);
1217 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1218 break;
1219 case 64:
1220 /* nop */
1221 break;
1222 }
1223 break;
1224
1225 /* speculation barrier */
1226 case BPF_ST | BPF_NOSPEC:
1227 if (boot_cpu_has(X86_FEATURE_XMM2))
1228 EMIT_LFENCE();
1229 break;
1230
1231 /* ST: *(u8*)(dst_reg + off) = imm */
1232 case BPF_ST | BPF_MEM | BPF_B:
1233 if (is_ereg(dst_reg))
1234 EMIT2(0x41, 0xC6);
1235 else
1236 EMIT1(0xC6);
1237 goto st;
1238 case BPF_ST | BPF_MEM | BPF_H:
1239 if (is_ereg(dst_reg))
1240 EMIT3(0x66, 0x41, 0xC7);
1241 else
1242 EMIT2(0x66, 0xC7);
1243 goto st;
1244 case BPF_ST | BPF_MEM | BPF_W:
1245 if (is_ereg(dst_reg))
1246 EMIT2(0x41, 0xC7);
1247 else
1248 EMIT1(0xC7);
1249 goto st;
1250 case BPF_ST | BPF_MEM | BPF_DW:
1251 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1252
1253 st: if (is_imm8(insn->off))
1254 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1255 else
1256 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1257
1258 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1259 break;
1260
1261 /* STX: *(u8*)(dst_reg + off) = src_reg */
1262 case BPF_STX | BPF_MEM | BPF_B:
1263 case BPF_STX | BPF_MEM | BPF_H:
1264 case BPF_STX | BPF_MEM | BPF_W:
1265 case BPF_STX | BPF_MEM | BPF_DW:
1266 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1267 break;
1268
1269 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1270 case BPF_LDX | BPF_MEM | BPF_B:
1271 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1272 case BPF_LDX | BPF_MEM | BPF_H:
1273 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1274 case BPF_LDX | BPF_MEM | BPF_W:
1275 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1276 case BPF_LDX | BPF_MEM | BPF_DW:
1277 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1278 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1279 /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
1280 * add abs(insn->off) to the limit to make sure that negative
1281 * offset won't be an issue.
1282 * insn->off is s16, so it won't affect valid pointers.
1283 */
1284 u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
1285 u8 *end_of_jmp1, *end_of_jmp2;
1286
1287 /* Conservatively check that src_reg + insn->off is a kernel address:
1288 * 1. src_reg + insn->off >= limit
1289 * 2. src_reg + insn->off doesn't become small positive.
1290 * Cannot do src_reg + insn->off >= limit in one branch,
1291 * since it needs two spare registers, but JIT has only one.
1292 */
1293
1294 /* movabsq r11, limit */
1295 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1296 EMIT((u32)limit, 4);
1297 EMIT(limit >> 32, 4);
1298 /* cmp src_reg, r11 */
1299 maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1300 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1301 /* if unsigned '<' goto end_of_jmp2 */
1302 EMIT2(X86_JB, 0);
1303 end_of_jmp1 = prog;
1304
1305 /* mov r11, src_reg */
1306 emit_mov_reg(&prog, true, AUX_REG, src_reg);
1307 /* add r11, insn->off */
1308 maybe_emit_1mod(&prog, AUX_REG, true);
1309 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1310 /* jmp if not carry to start_of_ldx
1311 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
1312 * that has to be rejected.
1313 */
1314 EMIT2(0x73 /* JNC */, 0);
1315 end_of_jmp2 = prog;
1316
1317 /* xor dst_reg, dst_reg */
1318 emit_mov_imm32(&prog, false, dst_reg, 0);
1319 /* jmp byte_after_ldx */
1320 EMIT2(0xEB, 0);
1321
1322 /* populate jmp_offset for JB above to jump to xor dst_reg */
1323 end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
1324 /* populate jmp_offset for JNC above to jump to start_of_ldx */
1325 start_of_ldx = prog;
1326 end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
1327 }
1328 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1329 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1330 struct exception_table_entry *ex;
1331 u8 *_insn = image + proglen + (start_of_ldx - temp);
1332 s64 delta;
1333
1334 /* populate jmp_offset for JMP above */
1335 start_of_ldx[-1] = prog - start_of_ldx;
1336
1337 if (!bpf_prog->aux->extable)
1338 break;
1339
1340 if (excnt >= bpf_prog->aux->num_exentries) {
1341 pr_err("ex gen bug\n");
1342 return -EFAULT;
1343 }
1344 ex = &bpf_prog->aux->extable[excnt++];
1345
1346 delta = _insn - (u8 *)&ex->insn;
1347 if (!is_simm32(delta)) {
1348 pr_err("extable->insn doesn't fit into 32-bit\n");
1349 return -EFAULT;
1350 }
1351 /* switch ex to rw buffer for writes */
1352 ex = (void *)rw_image + ((void *)ex - (void *)image);
1353
1354 ex->insn = delta;
1355
1356 ex->data = EX_TYPE_BPF;
1357
1358 if (dst_reg > BPF_REG_9) {
1359 pr_err("verifier error\n");
1360 return -EFAULT;
1361 }
1362 /*
1363 * Compute size of x86 insn and its target dest x86 register.
1364 * ex_handler_bpf() will use lower 8 bits to adjust
1365 * pt_regs->ip to jump over this x86 instruction
1366 * and upper bits to figure out which pt_regs to zero out.
1367 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1368 * of 4 bytes will be ignored and rbx will be zero inited.
1369 */
1370 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1371 }
1372 break;
1373
1374 case BPF_STX | BPF_ATOMIC | BPF_W:
1375 case BPF_STX | BPF_ATOMIC | BPF_DW:
1376 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1377 insn->imm == (BPF_OR | BPF_FETCH) ||
1378 insn->imm == (BPF_XOR | BPF_FETCH)) {
1379 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1380 u32 real_src_reg = src_reg;
1381 u32 real_dst_reg = dst_reg;
1382 u8 *branch_target;
1383
1384 /*
1385 * Can't be implemented with a single x86 insn.
1386 * Need to do a CMPXCHG loop.
1387 */
1388
1389 /* Will need RAX as a CMPXCHG operand so save R0 */
1390 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1391 if (src_reg == BPF_REG_0)
1392 real_src_reg = BPF_REG_AX;
1393 if (dst_reg == BPF_REG_0)
1394 real_dst_reg = BPF_REG_AX;
1395
1396 branch_target = prog;
1397 /* Load old value */
1398 emit_ldx(&prog, BPF_SIZE(insn->code),
1399 BPF_REG_0, real_dst_reg, insn->off);
1400 /*
1401 * Perform the (commutative) operation locally,
1402 * put the result in the AUX_REG.
1403 */
1404 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1405 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1406 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1407 add_2reg(0xC0, AUX_REG, real_src_reg));
1408 /* Attempt to swap in new value */
1409 err = emit_atomic(&prog, BPF_CMPXCHG,
1410 real_dst_reg, AUX_REG,
1411 insn->off,
1412 BPF_SIZE(insn->code));
1413 if (WARN_ON(err))
1414 return err;
1415 /*
1416 * ZF tells us whether we won the race. If it's
1417 * cleared we need to try again.
1418 */
1419 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1420 /* Return the pre-modification value */
1421 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1422 /* Restore R0 after clobbering RAX */
1423 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1424 break;
1425 }
1426
1427 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1428 insn->off, BPF_SIZE(insn->code));
1429 if (err)
1430 return err;
1431 break;
1432
1433 /* call */
1434 case BPF_JMP | BPF_CALL:
1435 func = (u8 *) __bpf_call_base + imm32;
1436 if (tail_call_reachable) {
1437 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1438 EMIT3_off32(0x48, 0x8B, 0x85,
1439 -round_up(bpf_prog->aux->stack_depth, 8) - 8);
1440 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1441 return -EINVAL;
1442 } else {
1443 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1444 return -EINVAL;
1445 }
1446 break;
1447
1448 case BPF_JMP | BPF_TAIL_CALL:
1449 if (imm32)
1450 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1451 &prog, image + addrs[i - 1],
1452 callee_regs_used,
1453 bpf_prog->aux->stack_depth,
1454 ctx);
1455 else
1456 emit_bpf_tail_call_indirect(&prog,
1457 callee_regs_used,
1458 bpf_prog->aux->stack_depth,
1459 image + addrs[i - 1],
1460 ctx);
1461 break;
1462
1463 /* cond jump */
1464 case BPF_JMP | BPF_JEQ | BPF_X:
1465 case BPF_JMP | BPF_JNE | BPF_X:
1466 case BPF_JMP | BPF_JGT | BPF_X:
1467 case BPF_JMP | BPF_JLT | BPF_X:
1468 case BPF_JMP | BPF_JGE | BPF_X:
1469 case BPF_JMP | BPF_JLE | BPF_X:
1470 case BPF_JMP | BPF_JSGT | BPF_X:
1471 case BPF_JMP | BPF_JSLT | BPF_X:
1472 case BPF_JMP | BPF_JSGE | BPF_X:
1473 case BPF_JMP | BPF_JSLE | BPF_X:
1474 case BPF_JMP32 | BPF_JEQ | BPF_X:
1475 case BPF_JMP32 | BPF_JNE | BPF_X:
1476 case BPF_JMP32 | BPF_JGT | BPF_X:
1477 case BPF_JMP32 | BPF_JLT | BPF_X:
1478 case BPF_JMP32 | BPF_JGE | BPF_X:
1479 case BPF_JMP32 | BPF_JLE | BPF_X:
1480 case BPF_JMP32 | BPF_JSGT | BPF_X:
1481 case BPF_JMP32 | BPF_JSLT | BPF_X:
1482 case BPF_JMP32 | BPF_JSGE | BPF_X:
1483 case BPF_JMP32 | BPF_JSLE | BPF_X:
1484 /* cmp dst_reg, src_reg */
1485 maybe_emit_mod(&prog, dst_reg, src_reg,
1486 BPF_CLASS(insn->code) == BPF_JMP);
1487 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1488 goto emit_cond_jmp;
1489
1490 case BPF_JMP | BPF_JSET | BPF_X:
1491 case BPF_JMP32 | BPF_JSET | BPF_X:
1492 /* test dst_reg, src_reg */
1493 maybe_emit_mod(&prog, dst_reg, src_reg,
1494 BPF_CLASS(insn->code) == BPF_JMP);
1495 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1496 goto emit_cond_jmp;
1497
1498 case BPF_JMP | BPF_JSET | BPF_K:
1499 case BPF_JMP32 | BPF_JSET | BPF_K:
1500 /* test dst_reg, imm32 */
1501 maybe_emit_1mod(&prog, dst_reg,
1502 BPF_CLASS(insn->code) == BPF_JMP);
1503 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1504 goto emit_cond_jmp;
1505
1506 case BPF_JMP | BPF_JEQ | BPF_K:
1507 case BPF_JMP | BPF_JNE | BPF_K:
1508 case BPF_JMP | BPF_JGT | BPF_K:
1509 case BPF_JMP | BPF_JLT | BPF_K:
1510 case BPF_JMP | BPF_JGE | BPF_K:
1511 case BPF_JMP | BPF_JLE | BPF_K:
1512 case BPF_JMP | BPF_JSGT | BPF_K:
1513 case BPF_JMP | BPF_JSLT | BPF_K:
1514 case BPF_JMP | BPF_JSGE | BPF_K:
1515 case BPF_JMP | BPF_JSLE | BPF_K:
1516 case BPF_JMP32 | BPF_JEQ | BPF_K:
1517 case BPF_JMP32 | BPF_JNE | BPF_K:
1518 case BPF_JMP32 | BPF_JGT | BPF_K:
1519 case BPF_JMP32 | BPF_JLT | BPF_K:
1520 case BPF_JMP32 | BPF_JGE | BPF_K:
1521 case BPF_JMP32 | BPF_JLE | BPF_K:
1522 case BPF_JMP32 | BPF_JSGT | BPF_K:
1523 case BPF_JMP32 | BPF_JSLT | BPF_K:
1524 case BPF_JMP32 | BPF_JSGE | BPF_K:
1525 case BPF_JMP32 | BPF_JSLE | BPF_K:
1526 /* test dst_reg, dst_reg to save one extra byte */
1527 if (imm32 == 0) {
1528 maybe_emit_mod(&prog, dst_reg, dst_reg,
1529 BPF_CLASS(insn->code) == BPF_JMP);
1530 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1531 goto emit_cond_jmp;
1532 }
1533
1534 /* cmp dst_reg, imm8/32 */
1535 maybe_emit_1mod(&prog, dst_reg,
1536 BPF_CLASS(insn->code) == BPF_JMP);
1537
1538 if (is_imm8(imm32))
1539 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1540 else
1541 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1542
1543 emit_cond_jmp: /* Convert BPF opcode to x86 */
1544 switch (BPF_OP(insn->code)) {
1545 case BPF_JEQ:
1546 jmp_cond = X86_JE;
1547 break;
1548 case BPF_JSET:
1549 case BPF_JNE:
1550 jmp_cond = X86_JNE;
1551 break;
1552 case BPF_JGT:
1553 /* GT is unsigned '>', JA in x86 */
1554 jmp_cond = X86_JA;
1555 break;
1556 case BPF_JLT:
1557 /* LT is unsigned '<', JB in x86 */
1558 jmp_cond = X86_JB;
1559 break;
1560 case BPF_JGE:
1561 /* GE is unsigned '>=', JAE in x86 */
1562 jmp_cond = X86_JAE;
1563 break;
1564 case BPF_JLE:
1565 /* LE is unsigned '<=', JBE in x86 */
1566 jmp_cond = X86_JBE;
1567 break;
1568 case BPF_JSGT:
1569 /* Signed '>', GT in x86 */
1570 jmp_cond = X86_JG;
1571 break;
1572 case BPF_JSLT:
1573 /* Signed '<', LT in x86 */
1574 jmp_cond = X86_JL;
1575 break;
1576 case BPF_JSGE:
1577 /* Signed '>=', GE in x86 */
1578 jmp_cond = X86_JGE;
1579 break;
1580 case BPF_JSLE:
1581 /* Signed '<=', LE in x86 */
1582 jmp_cond = X86_JLE;
1583 break;
1584 default: /* to silence GCC warning */
1585 return -EFAULT;
1586 }
1587 jmp_offset = addrs[i + insn->off] - addrs[i];
1588 if (is_imm8(jmp_offset)) {
1589 if (jmp_padding) {
1590 /* To keep the jmp_offset valid, the extra bytes are
1591 * padded before the jump insn, so we subtract the
1592 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1593 *
1594 * If the previous pass already emits an imm8
1595 * jmp_cond, then this BPF insn won't shrink, so
1596 * "nops" is 0.
1597 *
1598 * On the other hand, if the previous pass emits an
1599 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1600 * keep the image from shrinking further.
1601 *
1602 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1603 * is 2 bytes, so the size difference is 4 bytes.
1604 */
1605 nops = INSN_SZ_DIFF - 2;
1606 if (nops != 0 && nops != 4) {
1607 pr_err("unexpected jmp_cond padding: %d bytes\n",
1608 nops);
1609 return -EFAULT;
1610 }
1611 emit_nops(&prog, nops);
1612 }
1613 EMIT2(jmp_cond, jmp_offset);
1614 } else if (is_simm32(jmp_offset)) {
1615 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1616 } else {
1617 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1618 return -EFAULT;
1619 }
1620
1621 break;
1622
1623 case BPF_JMP | BPF_JA:
1624 if (insn->off == -1)
1625 /* -1 jmp instructions will always jump
1626 * backwards two bytes. Explicitly handling
1627 * this case avoids wasting too many passes
1628 * when there are long sequences of replaced
1629 * dead code.
1630 */
1631 jmp_offset = -2;
1632 else
1633 jmp_offset = addrs[i + insn->off] - addrs[i];
1634
1635 if (!jmp_offset) {
1636 /*
1637 * If jmp_padding is enabled, the extra nops will
1638 * be inserted. Otherwise, optimize out nop jumps.
1639 */
1640 if (jmp_padding) {
1641 /* There are 3 possible conditions.
1642 * (1) This BPF_JA is already optimized out in
1643 * the previous run, so there is no need
1644 * to pad any extra byte (0 byte).
1645 * (2) The previous pass emits an imm8 jmp,
1646 * so we pad 2 bytes to match the previous
1647 * insn size.
1648 * (3) Similarly, the previous pass emits an
1649 * imm32 jmp, and 5 bytes is padded.
1650 */
1651 nops = INSN_SZ_DIFF;
1652 if (nops != 0 && nops != 2 && nops != 5) {
1653 pr_err("unexpected nop jump padding: %d bytes\n",
1654 nops);
1655 return -EFAULT;
1656 }
1657 emit_nops(&prog, nops);
1658 }
1659 break;
1660 }
1661 emit_jmp:
1662 if (is_imm8(jmp_offset)) {
1663 if (jmp_padding) {
1664 /* To avoid breaking jmp_offset, the extra bytes
1665 * are padded before the actual jmp insn, so
1666 * 2 bytes is subtracted from INSN_SZ_DIFF.
1667 *
1668 * If the previous pass already emits an imm8
1669 * jmp, there is nothing to pad (0 byte).
1670 *
1671 * If it emits an imm32 jmp (5 bytes) previously
1672 * and now an imm8 jmp (2 bytes), then we pad
1673 * (5 - 2 = 3) bytes to stop the image from
1674 * shrinking further.
1675 */
1676 nops = INSN_SZ_DIFF - 2;
1677 if (nops != 0 && nops != 3) {
1678 pr_err("unexpected jump padding: %d bytes\n",
1679 nops);
1680 return -EFAULT;
1681 }
1682 emit_nops(&prog, INSN_SZ_DIFF - 2);
1683 }
1684 EMIT2(0xEB, jmp_offset);
1685 } else if (is_simm32(jmp_offset)) {
1686 EMIT1_off32(0xE9, jmp_offset);
1687 } else {
1688 pr_err("jmp gen bug %llx\n", jmp_offset);
1689 return -EFAULT;
1690 }
1691 break;
1692
1693 case BPF_JMP | BPF_EXIT:
1694 if (seen_exit) {
1695 jmp_offset = ctx->cleanup_addr - addrs[i];
1696 goto emit_jmp;
1697 }
1698 seen_exit = true;
1699 /* Update cleanup_addr */
1700 ctx->cleanup_addr = proglen;
1701 pop_callee_regs(&prog, callee_regs_used);
1702 EMIT1(0xC9); /* leave */
1703 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1704 break;
1705
1706 default:
1707 /*
1708 * By design x86-64 JIT should support all BPF instructions.
1709 * This error will be seen if new instruction was added
1710 * to the interpreter, but not to the JIT, or if there is
1711 * junk in bpf_prog.
1712 */
1713 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1714 return -EINVAL;
1715 }
1716
1717 ilen = prog - temp;
1718 if (ilen > BPF_MAX_INSN_SIZE) {
1719 pr_err("bpf_jit: fatal insn size error\n");
1720 return -EFAULT;
1721 }
1722
1723 if (image) {
1724 /*
1725 * When populating the image, assert that:
1726 *
1727 * i) We do not write beyond the allocated space, and
1728 * ii) addrs[i] did not change from the prior run, in order
1729 * to validate assumptions made for computing branch
1730 * displacements.
1731 */
1732 if (unlikely(proglen + ilen > oldproglen ||
1733 proglen + ilen != addrs[i])) {
1734 pr_err("bpf_jit: fatal error\n");
1735 return -EFAULT;
1736 }
1737 memcpy(rw_image + proglen, temp, ilen);
1738 }
1739 proglen += ilen;
1740 addrs[i] = proglen;
1741 prog = temp;
1742 }
1743
1744 if (image && excnt != bpf_prog->aux->num_exentries) {
1745 pr_err("extable is not populated\n");
1746 return -EFAULT;
1747 }
1748 return proglen;
1749 }
1750
1751 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1752 int stack_size)
1753 {
1754 int i;
1755 /* Store function arguments to stack.
1756 * For a function that accepts two pointers the sequence will be:
1757 * mov QWORD PTR [rbp-0x10],rdi
1758 * mov QWORD PTR [rbp-0x8],rsi
1759 */
1760 for (i = 0; i < min(nr_args, 6); i++)
1761 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1762 BPF_REG_FP,
1763 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1764 -(stack_size - i * 8));
1765 }
1766
1767 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1768 int stack_size)
1769 {
1770 int i;
1771
1772 /* Restore function arguments from stack.
1773 * For a function that accepts two pointers the sequence will be:
1774 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1775 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1776 */
1777 for (i = 0; i < min(nr_args, 6); i++)
1778 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1779 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1780 BPF_REG_FP,
1781 -(stack_size - i * 8));
1782 }
1783
1784 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1785 struct bpf_tramp_link *l, int stack_size,
1786 int run_ctx_off, bool save_ret)
1787 {
1788 u8 *prog = *pprog;
1789 u8 *jmp_insn;
1790 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1791 struct bpf_prog *p = l->link.prog;
1792 u64 cookie = l->cookie;
1793
1794 /* mov rdi, cookie */
1795 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
1796
1797 /* Prepare struct bpf_tramp_run_ctx.
1798 *
1799 * bpf_tramp_run_ctx is already preserved by
1800 * arch_prepare_bpf_trampoline().
1801 *
1802 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
1803 */
1804 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
1805
1806 /* arg1: mov rdi, progs[i] */
1807 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1808 /* arg2: lea rsi, [rbp - run_ctx_off] */
1809 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
1810
1811 if (emit_call(&prog,
1812 p->aux->sleepable ? __bpf_prog_enter_sleepable :
1813 __bpf_prog_enter, prog))
1814 return -EINVAL;
1815 /* remember prog start time returned by __bpf_prog_enter */
1816 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1817
1818 /* if (__bpf_prog_enter*(prog) == 0)
1819 * goto skip_exec_of_prog;
1820 */
1821 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
1822 /* emit 2 nops that will be replaced with JE insn */
1823 jmp_insn = prog;
1824 emit_nops(&prog, 2);
1825
1826 /* arg1: lea rdi, [rbp - stack_size] */
1827 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1828 /* arg2: progs[i]->insnsi for interpreter */
1829 if (!p->jited)
1830 emit_mov_imm64(&prog, BPF_REG_2,
1831 (long) p->insnsi >> 32,
1832 (u32) (long) p->insnsi);
1833 /* call JITed bpf program or interpreter */
1834 if (emit_call(&prog, p->bpf_func, prog))
1835 return -EINVAL;
1836
1837 /*
1838 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1839 * of the previous call which is then passed on the stack to
1840 * the next BPF program.
1841 *
1842 * BPF_TRAMP_FENTRY trampoline may need to return the return
1843 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1844 */
1845 if (save_ret)
1846 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1847
1848 /* replace 2 nops with JE insn, since jmp target is known */
1849 jmp_insn[0] = X86_JE;
1850 jmp_insn[1] = prog - jmp_insn - 2;
1851
1852 /* arg1: mov rdi, progs[i] */
1853 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1854 /* arg2: mov rsi, rbx <- start time in nsec */
1855 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1856 /* arg3: lea rdx, [rbp - run_ctx_off] */
1857 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
1858 if (emit_call(&prog,
1859 p->aux->sleepable ? __bpf_prog_exit_sleepable :
1860 __bpf_prog_exit, prog))
1861 return -EINVAL;
1862
1863 *pprog = prog;
1864 return 0;
1865 }
1866
1867 static void emit_align(u8 **pprog, u32 align)
1868 {
1869 u8 *target, *prog = *pprog;
1870
1871 target = PTR_ALIGN(prog, align);
1872 if (target != prog)
1873 emit_nops(&prog, target - prog);
1874
1875 *pprog = prog;
1876 }
1877
1878 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1879 {
1880 u8 *prog = *pprog;
1881 s64 offset;
1882
1883 offset = func - (ip + 2 + 4);
1884 if (!is_simm32(offset)) {
1885 pr_err("Target %p is out of range\n", func);
1886 return -EINVAL;
1887 }
1888 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1889 *pprog = prog;
1890 return 0;
1891 }
1892
1893 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1894 struct bpf_tramp_links *tl, int stack_size,
1895 int run_ctx_off, bool save_ret)
1896 {
1897 int i;
1898 u8 *prog = *pprog;
1899
1900 for (i = 0; i < tl->nr_links; i++) {
1901 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
1902 run_ctx_off, save_ret))
1903 return -EINVAL;
1904 }
1905 *pprog = prog;
1906 return 0;
1907 }
1908
1909 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1910 struct bpf_tramp_links *tl, int stack_size,
1911 int run_ctx_off, u8 **branches)
1912 {
1913 u8 *prog = *pprog;
1914 int i;
1915
1916 /* The first fmod_ret program will receive a garbage return value.
1917 * Set this to 0 to avoid confusing the program.
1918 */
1919 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1920 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1921 for (i = 0; i < tl->nr_links; i++) {
1922 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
1923 return -EINVAL;
1924
1925 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1926 * if (*(u64 *)(rbp - 8) != 0)
1927 * goto do_fexit;
1928 */
1929 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1930 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1931
1932 /* Save the location of the branch and generate 6 nops
1933 * (2 bytes for the jump opcode and 4 bytes for the offset). These
1934 * nops are replaced with a conditional jump once do_fexit (i.e. the
1935 * start of the fexit invocation) is finalized.
1936 */
1937 branches[i] = prog;
1938 emit_nops(&prog, 4 + 2);
1939 }
1940
1941 *pprog = prog;
1942 return 0;
1943 }
1944
1945 static bool is_valid_bpf_tramp_flags(unsigned int flags)
1946 {
1947 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1948 (flags & BPF_TRAMP_F_SKIP_FRAME))
1949 return false;
1950
1951 /*
1952 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
1953 * and it must be used alone.
1954 */
1955 if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
1956 (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
1957 return false;
1958
1959 return true;
1960 }
1961
1962 /* Example:
1963 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1964 * its 'struct btf_func_model' will have nr_args=2.
1965 * The assembly code when eth_type_trans is executing after the trampoline:
1966 *
1967 * push rbp
1968 * mov rbp, rsp
1969 * sub rsp, 16 // space for skb and dev
1970 * push rbx // temp regs to pass start time
1971 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
1972 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
1973 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1974 * mov rbx, rax // remember start time if bpf stats are enabled
1975 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1976 * call addr_of_jited_FENTRY_prog
1977 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1978 * mov rsi, rbx // prog start time
1979 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1980 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
1981 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
1982 * pop rbx
1983 * leave
1984 * ret
1985 *
1986 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1987 * replaced with 'call generated_bpf_trampoline'. When it returns,
1988 * eth_type_trans will continue executing with the original skb and dev pointers.
1989 *
1990 * The assembly code when eth_type_trans is called from the trampoline:
1991 *
1992 * push rbp
1993 * mov rbp, rsp
1994 * sub rsp, 24 // space for skb, dev, return value
1995 * push rbx // temp regs to pass start time
1996 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
1997 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
1998 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1999 * mov rbx, rax // remember start time if bpf stats are enabled
2000 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2001 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2002 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2003 * mov rsi, rbx // prog start time
2004 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2005 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2006 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2007 * call eth_type_trans+5 // execute body of eth_type_trans
2008 * mov qword ptr [rbp - 8], rax // save return value
2009 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2010 * mov rbx, rax // remember start time if bpf stats are enabled
2011 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2012 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2013 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2014 * mov rsi, rbx // prog start time
2015 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2016 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2017 * pop rbx
2018 * leave
2019 * add rsp, 8 // skip eth_type_trans's frame
2020 * ret // return to its caller
2021 */
2022 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2023 const struct btf_func_model *m, u32 flags,
2024 struct bpf_tramp_links *tlinks,
2025 void *orig_call)
2026 {
2027 int ret, i, nr_args = m->nr_args;
2028 int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
2029 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2030 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2031 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2032 u8 **branches = NULL;
2033 u8 *prog;
2034 bool save_ret;
2035
2036 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
2037 if (nr_args > 6)
2038 return -ENOTSUPP;
2039
2040 if (!is_valid_bpf_tramp_flags(flags))
2041 return -EINVAL;
2042
2043 /* Generated trampoline stack layout:
2044 *
2045 * RBP + 8 [ return address ]
2046 * RBP + 0 [ RBP ]
2047 *
2048 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
2049 * BPF_TRAMP_F_RET_FENTRY_RET flags
2050 *
2051 * [ reg_argN ] always
2052 * [ ... ]
2053 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
2054 *
2055 * RBP - args_off [ args count ] always
2056 *
2057 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
2058 *
2059 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2060 */
2061
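/* Illustrative example (not emitted): for nr_args == 2 with
 * BPF_TRAMP_F_CALL_ORIG set and no BPF_TRAMP_F_IP_ARG, the math below
 * gives stack_size = 16 + 8 (return value), so regs_off = 24,
 * args_off = 32, ip_off = 32, and run_ctx_off = 32 plus
 * sizeof(struct bpf_tramp_run_ctx) rounded up to a multiple of 8.
 */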
2062 /* room for return value of orig_call or fentry prog */
2063 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2064 if (save_ret)
2065 stack_size += 8;
2066
2067 regs_off = stack_size;
2068
2069 /* args count */
2070 stack_size += 8;
2071 args_off = stack_size;
2072
2073 if (flags & BPF_TRAMP_F_IP_ARG)
2074 stack_size += 8; /* room for IP address argument */
2075
2076 ip_off = stack_size;
2077
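/* Keep stack_size 8-byte aligned: round the run_ctx area up to a
 * multiple of 8 before reserving it.
 */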
2078 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2079 run_ctx_off = stack_size;
2080
2081 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2082 /* Skip the patched call instruction and point orig_call to the
2083 * actual body of the kernel function.
2084 */
2085 if (is_endbr(*(u32 *)orig_call))
2086 orig_call += ENDBR_INSN_SIZE;
2087 orig_call += X86_PATCH_SIZE;
2088 }
2089
2090 prog = image;
2091
2092 EMIT_ENDBR();
2093 EMIT1(0x55); /* push rbp */
2094 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2095 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
2096 EMIT1(0x53); /* push rbx */
2097
2098 /* Store number of arguments of the traced function:
2099 * mov rax, nr_args
2100 * mov QWORD PTR [rbp - args_off], rax
2101 */
2102 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
2103 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
2104
2105 if (flags & BPF_TRAMP_F_IP_ARG) {
2106 /* Store IP address of the traced function:
2107 * mov rax, QWORD PTR [rbp + 8]
2108 * sub rax, X86_PATCH_SIZE
2109 * mov QWORD PTR [rbp - ip_off], rax
2110 */
2111 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
2112 EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
2113 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2114 }
2115
2116 save_regs(m, &prog, nr_args, regs_off);
2117
2118 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2119 /* arg1: mov rdi, im */
2120 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2121 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2122 ret = -EINVAL;
2123 goto cleanup;
2124 }
2125 }
2126
2127 if (fentry->nr_links)
2128 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2129 flags & BPF_TRAMP_F_RET_FENTRY_RET))
2130 return -EINVAL;
2131
2132 if (fmod_ret->nr_links) {
2133 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2134 GFP_KERNEL);
2135 if (!branches)
2136 return -ENOMEM;
2137
2138 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2139 run_ctx_off, branches)) {
2140 ret = -EINVAL;
2141 goto cleanup;
2142 }
2143 }
2144
2145 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2146 restore_regs(m, &prog, nr_args, regs_off);
2147
2148 /* call original function */
2149 if (emit_call(&prog, orig_call, prog)) {
2150 ret = -EINVAL;
2151 goto cleanup;
2152 }
2153 /* remember the return value on the stack for bpf progs to access */
2154 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2155 im->ip_after_call = prog;
2156 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2157 prog += X86_PATCH_SIZE;
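/* The nops above (at im->ip_after_call) are a patch site: when the
 * trampoline image is torn down, bpf_tramp_image_put() is expected to
 * poke a jump to im->ip_epilogue here so that in-flight callers skip
 * the fexit progs.
 */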
2158 }
2159
2160 if (fmod_ret->nr_links) {
2161 /* From Intel 64 and IA-32 Architectures Optimization
2162 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2163 * Coding Rule 11: All branch targets should be 16-byte
2164 * aligned.
2165 */
2166 emit_align(&prog, 16);
2167 /* Update the branches saved in invoke_bpf_mod_ret with the
2168 * aligned address of do_fexit.
2169 */
2170 for (i = 0; i < fmod_ret->nr_links; i++)
2171 emit_cond_near_jump(&branches[i], prog, branches[i],
2172 X86_JNE);
2173 }
2174
2175 if (fexit->nr_links)
2176 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
2177 ret = -EINVAL;
2178 goto cleanup;
2179 }
2180
2181 if (flags & BPF_TRAMP_F_RESTORE_REGS)
2182 restore_regs(m, &prog, nr_args, regs_off);
2183
2184 /* This needs to be done regardless. If there were fmod_ret programs,
2185 * the return value is only updated on the stack and still needs to be
2186 * restored to R0.
2187 */
2188 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2189 im->ip_epilogue = prog;
2190 /* arg1: mov rdi, im */
2191 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2192 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2193 ret = -EINVAL;
2194 goto cleanup;
2195 }
2196 }
2197 /* restore return value of orig_call or fentry prog back into RAX */
2198 if (save_ret)
2199 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2200
2201 EMIT1(0x5B); /* pop rbx */
2202 EMIT1(0xC9); /* leave */
2203 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2204 /* skip our return address and return to parent */
2205 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2206 emit_return(&prog, prog);
2207 /* Make sure the trampoline generation logic doesn't overflow */
2208 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2209 ret = -EFAULT;
2210 goto cleanup;
2211 }
2212 ret = prog - (u8 *)image;
2213
2214 cleanup:
2215 kfree(branches);
2216 return ret;
2217 }
2218
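/* Emit a balanced compare tree over progs[a..b] (sorted ascending).
 * The generated code compares rdx (the target program address) against
 * the pivot and branches into the lower or upper half; each leaf does
 * "cmp rdx, imm32; je target" and falls back to an indirect jump via
 * rdx when nothing matches.
 */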
2219 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2220 {
2221 u8 *jg_reloc, *prog = *pprog;
2222 int pivot, err, jg_bytes = 1;
2223 s64 jg_offset;
2224
2225 if (a == b) {
2226 /* Leaf node of recursion, i.e. not a range of indices
2227 * anymore.
2228 */
2229 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2230 if (!is_simm32(progs[a]))
2231 return -1;
2232 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2233 progs[a]);
2234 err = emit_cond_near_jump(&prog, /* je func */
2235 (void *)progs[a], prog,
2236 X86_JE);
2237 if (err)
2238 return err;
2239
2240 emit_indirect_jump(&prog, 2 /* rdx */, prog);
2241
2242 *pprog = prog;
2243 return 0;
2244 }
2245
2246 /* Not a leaf node, so we pivot, and recursively descend into
2247 * the lower and upper ranges.
2248 */
2249 pivot = (b - a) / 2;
2250 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2251 if (!is_simm32(progs[a + pivot]))
2252 return -1;
2253 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2254
2255 if (pivot > 2) { /* jg upper_part */
2256 /* Require near jump. */
2257 jg_bytes = 4;
2258 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2259 } else {
2260 EMIT2(X86_JG, 0);
2261 }
2262 jg_reloc = prog;
2263
2264 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
2265 progs);
2266 if (err)
2267 return err;
2268
2269 /* From Intel 64 and IA-32 Architectures Optimization
2270 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2271 * Coding Rule 11: All branch targets should be 16-byte
2272 * aligned.
2273 */
2274 emit_align(&prog, 16);
2275 jg_offset = prog - jg_reloc;
2276 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2277
2278 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2279 b, progs);
2280 if (err)
2281 return err;
2282
2283 *pprog = prog;
2284 return 0;
2285 }
2286
2287 static int cmp_ips(const void *a, const void *b)
2288 {
2289 const s64 *ipa = a;
2290 const s64 *ipb = b;
2291
2292 if (*ipa > *ipb)
2293 return 1;
2294 if (*ipa < *ipb)
2295 return -1;
2296 return 0;
2297 }
2298
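/* funcs[] is sorted in place so the compare tree emitted by
 * emit_bpf_dispatcher() can perform an ordered binary search over the
 * target addresses.
 */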
2299 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2300 {
2301 u8 *prog = image;
2302
2303 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2304 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2305 }
2306
2307 struct x64_jit_data {
2308 struct bpf_binary_header *rw_header;
2309 struct bpf_binary_header *header;
2310 int *addrs;
2311 u8 *image;
2312 int proglen;
2313 struct jit_context ctx;
2314 };
2315
2316 #define MAX_PASSES 20
2317 #define PADDING_PASSES (MAX_PASSES - 5)
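/* The image normally shrinks from pass to pass; once PADDING_PASSES is
 * reached, do_jit() pads branches with NOPs instead of shrinking them
 * further, so the image size can converge even for pathological programs.
 */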
2318
2319 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2320 {
2321 struct bpf_binary_header *rw_header = NULL;
2322 struct bpf_binary_header *header = NULL;
2323 struct bpf_prog *tmp, *orig_prog = prog;
2324 struct x64_jit_data *jit_data;
2325 int proglen, oldproglen = 0;
2326 struct jit_context ctx = {};
2327 bool tmp_blinded = false;
2328 bool extra_pass = false;
2329 bool padding = false;
2330 u8 *rw_image = NULL;
2331 u8 *image = NULL;
2332 int *addrs;
2333 int pass;
2334 int i;
2335
2336 if (!prog->jit_requested)
2337 return orig_prog;
2338
2339 tmp = bpf_jit_blind_constants(prog);
2340 /*
2341 * If blinding was requested and we failed during blinding,
2342 * we must fall back to the interpreter.
2343 */
2344 if (IS_ERR(tmp))
2345 return orig_prog;
2346 if (tmp != prog) {
2347 tmp_blinded = true;
2348 prog = tmp;
2349 }
2350
2351 jit_data = prog->aux->jit_data;
2352 if (!jit_data) {
2353 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2354 if (!jit_data) {
2355 prog = orig_prog;
2356 goto out;
2357 }
2358 prog->aux->jit_data = jit_data;
2359 }
2360 addrs = jit_data->addrs;
2361 if (addrs) {
2362 ctx = jit_data->ctx;
2363 oldproglen = jit_data->proglen;
2364 image = jit_data->image;
2365 header = jit_data->header;
2366 rw_header = jit_data->rw_header;
2367 rw_image = (void *)rw_header + ((void *)image - (void *)header);
2368 extra_pass = true;
2369 padding = true;
2370 goto skip_init_addrs;
2371 }
2372 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2373 if (!addrs) {
2374 prog = orig_prog;
2375 goto out_addrs;
2376 }
2377
2378 /*
2379 * Before the first pass, make a rough estimation of addrs[]:
2380 * each BPF instruction is translated to less than 64 bytes.
2381 */
2382 for (proglen = 0, i = 0; i <= prog->len; i++) {
2383 proglen += 64;
2384 addrs[i] = proglen;
2385 }
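/*
 * At this point addrs[] only has to be a safe overestimate; later
 * passes overwrite it with the real offsets of the JITed instructions.
 */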
2386 ctx.cleanup_addr = proglen;
2387 skip_init_addrs:
2388
2389 /*
2390 * The JITed image shrinks with every pass and the loop iterates
2391 * until the image stops shrinking. Very large BPF programs
2392 * may converge only on the last pass. In such a case, do one more
2393 * pass to emit the final image.
2394 */
2395 for (pass = 0; pass < MAX_PASSES || image; pass++) {
2396 if (!padding && pass >= PADDING_PASSES)
2397 padding = true;
2398 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
2399 if (proglen <= 0) {
2400 out_image:
2401 image = NULL;
2402 if (header) {
2403 bpf_arch_text_copy(&header->size, &rw_header->size,
2404 sizeof(rw_header->size));
2405 bpf_jit_binary_pack_free(header, rw_header);
2406 }
2407 /* Fall back to interpreter mode */
2408 prog = orig_prog;
2409 if (extra_pass) {
2410 prog->bpf_func = NULL;
2411 prog->jited = 0;
2412 prog->jited_len = 0;
2413 }
2414 goto out_addrs;
2415 }
2416 if (image) {
2417 if (proglen != oldproglen) {
2418 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2419 proglen, oldproglen);
2420 goto out_image;
2421 }
2422 break;
2423 }
2424 if (proglen == oldproglen) {
2425 /*
2426 * The number of entries in extable is the number of BPF_LDX
2427 * insns that access kernel memory via "pointer to BTF type".
2428 * The verifier changed their opcode from LDX|MEM|size
2429 * to LDX|PROBE_MEM|size to make JITing easier.
2430 */
2431 u32 align = __alignof__(struct exception_table_entry);
2432 u32 extable_size = prog->aux->num_exentries *
2433 sizeof(struct exception_table_entry);
2434
2435 /* allocate module memory for x86 insns and extable */
2436 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
2437 &image, align, &rw_header, &rw_image,
2438 jit_fill_hole);
2439 if (!header) {
2440 prog = orig_prog;
2441 goto out_addrs;
2442 }
2443 prog->aux->extable = (void *) image + roundup(proglen, align);
2444 }
2445 oldproglen = proglen;
2446 cond_resched();
2447 }
2448
2449 if (bpf_jit_enable > 1)
2450 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2451
2452 if (image) {
2453 if (!prog->is_func || extra_pass) {
2454 /*
2455 * bpf_jit_binary_pack_finalize fails in two scenarios:
2456 * 1) header is not pointing to proper module memory;
2457 * 2) the arch doesn't support bpf_arch_text_copy().
2458 *
2459 * Both cases are serious bugs and justify WARN_ON.
2460 */
2461 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
2462 /* header has been freed */
2463 header = NULL;
2464 goto out_image;
2465 }
2466
2467 bpf_tail_call_direct_fixup(prog);
2468 } else {
2469 jit_data->addrs = addrs;
2470 jit_data->ctx = ctx;
2471 jit_data->proglen = proglen;
2472 jit_data->image = image;
2473 jit_data->header = header;
2474 jit_data->rw_header = rw_header;
2475 }
2476 prog->bpf_func = (void *)image;
2477 prog->jited = 1;
2478 prog->jited_len = proglen;
2479 } else {
2480 prog = orig_prog;
2481 }
2482
2483 if (!image || !prog->is_func || extra_pass) {
2484 if (image)
2485 bpf_prog_fill_jited_linfo(prog, addrs + 1);
2486 out_addrs:
2487 kvfree(addrs);
2488 kfree(jit_data);
2489 prog->aux->jit_data = NULL;
2490 }
2491 out:
2492 if (tmp_blinded)
2493 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2494 tmp : orig_prog);
2495 return prog;
2496 }
2497
2498 bool bpf_jit_supports_kfunc_call(void)
2499 {
2500 return true;
2501 }
2502
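/* Copy JITed bytes into the final (read-only, executable) image.
 * text_poke_copy() goes through a temporary mapping since the
 * destination is not directly writable.
 */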
2503 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
2504 {
2505 if (text_poke_copy(dst, src, len) == NULL)
2506 return ERR_PTR(-EINVAL);
2507 return dst;
2508 }
2509
2510 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
2511 bool bpf_jit_supports_subprog_tailcalls(void)
2512 {
2513 return true;
2514 }
2515
2516 void bpf_jit_free(struct bpf_prog *prog)
2517 {
2518 if (prog->jited) {
2519 struct x64_jit_data *jit_data = prog->aux->jit_data;
2520 struct bpf_binary_header *hdr;
2521
2522 /*
2523 * If we fail the final pass of JIT (from jit_subprogs),
2524 * the program may not be finalized yet. Call finalize here
2525 * before freeing it.
2526 */
2527 if (jit_data) {
2528 bpf_jit_binary_pack_finalize(prog, jit_data->header,
2529 jit_data->rw_header);
2530 kvfree(jit_data->addrs);
2531 kfree(jit_data);
2532 }
2533 hdr = bpf_jit_binary_pack_hdr(prog);
2534 bpf_jit_binary_pack_free(hdr, NULL);
2535 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
2536 }
2537
2538 bpf_prog_unlock_free(prog);
2539 }
2540