// SPDX-License-Identifier: GPL-2.0
#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit_32.h"

static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}
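/* A note on the range check above: the +0x1000 bias, together with
 * unsigned wraparound, accepts exactly the two's-complement range
 * [-4096, 4095].  For example, (u32)-4096 + 0x1000 == 0 and
 * 4095 + 0x1000 == 0x1fff both pass "< 0x2000", while
 * 4096 + 0x1000 == 0x2000 and (u32)-4097 + 0x1000 == 0xffffffff
 * both fail.
 */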

#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG 2 /* r_X is used */
#define SEEN_MEM 4 /* use mem[] for temporary storage */

#define S13(X) ((X) & 0x1fff)
#define IMMED 0x00002000
#define RD(X) ((X) << 25)
#define RS1(X) ((X) << 14)
#define RS2(X) ((X))
#define OP(X) ((X) << 30)
#define OP2(X) ((X) << 22)
#define OP3(X) ((X) << 19)
#define COND(X) ((X) << 25)
#define F1(X) OP(X)
#define F2(X, Y) (OP(X) | OP2(Y))
#define F3(X, Y) (OP(X) | OP3(Y))
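/* These field positions follow the SPARC V8 instruction formats: op in
 * bits [31:30], rd in [29:25], op3 in [24:19], rs1 in [18:14], bit 13
 * (IMMED) selecting an immediate vs. rs2 in [4:0], and op2 in [24:22]
 * for the format-2 branch/sethi instructions.  So, for example,
 * F3(2, 0x00) | RS1(a) | RS2(b) | RD(d) assembles "add %a, %b, %d".
 */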

#define CONDN COND(0x0)
#define CONDE COND(0x1)
#define CONDLE COND(0x2)
#define CONDL COND(0x3)
#define CONDLEU COND(0x4)
#define CONDCS COND(0x5)
#define CONDNEG COND(0x6)
#define CONDVC COND(0x7)
#define CONDA COND(0x8)
#define CONDNE COND(0x9)
#define CONDG COND(0xa)
#define CONDGE COND(0xb)
#define CONDGU COND(0xc)
#define CONDCC COND(0xd)
#define CONDPOS COND(0xe)
#define CONDVS COND(0xf)

#define CONDGEU CONDCC
#define CONDLU CONDCS

#define WDISP22(X) (((X) >> 2) & 0x3fffff)

#define BA (F2(0, 2) | CONDA)
#define BGU (F2(0, 2) | CONDGU)
#define BLEU (F2(0, 2) | CONDLEU)
#define BGEU (F2(0, 2) | CONDGEU)
#define BLU (F2(0, 2) | CONDLU)
#define BE (F2(0, 2) | CONDE)
#define BNE (F2(0, 2) | CONDNE)

#define BE_PTR BE

#define SETHI(K, REG) \
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG) \
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
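/* Together these build an arbitrary 32-bit constant: SETHI places bits
 * [31:10] of K (shifted into position) and OR_LO fills in the low 10
 * bits.  For K = 0xdeadbeef the pair is roughly:
 *
 *	sethi	%hi(0xdeadbeef), REG	! REG = 0xdeadbc00
 *	or	REG, 0x2ef, REG		! REG = 0xdeadbeef
 */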

#define ADD F3(2, 0x00)
#define AND F3(2, 0x01)
#define ANDCC F3(2, 0x11)
#define OR F3(2, 0x02)
#define XOR F3(2, 0x03)
#define SUB F3(2, 0x04)
#define SUBCC F3(2, 0x14)
#define MUL F3(2, 0x0a) /* umul */
#define DIV F3(2, 0x0e) /* udiv */
#define SLL F3(2, 0x25)
#define SRL F3(2, 0x26)
#define JMPL F3(2, 0x38)
#define CALL F1(1)
#define BR F2(0, 0x01)
#define RD_Y F3(2, 0x28)
#define WR_Y F3(2, 0x30)

#define LD32 F3(3, 0x00)
#define LD8 F3(3, 0x01)
#define LD16 F3(3, 0x02)
#define LD64 F3(3, 0x0b)
#define ST32 F3(3, 0x04)

#define LDPTR LD32
#define BASE_STACKFRAME 96
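/* 96 bytes is the minimum stack frame of the 32-bit SPARC ABI: a
 * 64-byte register-window save area, one word for a hidden
 * struct-return pointer and six words of outgoing-argument space,
 * rounded up to 8-byte alignment.
 */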

#define LD32I (LD32 | IMMED)
#define LD8I (LD8 | IMMED)
#define LD16I (LD16 | IMMED)
#define LD64I (LD64 | IMMED)
#define LDPTRI (LDPTR | IMMED)
#define ST32I (ST32 | IMMED)

#define emit_nop() \
do { \
	*prog++ = SETHI(0, G0); \
} while (0)
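/* SETHI(0, G0) is the canonical SPARC nop encoding: it writes zero
 * into the hardwired-zero register %g0, so it has no architectural
 * effect.
 */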

#define emit_neg() \
do { /* sub %g0, r_A, r_A */ \
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A); \
} while (0)

#define emit_reg_move(FROM, TO) \
do { /* or %g0, FROM, TO */ \
	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO); \
} while (0)

#define emit_clear(REG) \
do { /* or %g0, %g0, REG */ \
	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG); \
} while (0)

#define emit_set_const(K, REG) \
do { /* sethi %hi(K), REG */ \
	*prog++ = SETHI(K, REG); \
	/* or REG, %lo(K), REG */ \
	*prog++ = OR_LO(K, REG); \
} while (0)

/* Emit
 *
 *	OP	r_A, r_X, r_A
 */
#define emit_alu_X(OPCODE) \
do { \
	seen |= SEEN_XREG; \
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A); \
} while (0)
/* Emit either:
 *
 *	OP	r_A, K, r_A
 *
 * or
 *
 *	sethi	%hi(K), r_TMP
 *	or	r_TMP, %lo(K), r_TMP
 *	OP	r_A, r_TMP, r_A
 *
 * depending upon whether K fits in a signed 13-bit
 * immediate instruction field.  Emit nothing if K
 * is zero, except for AND and MUL, which modify A
 * even when K == 0.
 */
#define emit_alu_K(OPCODE, K) \
do { \
	if (K || OPCODE == AND || OPCODE == MUL) { \
		unsigned int _insn = OPCODE; \
		_insn |= RS1(r_A) | RD(r_A); \
		if (is_simm13(K)) { \
			*prog++ = _insn | IMMED | S13(K); \
		} else { \
			emit_set_const(K, r_TMP); \
			*prog++ = _insn | RS2(r_TMP); \
		} \
	} \
} while (0)

#define emit_loadimm(K, DEST) \
do { \
	if (is_simm13(K)) { \
		/* or %g0, K, DEST */ \
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \
	} else { \
		emit_set_const(K, DEST); \
	} \
} while (0)
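/* For example, emit_loadimm(10, r_A) fits in a simm13 and becomes the
 * single instruction "or %g0, 10, r_A", while a value such as
 * 0x12345678 falls back to the two-instruction sethi/or pair above.
 */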

#define emit_loadptr(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(void *)); \
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

#define emit_load32(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u32)); \
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

#define emit_load16(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u16)); \
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

#define __emit_load8(BASE, STRUCT, FIELD, DEST) \
do {	unsigned int _off = offsetof(STRUCT, FIELD); \
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST); \
} while (0)

#define emit_load8(BASE, STRUCT, FIELD, DEST) \
do {	BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u8)); \
	__emit_load8(BASE, STRUCT, FIELD, DEST); \
} while (0)

#define BIAS (-4)

#define emit_ldmem(OFF, DEST) \
do {	*prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \
} while (0)

#define emit_stmem(OFF, SRC) \
do {	*prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \
} while (0)
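/* Scratch memory is addressed as %sp + BIAS - OFF, and every call
 * site below passes OFF == K * 4, i.e. the word for mem[K] sits at
 * byte offset -(4 + K * 4) from the adjusted stack pointer.
 */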

#ifdef CONFIG_SMP
#define emit_load_cpu(REG) \
	emit_load32(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG) emit_clear(REG)
#endif

#define emit_skb_loadptr(FIELD, DEST) \
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST) \
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST) \
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST) \
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST) \
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

#define emit_call(FUNC) \
do {	void *_here = image + addrs[i] - 8; \
	unsigned int _off = (void *)(FUNC) - _here; \
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff); \
	emit_nop(); \
} while (0)
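/* The CALL format holds a 30-bit signed word displacement relative to
 * the call instruction itself.  "addrs[i] - 8" is where the call will
 * sit, since the call and its delay-slot nop are the last two
 * instructions emitted for the current BPF instruction; _off is thus
 * the byte distance to FUNC, scaled down to words by the ">> 2".
 */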

#define emit_branch(BR_OPC, DEST) \
do {	unsigned int _here = addrs[i] - 8; \
	*prog++ = BR_OPC | WDISP22((DEST) - _here); \
} while (0)

#define emit_branch_off(BR_OPC, OFF) \
do {	*prog++ = BR_OPC | WDISP22(OFF); \
} while (0)

#define emit_jump(DEST) emit_branch(BA, DEST)

#define emit_read_y(REG) *prog++ = RD_Y | RD(REG)
#define emit_write_y(REG) *prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

#define emit_cmp(R1, R2) \
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM) \
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));

#define emit_btst(R1, R2) \
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM) \
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));

#define emit_sub(R1, R2, R3) \
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3) \
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3) \
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3) \
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_and(R1, R2, R3) \
	*prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3))

#define emit_andi(R1, IMM, R3) \
	*prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_alloc_stack(SZ) \
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ) \
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
/* A note about branch offset calculations.  The addrs[] array,
 * indexed by BPF instruction, records the address after all the
 * sparc instructions emitted for that BPF instruction.
 *
 * The most common case is to emit a branch at the end of such
 * a code sequence.  So this would be two instructions, the
 * branch and its delay slot.
 *
 * Therefore by default the branch emitters calculate the branch
 * offset field as:
 *
 *	destination - (addrs[i] - 8)
 *
 * This "addrs[i] - 8" is the address of the branch itself or
 * what "." would be in assembler notation.  The "8" part is
 * how we take into consideration the branch and its delay
 * slot mentioned above.
 *
 * Sometimes we need to emit a branch earlier in the code
 * sequence.  And in these situations we adjust "destination"
 * to accommodate this difference.  For example, if we needed
 * to emit a branch (and its delay slot) right before the
 * final instruction emitted for a BPF opcode, we'd use
 * "destination + 4" instead of just plain "destination" above.
 *
 * This is why you see all of these funny emit_branch() and
 * emit_jump() calls with adjusted offsets.
 */
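/* A worked example of the above: suppose the code for BPF instruction
 * i ends at byte offset 0x40 (addrs[i] == 0x40) and its final two
 * instructions are the branch and its delay slot.  The branch then
 * sits at 0x38 == addrs[i] - 8, and emit_branch(op, dest) encodes
 * WDISP22(dest - 0x38).  Had the branch been emitted two instructions
 * earlier, at 0x30, the caller would pass "dest + 8" so that
 * (dest + 8) - 0x38 == dest - 0x30 still yields the correct
 * displacement.
 */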

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	void *image;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction translates to fewer than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
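	/* Each subsequent pass re-emits the program using the addrs[]
	 * computed by the previous pass.  Starting from this deliberate
	 * over-estimate, the image can only shrink or stay the same
	 * size from pass to pass, so the loop below converges; the
	 * ten-pass cap is just a safety net.
	 */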
	cleanup_addr = proglen; /* epilogue address */
	image = NULL;
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		/* Prologue */
		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;
				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 * %o4 = skb->len - skb->data_len
			 * %o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		/* Make sure we don't leak kernel information to the user. */
		if (bpf_needs_clear_a(&filter[0]))
			emit_clear(r_A); /* A = 0 */

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			u16 code = bpf_anc_helper(&filter[i]);
			int ilen;

			switch (code) {
			case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
			case BPF_ALU | BPF_XOR | BPF_X:
				emit_alu_X(XOR);
				break;
			case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
				emit_alu_K(XOR, K);
				break;
			case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0 */
				if (K == 1)
					break;
				emit_write_y(G0);
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
				emit_alu_K(DIV, K);
				break;
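			/* For A /= X the divisor must be checked for zero
			 * at run time.  A rough sketch of the sequence
			 * emitted below when no "ret 0" sequence exists yet
			 * (otherwise a "be" straight to that existing
			 * sequence is used instead):
			 *
			 *	cmp	%r_X, 0
			 *	bne	1f
			 *	 nop
			 *	ba	<epilogue>	! return 0
			 *	 clr	%r_A		! delay slot
			 * 1:	wr	%g0, 0, %y
			 *	nop; nop; nop		! %y write hazard
			 *	udiv	...
			 */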
			case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
					emit_branch(BE, t_offset + 20);
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop();
					emit_jump(cleanup_addr + 20);
					emit_clear(r_A);
				}
				emit_write_y(G0);
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
				emit_alu_X(DIV);
				break;
			case BPF_ALU | BPF_NEG:
				emit_neg();
				break;
			case BPF_RET | BPF_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				fallthrough;
			case BPF_RET | BPF_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;
						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_MISC | BPF_TAX:
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_MISC | BPF_TXA:
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_ANC | SKF_AD_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_ANC | SKF_AD_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
			case BPF_ANC | SKF_AD_PKTTYPE:
				__emit_skb_load8(__pkt_type_offset, r_A);
				emit_andi(r_A, PKT_TYPE_MAX, r_A);
				emit_alu_K(SRL, 5);
				break;
			case BPF_ANC | SKF_AD_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_ANC | SKF_AD_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_ANC | SKF_AD_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_ANC | SKF_AD_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_ANC | SKF_AD_RXHASH:
				emit_skb_load32(hash, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG:
				emit_skb_load16(vlan_tci, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
				__emit_skb_load8(__pkt_vlan_present_offset, r_A);
				if (PKT_VLAN_PRESENT_BIT)
					emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
				if (PKT_VLAN_PRESENT_BIT < 7)
					emit_andi(r_A, 1, r_A);
				break;
			case BPF_LD | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_A);
				break;
			case BPF_LDX | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_X);
				break;
			case BPF_LD | BPF_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_LDX | BPF_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_LD | BPF_MEM:
				seen |= SEEN_MEM;
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_LDX | BPF_MEM:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_ST:
				seen |= SEEN_MEM;
				emit_stmem(K * 4, r_A);
				break;
			case BPF_STX:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
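/* A note on CHOOSE_LOAD_FUNC: a negative K addresses the ancillary
 * offset ranges.  K in [SKF_LL_OFF, 0) takes the _negative_offset
 * helper variant; K below SKF_LL_OFF can never be a valid packet
 * offset, so it falls back to the plain helper (which presumably
 * rejects the load at run time); ordinary K >= 0 takes the fast
 * _positive_offset path.
 */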

			case BPF_LD | BPF_W | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:
				seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_LD | BPF_B | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_LDX | BPF_B | BPF_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_LD | BPF_W | BPF_IND:
				func = bpf_jit_load_word;
common_load_ind:
				seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_LD | BPF_B | BPF_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_JMP | BPF_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP) \
	case CODE: \
		t_op = TOP; \
		f_op = FOP; \
		goto cond_branch

			COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
			COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);

cond_branch:
				f_offset = addrs[i + filter[i].jf];
				t_offset = addrs[i + filter[i].jt];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					emit_jump(t_offset);
					emit_nop();
					break;
				}

				switch (code) {
				case BPF_JMP | BPF_JGT | BPF_X:
				case BPF_JMP | BPF_JGE | BPF_X:
				case BPF_JMP | BPF_JEQ | BPF_X:
					seen |= SEEN_XREG;
					emit_cmp(r_A, r_X);
					break;
				case BPF_JMP | BPF_JSET | BPF_X:
					seen |= SEEN_XREG;
					emit_btst(r_A, r_X);
					break;
				case BPF_JMP | BPF_JEQ | BPF_K:
				case BPF_JMP | BPF_JGT | BPF_K:
				case BPF_JMP | BPF_JGE | BPF_K:
					if (is_simm13(K)) {
						emit_cmpi(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_cmp(r_A, r_TMP);
					}
					break;
				case BPF_JMP | BPF_JSET | BPF_K:
					if (is_simm13(K)) {
						emit_btsti(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_btst(r_A, r_TMP);
					}
					break;
				}
				if (filter[i].jt != 0) {
					if (filter[i].jf)
						t_offset += 8;
					emit_branch(t_op, t_offset);
					emit_nop(); /* delay slot */
					if (filter[i].jf) {
						emit_jump(f_offset);
						emit_nop();
					}
					break;
				}
				emit_branch(f_op, f_offset);
				emit_nop(); /* delay slot */
				break;

			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_memfree(image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* The last BPF instruction is always a RET:
		 * use it to establish the cleanup instruction(s) address.
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
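		/* Once two consecutive passes produce the same length,
		 * the offsets have converged; allocate the image so the
		 * next pass can emit the final instructions into it.
		 */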
		if (proglen == oldproglen) {
			image = module_alloc(proglen);
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass + 1, image);

	if (image) {
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}