// SPDX-License-Identifier: GPL-2.0
/*
 * Common functionality for RV32 and RV64 BPF JIT compilers
 *
 * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <asm/patch.h>
#include "bpf_jit.h"

/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS	32

static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
		/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
		if (ret > 0)
			i++;
		if (offset)
			offset[i] = ctx->ninsns;
		if (ret < 0)
			return ret;
	}
	return 0;
}

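/*
 * On RISC-V, 32-bit ALU results are not implicitly zero-extended, so
 * request that the verifier insert explicit zero-extension instructions
 * where a 32-bit subregister write needs defined upper bits.
 */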
bool bpf_jit_needs_zext(void)
{
	return true;
}

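/*
 * Main JIT entry point: blind constants, iterate code generation until
 * the instruction offsets converge, allocate the packed RW/RO image
 * pair, run a final emission pass, and finalize by copying the RW image
 * into the RO region the program executes from.
 */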
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	unsigned int prog_size = 0, extable_size = 0;
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, i;
	struct rv_jit_data *jit_data;
	struct rv_jit_context *ctx;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

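	/*
	 * A populated offset table means a previous invocation has
	 * already converged, and this is the extra pass (e.g. from
	 * jit_subprogs) that only needs to fix up addresses, so the
	 * saved context is reused as-is.
	 */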
	if (ctx->offset) {
		extra_pass = true;
		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}

	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_offset;
	}

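	/*
	 * Seed the offset table with a generous per-instruction estimate
	 * so the first pass emits branches and jumps in their long form;
	 * subsequent passes can then only shrink the image.
	 */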
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 32;
		ctx->offset[i] = prev_ninsns;
	}

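	/*
	 * Re-run code generation until the emitted size stops changing.
	 * Once it has converged, allocate the image and keep iterating:
	 * with final addresses known, auipc/jalr pairs may still relax
	 * to single jal instructions before the loop exits.
	 */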
	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
		pass++;
		ctx->ninsns = 0;

		bpf_jit_build_prologue(ctx);
		ctx->prologue_len = ctx->ninsns;

		if (build_body(ctx, extra_pass, ctx->offset)) {
			prog = orig_prog;
			goto out_offset;
		}

		ctx->epilogue_offset = ctx->ninsns;
		bpf_jit_build_epilogue(ctx);

		if (ctx->ninsns == prev_ninsns) {
			if (jit_data->header)
				break;
			/* obtain the actual image size */
			extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);
			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

			jit_data->ro_header =
				bpf_jit_binary_pack_alloc(prog_size + extable_size,
							  &jit_data->ro_image, sizeof(u32),
							  &jit_data->header, &jit_data->image,
							  bpf_fill_ill_insns);
			if (!jit_data->ro_header) {
				prog = orig_prog;
				goto out_offset;
			}

			/*
			 * Use the image (RW) for writing the JITed instructions. But also
			 * save the ro_image (RX) for calculating the offsets in the image.
			 * The RW image will later be copied to the RX image, from where the
			 * program will run. bpf_jit_binary_pack_finalize() does this copy in
			 * the final step.
			 */
			ctx->ro_insns = (u16 *)jit_data->ro_image;
			ctx->insns = (u16 *)jit_data->image;
			/*
			 * Now that the image is allocated, the image can
			 * potentially shrink more (auipc/jalr -> jal).
			 */
		}
		prev_ninsns = ctx->ninsns;
	}

	if (i == NR_JIT_ITERATIONS) {
		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
		prog = orig_prog;
		goto out_free_hdr;
	}

	if (extable_size)
		prog->aux->extable = (void *)ctx->ro_insns + prog_size;

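	/*
	 * Final emission pass: offsets are stable now, so the real
	 * instructions are written into the RW image. The extra pass
	 * for subprograms jumps straight here with the saved context.
	 */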
skip_init_ctx:
	pass++;
	ctx->ninsns = 0;
	ctx->nexentries = 0;

	bpf_jit_build_prologue(ctx);
	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_free_hdr;
	}
	bpf_jit_build_epilogue(ctx);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);

	prog->bpf_func = (void *)ctx->ro_insns;
	prog->jited = 1;
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
		if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
							 jit_data->header))) {
			/* ro_header has been freed */
			jit_data->ro_header = NULL;
			prog = orig_prog;
			goto out_offset;
		}
		/*
		 * The instructions have now been copied to the ROX region from
		 * where they will execute.
		 * Write any modified data cache blocks out to memory and
		 * invalidate the corresponding blocks in the instruction cache.
		 */
		bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
		for (i = 0; i < prog->len; i++)
			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
		bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;

out_free_hdr:
	if (jit_data->header) {
		bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
				   sizeof(jit_data->header->size));
		bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
	}
	goto out_offset;
}

u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}

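/*
 * JITed images are allocated from the dedicated BPF JIT region of the
 * kernel virtual address space, bounded by BPF_JIT_REGION_START/END.
 */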
void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}

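/*
 * The JITed image is mapped read-only, so writes to it have to go
 * through the text patching machinery, serialized by text_mutex.
 */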
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_nosync(dst, src, len);
	mutex_unlock(&text_mutex);

	if (ret)
		return ERR_PTR(-EINVAL);

	return dst;
}

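/*
 * Invalidate a range of the image by zero-filling it; the all-zero
 * pattern is a defined illegal instruction on RISC-V, so stale code
 * in the range traps instead of executing.
 */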
int bpf_arch_text_invalidate(void *dst, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_set_nosync(dst, 0, len);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct rv_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}