// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include <asm/debug-monitors.h>
#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)
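/*
 * Bits shared by several encodings below: SF (bit 31) selects the 64-bit
 * variant of a data-processing instruction, N (bit 22) is the extra
 * immediate bit used by the 64-bit bitfield/logical-immediate forms, and
 * LSL_12 (bit 22) requests the 12-bit left shift in add/sub (immediate).
 */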

static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_SVE,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

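/*
 * The top-level encoding class is selected by bits [28:25] of the
 * instruction; the table above covers all sixteen values. For instance,
 * an unconditional branch (b/bl) has bits [28:25] = 0b1010 and is
 * classified as AARCH64_INSN_CLS_BR_SYS.
 */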
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm (literal), adr/adrp */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
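/*
 * ADR/ADRP split their 21-bit immediate across the instruction: the low
 * ADR_IMM_HILOSPLIT bits live in immlo (bits [30:29]) and the remaining
 * bits in immhi (bits [23:5]); the helpers below reassemble and split
 * the value accordingly.
 */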

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
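/*
 * Typical use of the immediate helpers, as seen in the branch generators
 * below: patch the imm26 field of a b/bl with a word offset, e.g.
 *
 *	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 *					     offset >> 2);
 */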

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
					u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static const u32 aarch64_insn_ldst_size[] = {
	[AARCH64_INSN_SIZE_8] = 0,
	[AARCH64_INSN_SIZE_16] = 1,
	[AARCH64_INSN_SIZE_32] = 2,
	[AARCH64_INSN_SIZE_64] = 3,
};

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	size = aarch64_insn_ldst_size[type];
	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long label_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

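/*
 * For instance, generating a call to 'target' from 'pc' amounts to:
 *
 *	insn = aarch64_insn_gen_branch_imm(pc, target,
 *					   AARCH64_INSN_BRANCH_LINK);
 *
 * which yields the encoding of 'bl target', or AARCH64_BREAK_FAULT if
 * the offset cannot be encoded.
 */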
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support a [-128M, 128M) offset. The arm64 virtual address
	 * arrangement guarantees that all kernel and module text is within
	 * +/-128M.
	 */
	offset = label_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
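/*
 * A plain function return can be generated with (assuming the usual
 * AARCH64_INSN_REG_LR (x30) definition from <asm/insn.h>):
 *
 *	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
 *					   AARCH64_INSN_BRANCH_RETURN);
 *
 * i.e. 'ret' using the link register as the target.
 */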

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

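/*
 * Note: 'imm' below is an unsigned byte offset; it must be a multiple of
 * the access size and, once scaled, fit in the 12-bit immediate field
 * (e.g. up to 32760 for 64-bit accesses).
 */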
u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    unsigned int imm,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;
	u32 shift;

	if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
		pr_err("%s: unknown size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	shift = aarch64_insn_ldst_size[size];
	if (imm & ~(BIT(12 + shift) - BIT(shift))) {
		pr_err("%s: invalid imm: %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	imm >>= shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
		insn = aarch64_insn_get_ldr_imm_value();
		break;
	case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
		insn = aarch64_insn_get_str_imm_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}

u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
				  enum aarch64_insn_register reg,
				  bool is64bit)
{
	u32 insn;
	long offset;

	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_get_ldr_lit_value();

	if (is64bit)
		insn |= BIT(30);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
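/*
 * For example, a frame-record push equivalent to 'stp x29, x30, [sp, #-16]!'
 * can be built roughly as (sketch; register constants from <asm/insn.h>):
 *
 *	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_29,
 *						AARCH64_INSN_REG_30,
 *						AARCH64_INSN_REG_SP, -16,
 *						AARCH64_INSN_VARIANT_64BIT,
 *						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */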

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

#ifdef CONFIG_ARM64_LSE_ATOMICS
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
					  u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = 2;
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = 1;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = 3;
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(23, 22);
	insn |= order << 22;

	return insn;
}

u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
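/*
 * Architecturally, passing AARCH64_INSN_REG_ZR as 'result' gives the
 * store-only form of these atomics (the ST<op> aliases, e.g. stadd),
 * since the loaded value is then discarded.
 */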

static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
					 u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = BIT(22);
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = BIT(15);
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = BIT(15) | BIT(22);
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(BIT(15) | BIT(22));
	insn |= order;

	return insn;
}

u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_cas_value();

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_cas_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
#endif

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

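/*
 * The immediate below may span 24 bits, but only as either a plain
 * 12-bit value or a 12-bit value shifted left by 12 (LSL #12); e.g.
 * 0x1000 is encoded as imm12 = 1 with AARCH64_INSN_LSL_12 set, while
 * 0x1001 cannot be encoded and yields AARCH64_BREAK_FAULT.
 */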
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
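/*
 * An arbitrary 64-bit constant can be materialised 16 bits at a time,
 * e.g. (sketch, with 'dst' and 'val' standing for the target register
 * and value):
 *
 *	insn  = aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
 *					  AARCH64_INSN_VARIANT_64BIT,
 *					  AARCH64_INSN_MOVEWIDE_ZERO);
 *	insn2 = aarch64_insn_gen_movewide(dst, (val >> 16) & 0xffff, 16,
 *					  AARCH64_INSN_VARIANT_64BIT,
 *					  AARCH64_INSN_MOVEWIDE_KEEP);
 *	... and so on for shifts 32 and 48.
 */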

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}
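/*
 * e.g. aarch64_insn_gen_move_reg(AARCH64_INSN_REG_0, AARCH64_INSN_REG_1,
 * AARCH64_INSN_VARIANT_64BIT) produces the encoding of 'mov x0, x1'.
 */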

u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Extract a 4-bit register number from an A32 instruction at the given
 * bit offset.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}

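/*
 * Worked example (as traced through the code below): imm = 0x0000ffff
 * with AARCH64_INSN_VARIANT_32BIT is a single run of sixteen ones, so
 * ones = 16 gives imms = 15, no rotation is needed (immr = 0), and the
 * element size is 32 bits, so N = 0.
 */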
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how much rotation we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous range of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}

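/*
 * The barrier option is placed in the CRm field (bits [11:8]) of the DMB
 * encoding; e.g. AARCH64_INSN_MB_ISH (0xb) produces 'dmb ish'.
 */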
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
	u32 opt;
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MB_SY:
		opt = 0xf;
		break;
	case AARCH64_INSN_MB_ST:
		opt = 0xe;
		break;
	case AARCH64_INSN_MB_LD:
		opt = 0xd;
		break;
	case AARCH64_INSN_MB_ISH:
		opt = 0xb;
		break;
	case AARCH64_INSN_MB_ISHST:
		opt = 0xa;
		break;
	case AARCH64_INSN_MB_ISHLD:
		opt = 0x9;
		break;
	case AARCH64_INSN_MB_NSH:
		opt = 0x7;
		break;
	case AARCH64_INSN_MB_NSHST:
		opt = 0x6;
		break;
	case AARCH64_INSN_MB_NSHLD:
		opt = 0x5;
		break;
	default:
		pr_err("%s: unknown dmb type %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_dmb_value();
	insn &= ~GENMASK(11, 8);
	insn |= (opt << 8);

	return insn;
}