// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <linux/stop_machine.h>

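/*
 * struct alt_instr records its original and replacement sequences as
 * offsets relative to the offset field itself; __ALT_PTR() turns such a
 * self-relative offset back into a pointer.
 */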
#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

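/*
 * The cpufeature field carries the capability number and, via
 * ARM64_CB_BIT, a flag marking the alternative as a patching callback
 * rather than a literal replacement instruction sequence.
 */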
#define ALT_CAP(a)		((a)->cpufeature & ~ARM64_CB_BIT)
#define ALT_HAS_CB(a)		((a)->cpufeature & ARM64_CB_BIT)

/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;

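/* Capabilities whose alternatives have been applied; see alternative_is_applied(). */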
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

bool alternative_is_applied(u16 cpufeature)
{
	if (WARN_ON(cpufeature >= ARM64_NCAPS))
		return false;

	return test_bit(cpufeature, applied_alternatives);
}

/*
 * Check if the target PC is within an alternative block.
 */
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
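		/*
		 * Illustration (hypothetical addresses): a replacement adrp at
		 * 0xffff0000_11f01000 encoding a +0x3000 page offset targets
		 * page 0xffff0000_11f04000. Once copied to an original site at
		 * 0xffff0000_11f08010, the encoded offset must become
		 * 0xffff0000_11f04000 - 0xffff0000_11f08000 = -0x4000 so that
		 * the same page is still addressed.
		 */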
		orig_offset  = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}

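/*
 * Default patching callback: copy the replacement sequence over the
 * original one, fixing up PC-relative branches and adrp instructions
 * whose targets lie outside the alternative block.
 */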
static noinstr void patch_alternative(struct alt_instr *alt,
			      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_EL0_DminLine_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}

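/*
 * Walk an alternatives region and patch every entry whose capability is
 * both selected in @feature_mask and detected on this system. Core
 * kernel text is written through its linear-map alias (the text mapping
 * is normally not writable), while module text is patched in place.
 */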
static void __apply_alternatives(const struct alt_region *region,
				 bool is_module,
				 unsigned long *feature_mask)
{
	struct alt_instr *alt;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;
		int cap = ALT_CAP(alt);

		if (!test_bit(cap, feature_mask))
			continue;

		if (!cpus_have_cap(cap))
			continue;

		if (ALT_HAS_CB(alt))
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		origptr = ALT_ORIG_PTR(alt);
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (ALT_HAS_CB(alt))
			alt_cb = ALT_REPL_PTR(alt);
		else
			alt_cb = patch_alternative;

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		icache_inval_all_pou();
		isb();

		/* Ignore ARM64_CB bit from feature mask */
		bitmap_or(applied_alternatives, applied_alternatives,
			  feature_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   cpu_hwcaps, ARM64_NCAPS);
	}
}

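/*
 * The vDSO image is an ELF object embedded at vdso_start; patch it by
 * locating its .altinstructions section and treating that as a region.
 * All capabilities are passed in, as this runs from the system-wide
 * patching path once capability detection has finished.
 */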
void apply_alternatives_vdso(void)
{
	struct alt_region region;
	const struct elf64_hdr *hdr;
	const struct elf64_shdr *shdr;
	const struct elf64_shdr *alt;
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	hdr = (struct elf64_hdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;

	region = (struct alt_region){
		.begin	= (void *)hdr + alt->sh_offset,
		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
	};

	__apply_alternatives(&region, false, &all_capabilities[0]);
}

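/* All alternatives in the core kernel image, delimited by linker symbols. */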
static const struct alt_region kernel_alternatives = {
	.begin	= (struct alt_instr *)__alt_instructions,
	.end	= (struct alt_instr *)__alt_instructions_end,
};

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!all_alternatives_applied)
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);

		bitmap_complement(remaining_capabilities, boot_capabilities,
				  ARM64_NCAPS);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&kernel_alternatives, false,
				     remaining_capabilities);
		/* Barriers provided by the cache flushing */
		all_alternatives_applied = 1;
	}

	return 0;
}

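/*
 * Patch everything not already handled by apply_boot_alternatives(),
 * under stop_machine() so that no CPU runs code while it is being rewritten.
 */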
void __init apply_alternatives_all(void)
{
	pr_info("applying system-wide alternatives\n");

	apply_alternatives_vdso();
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	pr_info("applying boot alternatives\n");

	__apply_alternatives(&kernel_alternatives, false,
			     &boot_capabilities[0]);
}

#ifdef CONFIG_MODULES
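/*
 * Module text is patched in place (is_module == true); cache maintenance
 * is deferred to the module loader, as noted in __apply_alternatives().
 */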
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif

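/*
 * Callback that simply replaces the original instructions with NOPs,
 * for alternatives that only need code removed when a capability is present.
 */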
noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
			       __le32 *updptr, int nr_inst)
{
	for (int i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);