// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
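/*
 * Note: the disabled variant of DEBUGP() still expands to an if (0) printk()
 * so the format string and arguments remain type-checked by the compiler
 * even when the debug output is compiled out.
 */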

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

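/*
 * With KASLR enabled this returns a random offset of 1..1024 pages (up to
 * 4 MiB with 4 KiB pages) that is added to MODULES_VADDR, so module load
 * addresses are not predictable across boots.
 */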
static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

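/*
 * Allocate module memory from the [MODULES_VADDR + KASLR offset, MODULES_END)
 * range.  The KASAN shadow for the allocation is set up here as well; if that
 * fails, the allocation is undone and NULL is returned.
 */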
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, gfp_mask, PAGE_KERNEL,
				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

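		/*
		 * i386 uses REL relocations: the addend is the value already
		 * stored at the target location, hence the "+=" below
		 * (for a CALL/JMP target the stored addend is typically -4).
		 * With S = sym->st_value and P = (uint32_t)location:
		 *   R_386_32:          word += S
		 *   R_386_PC32/PLT32:  word += S - P
		 */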
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
static int __write_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len),
		   bool apply)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	u64 zero = 0ULL;

	DEBUGP("%s relocate section %u to %u\n",
	       apply ? "Applying" : "Clearing",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		size_t size;

		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

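		/*
		 * x86-64 uses RELA relocations, so the addend comes from the
		 * relocation entry itself.  With S = st_value, A = r_addend
		 * and P = (u64)loc, the value written is:
		 *   R_X86_64_64/32/32S:        S + A
		 *   R_X86_64_PC32/PLT32/PC64:  S + A - P
		 * The 32-bit variants additionally check that the 64-bit
		 * result still fits once truncated.
		 */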
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			continue;	/* nothing to write */
		case R_X86_64_64:
			size = 8;
			break;
		case R_X86_64_32:
			if (val != *(u32 *)&val)
				goto overflow;
			size = 4;
			break;
		case R_X86_64_32S:
			if ((s64)val != *(s32 *)&val)
				goto overflow;
			size = 4;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			val -= (u64)loc;
			size = 4;
			break;
		case R_X86_64_PC64:
			val -= (u64)loc;
			size = 8;
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

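		/*
		 * Applying expects the location to still hold zero (never
		 * relocated) and writes the computed value; clearing expects
		 * the previously written value and restores zero.  A stale
		 * or double application is thus caught by the memcmp()
		 * checks below.
		 */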
		if (apply) {
			if (memcmp(loc, &zero, size)) {
				pr_err("x86/modules: Invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
				       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
				return -ENOEXEC;
			}
			write(loc, &val, size);
		} else {
			if (memcmp(loc, &val, size)) {
				pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for type %d, loc %p, val %Lx\n",
					(int)ELF64_R_TYPE(rel[i].r_info), loc, val);
				return -ENOEXEC;
			}
			write(loc, &zero, size);
		}
	}
	return 0;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

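/*
 * Pick the write primitive based on module state: before the module is
 * formed (MODULE_STATE_UNFORMED) its text is still writable and a plain
 * memcpy() is fine; afterwards the text is mapped read-only, so relocations
 * must go through text_poke() under text_mutex, followed by
 * text_poke_sync() so all CPUs see the new instruction bytes.
 */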
static int write_relocate_add(Elf64_Shdr *sechdrs,
			      const char *strtab,
			      unsigned int symindex,
			      unsigned int relsec,
			      struct module *me,
			      bool apply)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __write_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write, apply);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	return write_relocate_add(sechdrs, strtab, symindex, relsec, me, true);
}

#ifdef CONFIG_LIVEPATCH
void clear_relocate_add(Elf64_Shdr *sechdrs,
			const char *strtab,
			unsigned int symindex,
			unsigned int relsec,
			struct module *me)
{
	write_relocate_add(sechdrs, strtab, symindex, relsec, me, false);
}
#endif

#endif

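/*
 * Final per-architecture fixups after relocation: apply paravirt,
 * FineIBT/kCFI, retpoline, return-thunk and alternatives patching, then
 * wire up SMP lock patching and the ORC unwinder for the module's text.
 */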
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
		*calls = NULL, *cfi = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".call_sites", secstrings + s->sh_name))
			calls = s;
		if (!strcmp(".cfi_sites", secstrings + s->sh_name))
			cfi = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}

	/*
	 * See alternative_instructions() for the ordering rules between the
	 * various patching types.
	 */
	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}
	if (retpolines || cfi) {
		void *rseg = NULL, *cseg = NULL;
		unsigned int rsize = 0, csize = 0;

		if (retpolines) {
			rseg = (void *)retpolines->sh_addr;
			rsize = retpolines->sh_size;
		}

		if (cfi) {
			cseg = (void *)cfi->sh_addr;
			csize = cfi->sh_size;
		}

		apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (calls || para) {
		struct callthunk_sites cs = {};

		if (calls) {
			cs.call_start = (void *)calls->sh_addr;
			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
		}

		if (para) {
			cs.pv_start = (void *)para->sh_addr;
			cs.pv_end = (void *)para->sh_addr + para->sh_size;
		}

		callthunks_patch_module_calls(&cs, me);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
		void *text = me->mem[MOD_TEXT].base;
		void *text_end = text + me->mem[MOD_TEXT].size;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    text, text_end);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

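/* Undo the SMP lock-prefix patching registration done in module_finalize(). */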
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}