// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/completion.h>
#include <linux/memory.h>
#include <linux/rcupdate.h>
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include "state.h"
#include "transition.h"

/*
 * klp_mutex is a coarse lock which serializes access to klp data.  All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);

static struct kobject *klp_root_kobj;

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	rcu_read_lock_sched();
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics
	 * of the patched functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	rcu_read_unlock_sched();
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

static struct klp_func *klp_find_func(struct klp_object *obj,
				      struct klp_func *old_func)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if ((strcmp(old_func->old_name, func->old_name) == 0) &&
		    (old_func->old_sympos == func->old_sympos)) {
			return func;
		}
	}

	return NULL;
}

static struct klp_object *klp_find_object(struct klp_patch *patch,
					  struct klp_object *old_obj)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj) {
		if (klp_is_module(old_obj)) {
			if (klp_is_module(obj) &&
			    strcmp(old_obj->name, obj->name) == 0) {
				return obj;
			}
		} else if (!klp_is_module(obj)) {
			return obj;
		}
	}

	return NULL;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}

static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
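
/*
 * Illustrative example (hypothetical names, not from this file): if a patched
 * module contains two static functions that are both named "do_work", the
 * second copy is resolved with
 *
 *	klp_find_object_symbol("my_module", "do_work", 2, &addr);
 *
 * while sympos == 0 would fail with -EINVAL because the symbol is not unique
 * within that object.
 */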

static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
			       unsigned int symndx, Elf_Shdr *relasec,
			       const char *sec_objname)
{
	int i, cnt, ret;
	char sym_objname[MODULE_NAME_LEN];
	char sym_name[KSYM_NAME_LEN];
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;
	bool sym_vmlinux;
	bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");

	/*
	 * Since the field widths for sym_objname and sym_name in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.sym_objname.sym_name,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%511[^,],%lu",
			     sym_objname, sym_name, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		sym_vmlinux = !strcmp(sym_objname, "vmlinux");

		/*
		 * Prevent module-specific KLP rela sections from referencing
		 * vmlinux symbols.  This helps prevent ordering issues with
		 * module special section initializations.  Presumably such
		 * symbols are exported and normal relas can be used instead.
		 */
		if (!sec_vmlinux && sym_vmlinux) {
			pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
			       sym_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
					     sym_name, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
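
/*
 * As an illustration (hypothetical names): a livepatch referencing an
 * unexported vmlinux symbol "foo_internal" (unique, hence sympos 0) carries
 * an ELF symbol named
 *
 *	.klp.sym.vmlinux.foo_internal,0
 *
 * and the relocations that use it live in a ".klp.rela.vmlinux.<section>"
 * section, which klp_apply_section_relocs() below matches by object name.
 */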

/*
 * At a high level, there are two types of klp relocation sections: those which
 * reference symbols which live in vmlinux; and those which reference symbols
 * which live in other modules.  This function is called for both types:
 *
 * 1) When a klp module itself loads, the module code calls this function to
 *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
 *    These relocations are written to the klp module text to allow the patched
 *    code/data to reference unexported vmlinux symbols.  They're written as
 *    early as possible to ensure that other module init code (e.g.,
 *    jump_label_apply_nops) can access any unexported vmlinux symbols which
 *    might be referenced by the klp module's special sections.
 *
 * 2) When a to-be-patched module loads -- or is already loaded when a
 *    corresponding klp module loads -- klp code calls this function to write
 *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
 *    are written to the klp module text to allow the patched code/data to
 *    reference symbols which live in the to-be-patched module or one of its
 *    module dependencies.  Exported symbols are supported, in addition to
 *    unexported symbols, in order to enable late module patching, which allows
 *    the to-be-patched module to be loaded and patched sometime *after* the
 *    klp module is loaded.
 */
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symndx, unsigned int secndx,
			     const char *objname)
{
	int cnt, ret;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec = sechdrs + secndx;

	/*
	 * Format: .klp.rela.sec_objname.section_name
	 * See comment in klp_resolve_symbols() for an explanation
	 * of the selected field width value.
	 */
	cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
		     sec_objname);
	if (cnt != 1) {
		pr_err("section %s has an incorrectly formatted name\n",
		       shstrtab + sec->sh_name);
		return -EINVAL;
	}

	if (strcmp(objname ? objname : "vmlinux", sec_objname))
		return 0;

	ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
	if (ret)
		return ret;

	return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
}

/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/patched
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
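
/*
 * Usage sketch (behavior implemented by the store callbacks below): writing
 * '0' to <patch>/enabled either reverses a pending transition or disables a
 * fully applied patch; writing '1' is only accepted to reverse a pending
 * disable, since a disabled patch cannot be re-enabled through sysfs.
 * Writing '1' to <patch>/force completes the current transition even for
 * tasks that could not be transitioned safely, at the cost of the
 * consistency-model guarantees.
 */
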
static int __klp_disable_patch(struct klp_patch *patch);

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Allow a pending transition to be reversed in either direction. It
	 * might be necessary to complete the transition without forcing it
	 * and breaking the system integrity.
	 *
	 * Re-enabling an already disabled patch is not allowed.
	 */
	if (patch == klp_transition_patch)
		klp_reverse_transition();
	else if (!enabled)
		ret = __klp_disable_patch(patch);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&klp_mutex);

	if (ret)
		return ret;
	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}

static ssize_t transition_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n",
			patch == klp_transition_patch);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);

static ssize_t patched_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);
	return sysfs_emit(buf, "%d\n", obj->patched);
}

static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
static struct attribute *klp_object_attrs[] = {
	&patched_kobj_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(klp_object);

static void klp_free_object_dynamic(struct klp_object *obj)
{
	kfree(obj->name);
	kfree(obj);
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func);
static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj);

static struct klp_object *klp_alloc_object_dynamic(const char *name,
						   struct klp_patch *patch)
{
	struct klp_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (name) {
		obj->name = kstrdup(name, GFP_KERNEL);
		if (!obj->name) {
			kfree(obj);
			return NULL;
		}
	}

	klp_init_object_early(patch, obj);
	obj->dynamic = true;

	return obj;
}

static void klp_free_func_nop(struct klp_func *func)
{
	kfree(func->old_name);
	kfree(func);
}

static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
					   struct klp_object *obj)
{
	struct klp_func *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (!func)
		return NULL;

	if (old_func->old_name) {
		func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
		if (!func->old_name) {
			kfree(func);
			return NULL;
		}
	}

	klp_init_func_early(obj, func);
	/*
	 * func->new_func is the same as func->old_func. These addresses are
	 * set when the object is loaded, see klp_init_object_loaded().
	 */
	func->old_sympos = old_func->old_sympos;
	func->nop = true;

	return func;
}

static int klp_add_object_nops(struct klp_patch *patch,
			       struct klp_object *old_obj)
{
	struct klp_object *obj;
	struct klp_func *func, *old_func;

	obj = klp_find_object(patch, old_obj);

	if (!obj) {
		obj = klp_alloc_object_dynamic(old_obj->name, patch);
		if (!obj)
			return -ENOMEM;
	}

	klp_for_each_func(old_obj, old_func) {
		func = klp_find_func(obj, old_func);
		if (func)
			continue;

		func = klp_alloc_func_nop(old_func, obj);
		if (!func)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Add 'nop' functions which simply return to the caller to run
 * the original function. The 'nop' functions are added to a
 * patch to facilitate a 'replace' mode.
 */
static int klp_add_nops(struct klp_patch *patch)
{
	struct klp_patch *old_patch;
	struct klp_object *old_obj;

	klp_for_each_patch(old_patch) {
		klp_for_each_object(old_patch, old_obj) {
			int err;

			err = klp_add_object_nops(patch, old_obj);
			if (err)
				return err;
		}
	}

	return 0;
}
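
/*
 * For example (a sketch of the 'replace' semantics, not specific code in this
 * file): if an already installed patch modifies funcA() and funcB(), and an
 * atomic-replace patch only provides a new funcA(), klp_add_nops() creates a
 * dynamic nop entry for funcB(). Transitioning to the replacement patch then
 * also reverts funcB() to its original implementation, and the nop is freed
 * again by klp_discard_nops() once the transition completes.
 */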

static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_patch_groups,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
	struct klp_object *obj;

	obj = container_of(kobj, struct klp_object, kobj);

	if (obj->dynamic)
		klp_free_object_dynamic(obj);
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = klp_object_groups,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
	struct klp_func *func;

	func = container_of(kobj, struct klp_func, kobj);

	if (func->nop)
		klp_free_func_nop(func);
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func, *tmp_func;

	klp_for_each_func_safe(obj, func, tmp_func) {
		if (nops_only && !func->nop)
			continue;

		list_del(&func->node);
		kobject_put(&func->kobj);
	}
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func) {
		func->old_func = NULL;

		if (func->nop)
			func->new_func = NULL;
	}
}

static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj, *tmp_obj;

	klp_for_each_object_safe(patch, obj, tmp_obj) {
		__klp_free_funcs(obj, nops_only);

		if (nops_only && !obj->dynamic)
			continue;

		list_del(&obj->node);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_objects(struct klp_patch *patch)
{
	__klp_free_objects(patch, false);
}

static void klp_free_objects_dynamic(struct klp_patch *patch)
{
	__klp_free_objects(patch, true);
}

/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}

/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}

/*
 * The livepatch might be freed from the sysfs interface created by the patch.
 * This work item allows waiting until the interface is destroyed in a
 * separate context.
 */
static void klp_free_patch_work_fn(struct work_struct *work)
{
	struct klp_patch *patch =
		container_of(work, struct klp_patch, free_work);

	klp_free_patch_finish(patch);
}

void klp_free_patch_async(struct klp_patch *patch)
{
	klp_free_patch_start(patch);
	schedule_work(&patch->free_work);
}

void klp_free_replaced_patches_async(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch, *tmp_patch;

	klp_for_each_patch_safe(old_patch, tmp_patch) {
		if (old_patch == new_patch)
			return;
		klp_free_patch_async(old_patch);
	}
}

static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name)
		return -EINVAL;

	/*
	 * NOPs get the address later. The patched module must be loaded,
	 * see klp_init_object_loaded().
	 */
	if (!func->new_func && !func->nop)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
			   func->old_name,
			   func->old_sympos ? func->old_sympos : 1);
}

static int klp_apply_object_relocs(struct klp_patch *patch,
				   struct klp_object *obj)
{
	int i, ret;
	struct klp_modinfo *info = patch->mod->klp_info;

	for (i = 1; i < info->hdr.e_shnum; i++) {
		Elf_Shdr *sec = info->sechdrs + i;

		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
					       info->secstrings,
					       patch->mod->core_kallsyms.strtab,
					       info->symndx, i, obj->name);
		if (ret)
			return ret;
	}

	return 0;
}

/* Parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (klp_is_module(obj)) {
		/*
		 * Only write module-specific relocations here
		 * (.klp.rela.{module}.*).  vmlinux-specific relocations were
		 * written earlier during the initialization of the klp module
		 * itself.
		 */
		ret = klp_apply_object_relocs(patch, obj);
		if (ret)
			return ret;
	}

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     (unsigned long *)&func->old_func);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		if (func->nop)
			func->new_func = func->old_func;

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			return ret;
	}

	if (klp_is_object_loaded(obj))
		ret = klp_init_object_loaded(patch, obj);

	return ret;
}

static void klp_init_func_early(struct klp_object *obj,
				struct klp_func *func)
{
	kobject_init(&func->kobj, &klp_ktype_func);
	list_add_tail(&func->node, &obj->func_list);
}

static void klp_init_object_early(struct klp_patch *patch,
				  struct klp_object *obj)
{
	INIT_LIST_HEAD(&obj->func_list);
	kobject_init(&obj->kobj, &klp_ktype_object);
	list_add_tail(&obj->node, &patch->obj_list);
}

static void klp_init_patch_early(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;

	INIT_LIST_HEAD(&patch->list);
	INIT_LIST_HEAD(&patch->obj_list);
	kobject_init(&patch->kobj, &klp_ktype_patch);
	patch->enabled = false;
	patch->forced = false;
	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
	init_completion(&patch->finish);

	klp_for_each_object_static(patch, obj) {
		klp_init_object_early(patch, obj);

		klp_for_each_func_static(obj, func) {
			klp_init_func_early(obj, func);
		}
	}
}

static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		return ret;

	if (patch->replace) {
		ret = klp_add_nops(patch);
		if (ret)
			return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			return ret;
	}

	list_add_tail(&patch->list, &klp_patches);

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition().  In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	patch->enabled = false;
	klp_try_complete_transition();

	return 0;
}

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	patch->enabled = true;
	klp_try_complete_transition();

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	klp_cancel_transition();
	return ret;
}

/**
 * klp_enable_patch() - enable the livepatch
 * @patch:	patch to be enabled
 *
 * Initializes the data structure associated with the patch, creates the sysfs
 * interface, performs the needed symbol lookups and code relocations, and
 * registers the patched functions with ftrace.
 *
 * This function is supposed to be called from the livepatch module_init()
 * callback.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;
	struct klp_object *obj;

	if (!patch || !patch->mod || !patch->objs)
		return -EINVAL;

	klp_for_each_object_static(patch, obj) {
		if (!obj->funcs)
			return -EINVAL;
	}

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	if (!klp_have_reliable_stack()) {
		pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
		pr_warn("The livepatch transition may never complete.\n");
	}

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_compatible(patch)) {
		pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
			patch->mod->name);
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	if (!try_module_get(patch->mod)) {
		mutex_unlock(&klp_mutex);
		return -ENODEV;
	}

	klp_init_patch_early(patch);

	ret = klp_init_patch(patch);
	if (ret)
		goto err;

	ret = __klp_enable_patch(patch);
	if (ret)
		goto err;

	mutex_unlock(&klp_mutex);

	return 0;

err:
	klp_free_patch_start(patch);

	mutex_unlock(&klp_mutex);

	klp_free_patch_finish(patch);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
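
/*
 * A minimal usage sketch, modeled on samples/livepatch/livepatch-sample.c
 * (the function and symbol names below come from that sample, not from this
 * file). The livepatch module declares the functions it replaces and calls
 * klp_enable_patch() from its module_init() callback; leaving obj->name NULL
 * means the patched object is vmlinux:
 *
 *	#include <linux/module.h>
 *	#include <linux/livepatch.h>
 *	#include <linux/seq_file.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *	module_init(livepatch_init);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */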

/*
 * This function unpatches objects from the replaced livepatches.
 *
 * We could be pretty aggressive here. It is called in the situation where
 * these structures are no longer accessed from the ftrace handler. All
 * functions are redirected by klp_transition_patch: they either use the new
 * code or stay in the original code thanks to the special nop function
 * patches.
 *
 * The only exception is when the transition was forced. In this case,
 * klp_ftrace_handler() might still see the replaced patch on the stack.
 * Fortunately, it is carefully designed to work with removed functions
 * thanks to RCU. We only have to keep the patches on the system. Also
 * this is handled transparently by patch->module_put.
 */
void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{
	struct klp_patch *old_patch;

	klp_for_each_patch(old_patch) {
		if (old_patch == new_patch)
			return;

		old_patch->enabled = false;
		klp_unpatch_objects(old_patch);
	}
}

/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding an unnecessary delay in the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOP structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}

/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	klp_for_each_patch(patch) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch != klp_transition_patch)
				klp_pre_unpatch_callback(obj);

			pr_notice("reverting patch '%s' on unloading module '%s'\n",
				  patch->mod->name, obj->mod->name);
			klp_unpatch_object(obj);

			klp_post_unpatch_callback(obj);

			klp_free_object_loaded(obj);
			break;
		}
	}
}

int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	if (!strcmp(mod->name, "vmlinux")) {
		pr_err("vmlinux.ko: invalid module name\n");
		return -EINVAL;
	}

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	klp_for_each_patch(patch) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}

void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);