/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * livepatch.h - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 */

#ifndef _LINUX_LIVEPATCH_H_
#define _LINUX_LIVEPATCH_H_

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/livepatch_sched.h>

#if IS_ENABLED(CONFIG_LIVEPATCH)

/* task patch states */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1

/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos: a hint indicating at which symbol position the old function
 *		can be found (optional)
 * @old_func:	pointer to the function being patched
 * @kobj:	kobject for sysfs resources
 * @node:	list node for klp_object func_list
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @nop:        temporary patch to use the original code again; dyn. allocated
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state.  When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * the symbol is expected to be unique, otherwise patching fails. If
	 * this value is greater than zero, then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal */
	void *old_func;
	struct kobject kobj;
	struct list_head node;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool nop;
	bool patched;
	bool transition;
};
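
/*
 * Example (illustrative only, not part of the upstream API): a minimal
 * sketch of how a livepatch module typically declares its klp_func
 * entries, in the spirit of samples/livepatch/livepatch-sample.c.  The
 * replacement function and the funcs[] array below are hypothetical:
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 * The array is terminated by an empty entry.  Only old_name and new_func
 * are required; old_sympos is needed only when the symbol name is
 * ambiguous within the patched object.
 */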

struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:		executed before code patching
 * @post_patch:		executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 * 				should run
 *
 * All callbacks are optional.  Only the pre-patch callback, if provided,
 * will be unconditionally executed.  If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};
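
/*
 * Example (illustrative sketch, in the spirit of
 * samples/livepatch/livepatch-callbacks-demo.c): the callback bodies
 * below are hypothetical.  They are wired up through the .callbacks
 * member of struct klp_object, defined further down in this header:
 *
 *	static int demo_pre_patch(struct klp_object *obj)
 *	{
 *		pr_info("%s: about to patch %s\n", __func__,
 *			obj->name ? obj->name : "vmlinux");
 *		return 0;
 *	}
 *
 *	static void demo_post_unpatch(struct klp_object *obj)
 *	{
 *		pr_info("%s: %s unpatched\n", __func__,
 *			obj->name ? obj->name : "vmlinux");
 *	}
 *
 * A non-zero return value from the pre-patch callback aborts patching of
 * the corresponding object.
 */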

/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @func_list:	dynamic list of the function entries
 * @node:	list node for klp_patch obj_list
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @dynamic:    temporary object for nop functions; dynamically allocated
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct list_head func_list;
	struct list_head node;
	struct module *mod;
	bool dynamic;
	bool patched;
};
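
/*
 * Example (illustrative sketch): one entry per patched object.  A NULL
 * name targets vmlinux; a module name makes the object apply when that
 * module is loaded.  vmlinux_funcs and nfsd_funcs stand for hypothetical
 * klp_func arrays like the one sketched above, and the callbacks are the
 * hypothetical ones from the previous example:
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = vmlinux_funcs,
 *		},
 *		{
 *			.name = "nfsd",
 *			.funcs = nfsd_funcs,
 *			.callbacks = {
 *				.pre_patch = demo_pre_patch,
 *				.post_unpatch = demo_post_unpatch,
 *			},
 *		}, { }
 *	};
 */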

/**
 * struct klp_state - state of the system modified by the livepatch
 * @id:		system state identifier (non-zero)
 * @version:	version of the change
 * @data:	custom data
 */
struct klp_state {
	unsigned long id;
	unsigned int version;
	void *data;
};

/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @states:	system states that can get modified
 * @replace:	replace all actively used patches
 * @list:	list node for global list of actively used patches
 * @kobj:	kobject for sysfs resources
 * @obj_list:	dynamic list of the object entries
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @forced:	was involved in a forced transition
 * @free_work:	patch cleanup from workqueue-context
 * @finish:	for waiting till it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;
	struct klp_state *states;
	bool replace;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	struct list_head obj_list;
	bool enabled;
	bool forced;
	struct work_struct free_work;
	struct completion finish;
};
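
/*
 * Example (illustrative sketch, following samples/livepatch/livepatch-sample.c):
 * the patch ties the object entries together and is enabled from the
 * livepatch module's init function.  The objs[] array is the hypothetical
 * one sketched above:
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *	}
 *
 *	module_init(livepatch_init);
 *	module_exit(livepatch_exit);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 *
 * Setting .replace = true would additionally make this a cumulative
 * (atomic replace) patch that disables all previously applied livepatches
 * once the transition completes.
 */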

#define klp_for_each_object_static(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

#define klp_for_each_object_safe(patch, obj, tmp_obj)		\
	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)

#define klp_for_each_object(patch, obj)	\
	list_for_each_entry(obj, &patch->obj_list, node)

#define klp_for_each_func_static(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

#define klp_for_each_func_safe(obj, func, tmp_func)			\
	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)

#define klp_for_each_func(obj, func)	\
	list_for_each_entry(func, &obj->func_list, node)
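
/*
 * Example (illustrative sketch): the *_static variants walk the
 * statically declared, empty-entry terminated arrays, while
 * klp_for_each_object()/klp_for_each_func() walk the obj_list/func_list
 * lists that the livepatch core populates when the patch is initialized.
 * This is how the core typically visits every function of a patch:
 *
 *	struct klp_object *obj;
 *	struct klp_func *func;
 *
 *	klp_for_each_object(patch, obj)
 *		klp_for_each_func(obj, func)
 *			pr_debug("%s: patched=%d\n", func->old_name,
 *				 func->patched);
 */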

int klp_enable_patch(struct klp_patch *patch);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);

static inline bool klp_patch_pending(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
}

static inline bool klp_have_reliable_stack(void)
{
	return IS_ENABLED(CONFIG_STACKTRACE) &&
	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}

typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
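
/*
 * Example (illustrative sketch, see Documentation/livepatch/shadow-vars.rst
 * and the samples/livepatch/livepatch-shadow-* modules): shadow variables
 * attach new data to existing objects without changing their layout.  The
 * SV_COUNTER id and the counter attached here are hypothetical:
 *
 *	#define SV_COUNTER	1
 *
 *	unsigned long *counter;
 *
 *	counter = klp_shadow_get_or_alloc(obj, SV_COUNTER, sizeof(*counter),
 *					  GFP_KERNEL, NULL, NULL);
 *	if (counter)
 *		(*counter)++;
 *
 *	klp_shadow_free(obj, SV_COUNTER, NULL);
 *
 * The constructor and destructor arguments are optional.  Note that a
 * constructor runs under the shadow variable spinlock and therefore must
 * not sleep.
 */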

struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
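
/*
 * Example (illustrative sketch, see Documentation/livepatch/system-state.rst):
 * states let a cumulative (.replace) patch hand modified system state over
 * to its successor.  DEMO_STATE, demo_setup_new_state() and the callback
 * below are hypothetical; "patch" is the klp_patch sketched earlier, here
 * assumed to have .states = states and .replace = true set:
 *
 *	#define DEMO_STATE	1
 *
 *	static struct klp_state states[] = {
 *		{
 *			.id = DEMO_STATE,
 *			.version = 1,
 *		}, { }
 *	};
 *
 *	static int demo_state_pre_patch(struct klp_object *obj)
 *	{
 *		struct klp_state *state = klp_get_state(&patch, DEMO_STATE);
 *		struct klp_state *prev = klp_get_prev_state(DEMO_STATE);
 *
 *		if (prev)
 *			state->data = prev->data;
 *		else
 *			state->data = demo_setup_new_state();
 *
 *		return state->data ? 0 : -ENOMEM;
 *	}
 */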

int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symindex, unsigned int secindex,
			     const char *objname);

#else /* !CONFIG_LIVEPATCH */

static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

static inline
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
			     const char *shstrtab, const char *strtab,
			     unsigned int symindex, unsigned int secindex,
			     const char *objname)
{
	return 0;
}

#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_H_ */