/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H

/*
 * Static call support
 *
 * Static calls use code patching to hard-code function pointers into direct
 * branch instructions. They give the flexibility of function pointers, but
 * with improved performance. This is especially important for cases where
 * retpolines would otherwise be used, as retpolines can significantly impact
 * performance.
 *
 *
 * API overview:
 *
 *   DECLARE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL(name, func);
 *   DEFINE_STATIC_CALL_NULL(name, typename);
 *   DEFINE_STATIC_CALL_RET0(name, typename);
 *
 *   __static_call_return0;
 *
 *   static_call(name)(args...);
 *   static_call_cond(name)(args...);
 *   static_call_update(name, func);
 *   static_call_query(name);
 *
 *   EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
 *
 * Usage example:
 *
 *   # Start with the following functions (with identical prototypes):
 *   int func_a(int arg1, int arg2);
 *   int func_b(int arg1, int arg2);
 *
 *   # Define a 'my_name' reference, associated with func_a() by default
 *   DEFINE_STATIC_CALL(my_name, func_a);
 *
 *   # Call func_a()
 *   static_call(my_name)(arg1, arg2);
 *
 *   # Update 'my_name' to point to func_b()
 *   static_call_update(my_name, &func_b);
 *
 *   # Call func_b()
 *   static_call(my_name)(arg1, arg2);
 *
 *
 * Implementation details:
 *
 *   This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
 *   Otherwise basic indirect calls are used (with function pointers).
 *
 *   Each static_call() site calls into a trampoline associated with the name.
 *   The trampoline has a direct branch to the default function.  Updates to a
 *   name will modify the trampoline's branch destination.
 *
 *   If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
 *   themselves will be patched at runtime to call the functions directly,
 *   rather than calling through the trampoline.  This requires objtool or a
 *   compiler plugin to detect all the static_call() sites and annotate them
 *   in the .static_call_sites section.
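 *
 *   A conceptual sketch of the two variants (illustrative pseudo-assembly
 *   only, not any particular architecture's real code):
 *
 *     STATIC_CALL_TRAMP(my_name):        # out-of-line trampoline
 *         jmp func_a                     # retargeted by static_call_update()
 *
 *     caller:
 *         call STATIC_CALL_TRAMP(my_name)
 *         # with HAVE_STATIC_CALL_INLINE the call site itself is patched
 *         # to: call func_a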
 *
 *
 * Notes on NULL function pointers:
 *
 *   Static_call()s support NULL functions, with many of the caveats that
 *   regular function pointers have.
 *
 *   Clearly calling a NULL function pointer is 'BAD', so too for
 *   static_call()s (although when HAVE_STATIC_CALL it might not be immediately
 *   fatal). A NULL static_call can be the result of:
 *
 *     DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
 *
 *   which is equivalent to declaring a NULL function pointer with just a
 *   typename:
 *
 *     void (*my_func_ptr)(int arg1) = NULL;
 *
 *   or using static_call_update() with a NULL function. In both cases the
 *   HAVE_STATIC_CALL implementation will patch the trampoline with a RET
 *   instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
 *   architectures can patch the trampoline call to a NOP.
 *
 *   In all cases, any argument evaluation is unconditional. Unlike a regular
 *   conditional function pointer call:
 *
 *     if (my_func_ptr)
 *         my_func_ptr(arg1)
 *
 *   where the argument evaluation also depends on the pointer value.
 *
 *   When calling a static_call that can be NULL, use:
 *
 *     static_call_cond(name)(arg1);
 *
 *   which will include the required value tests to avoid NULL-pointer
 *   dereferences.
 *
 *   To query which function is currently set to be called, use:
 *
 *     func = static_call_query(name);
 *
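 *   A minimal sketch (hypothetical 'my_hook' name and my_hook_fn() function,
 *   assuming a void (*)(int) typename):
 *
 *     DEFINE_STATIC_CALL_NULL(my_hook, void (*)(int));
 *
 *     # NULL target: arg1 is still evaluated, but nothing is called
 *     static_call_cond(my_hook)(arg1);
 *
 *     # Install my_hook_fn() and call it
 *     static_call_update(my_hook, &my_hook_fn);
 *     static_call_cond(my_hook)(arg1);
 *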
 *
 * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
 *
 *   Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
 *   conditional void function call, DEFINE_STATIC_CALL_RET0 /
 *   __static_call_return0 optimize the 'do nothing and return 0' function.
 *
 *   This feature is strictly UB per the C standard (since it casts a function
 *   pointer to a different signature) and relies on the architecture ABI to
 *   make things work. In particular it relies on caller stack-cleanup and the
 *   whole return register being clobbered for short return values. All normal
 *   CDECL style ABIs conform.
 *
 *   For example, the x86_64 implementation replaces the 5 byte CALL
 *   instruction at the callsite with a 5 byte clear of the RAX register,
 *   completely eliding any function call overhead.
 *
 *   Notably argument setup is unconditional.
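 *
 *   A minimal sketch (hypothetical 'my_count' name and count_fn() function,
 *   assuming an int (*)(void) typename):
 *
 *     DEFINE_STATIC_CALL_RET0(my_count, int (*)(void));
 *
 *     # Returns 0 without calling anything
 *     ret = static_call(my_count)();
 *
 *     # After an update, count_fn() is called as usual
 *     static_call_update(my_count, &count_fn);
 *     ret = static_call(my_count)();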
 *
 *
 * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
 *
 *   The difference is that the _TRAMP variant tries to only export the
 *   trampoline with the result that a module can use static_call{,_cond}() but
 *   not static_call_update().
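 *
 *   A minimal sketch (reusing the 'my_name' example from above; assumes the
 *   DECLARE_STATIC_CALL() is visible to the module via a shared header):
 *
 *     # Core kernel: define the call, export only the trampoline
 *     DEFINE_STATIC_CALL(my_name, func_a);
 *     EXPORT_STATIC_CALL_TRAMP_GPL(my_name);
 *
 *     # Module: calling works ...
 *     static_call(my_name)(arg1, arg2);
 *
 *     # ... but updating does not; the key is not exported, so this
 *     # fails to link:
 *     static_call_update(my_name, &func_b);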
 *
 */

#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>

#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif

#define static_call_update(name, func)					\
({									\
	typeof(&STATIC_CALL_TRAMP(name)) __F = (func);			\
	__static_call_update(&STATIC_CALL_KEY(name),			\
			     STATIC_CALL_TRAMP_ADDR(name), __F);	\
})

#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

extern int __init static_call_init(void);

extern void static_call_force_reinit(void);

struct static_call_mod {
	struct static_call_mod *next;
	struct module *mod; /* for vmlinux, mod == NULL */
	struct static_call_site *sites;
};

/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
	s32 tramp;
	s32 key;
};

extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);

extern long __static_call_return0(void);

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = __static_call_return0,				\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
	ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
	ARCH_ADD_TRAMP_KEY(name)

#elif defined(CONFIG_HAVE_STATIC_CALL)

static inline int static_call_init(void) { return 0; }

#define DEFINE_STATIC_CALL(name, _func)					\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = __static_call_return0,				\
	};								\
	ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)

#define static_call_cond(name)	(void)__static_call(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	cpus_read_lock();
	WRITE_ONCE(key->func, func);
	arch_static_call_transform(NULL, tramp, func, false);
	cpus_read_unlock();
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

extern long __static_call_return0(void);

#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

#else /* Generic implementation */

static inline int static_call_init(void) { return 0; }

static inline long __static_call_return0(void)
{
	return 0;
}

#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
	}

#define DEFINE_STATIC_CALL(name, _func)					\
	__DEFINE_STATIC_CALL(name, _func, _func)

#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	__DEFINE_STATIC_CALL(name, _func, NULL)

#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)

static inline void __static_call_nop(void) { }

/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 */
#define __static_call_cond(name)					\
({									\
	void *func = READ_ONCE(STATIC_CALL_KEY(name).func);		\
	if (!func)							\
		func = &__static_call_nop;				\
	(typeof(STATIC_CALL_TRAMP(name))*)func;				\
})

#define static_call_cond(name)	(void)__static_call_cond(name)

static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	WRITE_ONCE(key->func, func);
}

static inline int static_call_text_reserved(void *start, void *end)
{
	return 0;
}

#define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

#endif /* CONFIG_HAVE_STATIC_CALL */

#endif /* _LINUX_STATIC_CALL_H */