/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING_USER
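/* Enable user/kernel context tracking on @cpu (e.g. for nohz_full CPUs). */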
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled.  */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

extern void user_enter_callable(void);
extern void user_exit_callable(void);

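/*
 * user_enter()/user_exit() tell context tracking that the current task is
 * resuming, or returning from, userspace execution.  They are no-ops unless
 * context tracking is enabled.
 */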
static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CONTEXT_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CONTEXT_USER);
}

/* Called with interrupts disabled.  */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_USER);
}

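/*
 * exception_enter() switches context tracking back to kernel mode when an
 * exception may have been taken from userspace, and returns the previous
 * context state so that exception_exit() can restore it.  Both helpers are
 * no-ops with CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK or when context
 * tracking is disabled.
 *
 * A sketch of the usual pairing in an arch exception handler (the handler
 * name is made up for illustration):
 *
 *	void do_example_trap(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		... handle the exception ...
 *
 *		exception_exit(prev_state);
 *	}
 */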
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = __ct_state();
	if (prev_ctx != CONTEXT_KERNEL)
		ct_user_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CONTEXT_KERNEL)
			ct_user_enter(prev_ctx);
	}
}

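/*
 * Guest transitions, called with interrupts disabled around entering and
 * leaving guest mode (e.g. from a hypervisor's vcpu run loop).
 * context_tracking_guest_enter() returns whether context tracking is
 * enabled on this CPU.
 */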
static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}

static __always_inline void context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CONTEXT_GUEST);
}

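/*
 * CT_WARN_ON() only fires when context tracking is enabled, keeping the
 * check cheap otherwise.  A typical assertion at kernel entry from
 * userspace (sketch):
 *
 *	CT_WARN_ON(ct_state() != CONTEXT_USER);
 */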
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
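/*
 * ct_idle_enter()/ct_idle_exit() are called by the idle task around idle
 * sections so that RCU treats the sleeping CPU as being in an extended
 * quiescent state.
 */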
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}

/*
 * Increment the current CPU's context_tracking structure's ->state field
 * with ordering.  Return the new value.
 */
static __always_inline unsigned long ct_state_inc(int incby)
{
	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
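/*
 * Note that incrementing ->state by RCU_DYNTICKS_IDX toggles the bit
 * sampled by rcu_dynticks_curr_cpu_in_eqs() above, which is how entry to
 * and exit from an extended quiescent state get recorded.
 */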

#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */

#endif