/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

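/*
 * All schedstat accounting is gated on the static key below; it is flipped
 * at run time via the "schedstats=enable" boot parameter or the
 * kernel.sched_schedstats sysctl (see check_schedstat_required()).
 */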
extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
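
/*
 * The schedstat_*() macros only touch their argument when schedstats have
 * been enabled, while the __schedstat_*() variants update unconditionally
 * and are meant for paths that have already checked schedstat_enabled()
 * once. Illustrative use (the field name is only an example):
 *
 *	if (schedstat_enabled())
 *		__schedstat_set(stats->wait_start, rq_clock(rq));
 */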
#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

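/*
 * Out-of-line helpers that fill in the wait/sleep related members of a
 * sched_statistics instance. They are intended for paths that have already
 * verified schedstat_enabled().
 */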
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Warn once if a dependent tracepoint is active while schedstats are disabled */
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(var)		0
# define schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)       do { } while (0)
# define __update_stats_wait_end(rq, p, stats)         do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)  do { } while (0)
# define check_schedstat_required()                    do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
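/*
 * A scheduling entity may represent a task group rather than a task, in
 * which case its statistics cannot live in a task_struct. They are instead
 * allocated directly behind the entity, and __schedstats_from_se() below
 * recovers them via container_of().
 */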
struct sched_entity_stats {
	struct sched_entity     se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}

#ifdef CONFIG_PSI
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep);
void psi_account_irqtime(struct task_struct *task, u32 delta);

/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (p->in_memstall)
		set |= TSK_MEMSTALL_RUNNING;

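	/*
	 * A non-wakeup enqueue, or a wakeup that was preceded by a migration
	 * (see psi_ttwu_dequeue()), has to restore the sleep-persistent
	 * memstall state that was cleared on the old runqueue.
	 */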
	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily, typically due to expiring its time slice (this may also
 * be called when switching to the idle task). Now we can calculate how
 * long we ran. Also, if the process is still in the TASK_RUNNING state,
 * call sched_info_enqueue() to mark that it has now again started waiting
 * on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
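	/* last_arrival was stamped by sched_info_arrive() when @t got the CPU. */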
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically due to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
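
/*
 * Note: sched_info_switch() runs on the context-switch path with the
 * runqueue lock held, which satisfies the locking expectation documented
 * above rq_sched_info_arrive()/rq_sched_info_depart().
 */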

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */

#endif /* _KERNEL_STATS_H */