// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
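
/*
 * For example, printed with a "%Ld.%06ld" format pair:
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(1234567890ULL));
 *
 * yields "1234.567890": a nanosecond count shown as whole milliseconds
 * followed by the six-digit nanosecond remainder.
 */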

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
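
/*
 * The usual X-macro trick: each SCHED_FEAT(name, enabled) entry in
 * features.h, e.g. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true), expands here
 * to the string literal "GENTLE_FAIR_SLEEPERS", keeping this array in
 * sync with the __SCHED_FEAT_* enum built elsewhere from the same header.
 */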

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */
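
/*
 * With CONFIG_JUMP_LABEL, every feature bit is backed by a static key,
 * so sched_feat() tests in hot paths become patched branch sites rather
 * than loads and tests of sysctl_sched_features; enabling or disabling
 * a feature rewrites those branches at runtime.
 */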

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
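
/*
 * Features are toggled by writing a name, optionally prefixed with
 * "NO_" to clear it, to the "features" file created below, e.g.:
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 */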

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}
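
/*
 * Accepted values match sched_tunable_scaling_names[] further down:
 * 0 (none), 1 (logarithmic) and 2 (linear), controlling how the
 * granularity tunables are scaled with the number of online CPUs.
 */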

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}
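
/*
 * The current mode is bracketed, so with full preemption selected a
 * read of the "preempt" file prints:
 *
 *	none voluntary (full)
 */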

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;
static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);
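
/*
 * The resulting debugfs hierarchy, with all options enabled, looks
 * roughly like:
 *
 *	/sys/kernel/debug/sched/
 *	    features  verbose  preempt  debug  tunable_scaling
 *	    latency_ns  min_granularity_ns  ...  nr_migrate
 *	    numa_balancing/{scan_delay_ms, scan_period_min_ms, ...}
 *	    domains/cpuN/domainM/...	(filled in below)
 */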

#ifdef CONFIG_SMP

static cpumask_var_t sd_sysctl_cpus;
static struct dentry *sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64, 0644, max_newidle_lb_cost);
	SDM(u32, 0644, busy_factor);
	SDM(u32, 0644, imbalance_pct);
	SDM(u32, 0644, cache_nice_tries);
	SDM(str, 0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}
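
/*
 * SDM() just stringifies the member name, so e.g. SDM(u32, 0644,
 * busy_factor) expands to:
 *
 *	debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor);
 *
 * giving each domain directory one file per tunable, plus the "flags"
 * file created above.
 */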

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_idle_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
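
/*
 * For example, with CPUs 0 and 2 online: *offset == 0 yields the header
 * cookie (1), *offset == 1 yields (void *)2 for CPU 0, *offset == 2
 * skips offline CPU 1 and yields (void *)4 for CPU 2, and the walk ends
 * once cpumask_next() runs past nr_cpu_ids.
 */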

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}
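
/*
 * Warn about a CPU that has had need_resched set for an excessively long
 * time without rescheduling; the ratelimit below allows at most one such
 * warning per hour (an interval of 60 * 60 * HZ jiffies, burst of 1) so
 * a persistently stuck CPU cannot flood the log.
 */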
void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}