Searched refs:cpu_rq (Results 1 – 15 of 15) sorted by relevance
/linux-6.6.21/kernel/sched/
membarrier.c
    282  if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &   in membarrier_global_expedited()
    290  p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_global_expedited()
    363  p = rcu_dereference(cpu_rq(cpu_id)->curr);   in membarrier_private_expedited()
    376  p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_private_expedited()
    472  struct rq *rq = cpu_rq(cpu);   in sync_runqueues_membarrier_state()

cpuacct.c
    112  raw_spin_rq_lock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_read()
    129  raw_spin_rq_unlock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_read()
    148  raw_spin_rq_lock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_write()
    156  raw_spin_rq_unlock_irq(cpu_rq(cpu));   in cpuacct_cpuusage_write()
    339  lockdep_assert_rq_held(cpu_rq(cpu));   in cpuacct_charge()

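The cpuacct.c hits show the locking discipline around this macro: counters charged from cpuacct_charge() (which asserts the rq lock is held) are read back under raw_spin_rq_lock_irq(cpu_rq(cpu)). A minimal sketch of the same pattern, assuming kernel/sched context; the per-CPU counter my_usage and the function name are hypothetical, not from the tree:

    /* Hypothetical per-CPU counter, updated by writers under the owning rq's lock. */
    static DEFINE_PER_CPU(u64, my_usage);

    static u64 example_cpuusage_read(int cpu)
    {
            u64 val;

            /*
             * Take cpu's runqueue lock so concurrent writers (which hold the
             * same lock, as cpuacct_charge() asserts) cannot race with us.
             */
            raw_spin_rq_lock_irq(cpu_rq(cpu));
            val = per_cpu(my_usage, cpu);
            raw_spin_rq_unlock_irq(cpu_rq(cpu));

            return val;
    }
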
core.c
    339  raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);   in sched_core_lock()
    348  raw_spin_unlock(&cpu_rq(t)->__lock);   in sched_core_unlock()
    369  cpu_rq(t)->core_enabled = enabled;   in __sched_core_flip()
    371  cpu_rq(cpu)->core->core_forceidle_start = 0;   in __sched_core_flip()
    382  cpu_rq(cpu)->core_enabled = enabled;   in __sched_core_flip()
    392  WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));   in sched_core_assert_empty()
   1067  struct rq *rq = cpu_rq(cpu);   in resched_cpu()
   1130  struct rq *rq = cpu_rq(cpu);   in wake_up_idle_cpu()
   2034  init_uclamp_rq(cpu_rq(cpu));   in init_uclamp()
   2525  rq = cpu_rq(new_cpu);   in move_queued_task()
   [all …]

cpufreq_schedutil.c
    159  struct rq *rq = cpu_rq(sg_cpu->cpu);   in sugov_get_util()
    285  boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);   in sugov_iowait_apply()
    309  if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)   in ignore_dl_rate_limit()
    352  if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&   in sugov_update_single_freq()
    406  if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&   in sugov_update_single_perf()

fair.c
   2053  struct rq *rq = cpu_rq(cpu);   in update_numa_stats()
   2085  struct rq *rq = cpu_rq(env->dst_cpu);   in task_numa_assign()
   2100  rq = cpu_rq(env->dst_cpu);   in task_numa_assign()
   2115  rq = cpu_rq(env->best_cpu);   in task_numa_assign()
   2174  struct rq *dst_rq = cpu_rq(env->dst_cpu);   in task_numa_compare()
   2533  best_rq = cpu_rq(env.best_cpu);   in task_numa_migrate()
   2976  tsk = READ_ONCE(cpu_rq(cpu)->curr);   in task_numa_group()
   5961  rq = cpu_rq(this_cpu);   in distribute_cfs_runtime()
   6182  cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));   in sync_throttle()
   6336  struct rq *rq = cpu_rq(i);   in destroy_cfs_bandwidth()
   [all …]

deadline.c
    107  return &cpu_rq(i)->rd->dl_bw;   in dl_bw_of()
    112  struct root_domain *rd = cpu_rq(i)->rd;   in dl_bw_cpus()
    153  return __dl_bw_capacity(cpu_rq(i)->rd->span);   in dl_bw_capacity()
    159  struct root_domain *rd = cpu_rq(cpu)->rd;   in dl_bw_visited()
    177  struct rq *rq = cpu_rq(i);   in __dl_update()
    185  return &cpu_rq(i)->dl.dl_bw;   in dl_bw_of()
    691  later_rq = cpu_rq(cpu);   in dl_task_offline_migration()
   1827  rq = cpu_rq(cpu);   in select_task_rq_dl()
   1857  dl_task_is_earliest_deadline(p, cpu_rq(target))   in select_task_rq_dl()
   2222  later_rq = cpu_rq(cpu);   in find_lock_later_rq()
   [all …]

rt.c
    222  struct rq *rq = cpu_rq(cpu);   in init_tg_rt_entry()
    710  return &cpu_rq(cpu)->rt;   in sched_rt_period_rt_rq()
   1613  rq = cpu_rq(cpu);   in select_task_rq_rt()
   1663  p->prio < cpu_rq(target)->rt.highest_prio.curr)   in select_task_rq_rt()
   1988  lowest_rq = cpu_rq(cpu);   in find_lock_lowest_rq()
   2392  src_rq = cpu_rq(cpu);   in pull_rt_task()
   2700  rt_rq = &cpu_rq(cpu)->rt;   in task_is_throttled_rt()
   2980  struct rt_rq *rt_rq = &cpu_rq(i)->rt;   in sched_rt_global_constraints()
   3087  for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))   in print_rt_stats()

debug.c
    633  struct rq *rq = cpu_rq(cpu);   in print_cfs_rq()
    752  dl_bw = &cpu_rq(cpu)->rd->dl_bw;   in print_dl_rq()
    764  struct rq *rq = cpu_rq(cpu);   in print_cpu()
   1122  cpu, latency, cpu_rq(cpu)->ticks_without_resched);   in resched_latency_warn()

sched.h
   1209  #define cpu_rq(cpu)  (&per_cpu(runqueues, (cpu)))   [macro definition]
   1211  #define task_rq(p)   cpu_rq(task_cpu(p))
   1212  #define cpu_curr(cpu)  (cpu_rq(cpu)->curr)
   1304  if (sched_core_cookie_match(cpu_rq(cpu), p))   in sched_group_cookie_match()
   1815  for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
   2890  #define nohz_flags(cpu)  (&cpu_rq(cpu)->nohz_flags)
   2982  return cpu_rq(cpu)->cpu_capacity_orig;   in capacity_orig_of()

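The sched.h hits are the definition itself: cpu_rq() resolves a CPU number to its per-CPU struct rq, task_rq() goes through the task's last CPU, and cpu_curr() names the task currently running there. As a rough sketch of how the membarrier.c callers above use it to peek at a remote CPU's current task (not code from the tree; example_peek_curr is made up and assumes kernel/sched context):

    static void example_peek_curr(int cpu)
    {
            struct task_struct *p;

            rcu_read_lock();
            /*
             * The rq is a static per-CPU object and never disappears, but
             * ->curr changes under us, so it is read via RCU, as the
             * membarrier_*_expedited() hits above do.
             */
            p = rcu_dereference(cpu_rq(cpu)->curr);
            pr_info("cpu%d is running pid %d\n", cpu, task_pid_nr(p));
            rcu_read_unlock();
    }
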
stats.c
    132  rq = cpu_rq(cpu);   in show_schedstat()

core_sched.c
    275  rq_i = cpu_rq(i);   in __sched_core_account_forceidle()

topology.c
    379  struct root_domain *rd = cpu_rq(cpu)->rd;   in build_perf_domains()
    711  struct rq *rq = cpu_rq(cpu);   in cpu_attach_domain()
   2486  rq = cpu_rq(i);   in build_sched_domains()
   2689  rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;   in partition_sched_domains_locked()
   2726  cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {   in partition_sched_domains_locked()

cputime.c
    991  rq = cpu_rq(cpu);   in kcpustat_field()
   1078  rq = cpu_rq(cpu);   in kcpustat_cpu_fetch()

psi.c
   1221  struct rq *rq = cpu_rq(cpu);   in psi_cgroup_restart()

/linux-6.6.21/tools/perf/Documentation/
perf-probe.txt
    234  … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …