Searched refs:cpu_rq (Results 1 – 14 of 14) sorted by relevance
kernel/sched/membarrier.c
  277  if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &  in membarrier_global_expedited()
  285  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_global_expedited()
  357  p = rcu_dereference(cpu_rq(cpu_id)->curr);  in membarrier_private_expedited()
  370  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_private_expedited()
  465  struct rq *rq = cpu_rq(cpu);  in sync_runqueues_membarrier_state()
kernel/sched/cpuacct.c
  112  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  129  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  148  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  156  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  339  lockdep_assert_rq_held(cpu_rq(cpu));  in cpuacct_charge()
kernel/sched/core.c
  321  raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);  in sched_core_lock()
  330  raw_spin_unlock(&cpu_rq(t)->__lock);  in sched_core_unlock()
  351  cpu_rq(t)->core_enabled = enabled;  in __sched_core_flip()
  353  cpu_rq(cpu)->core->core_forceidle_start = 0;  in __sched_core_flip()
  367  cpu_rq(cpu)->core_enabled = enabled;  in __sched_core_flip()
  377  WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));  in sched_core_assert_empty()
  1057 struct rq *rq = cpu_rq(cpu);  in resched_cpu()
  1123 struct rq *rq = cpu_rq(cpu);  in wake_up_idle_cpu()
  2004 init_uclamp_rq(cpu_rq(cpu));  in init_uclamp()
  2334 rq = cpu_rq(new_cpu);  in move_queued_task()
  [all …]
kernel/sched/cpufreq_schedutil.c
  159  struct rq *rq = cpu_rq(sg_cpu->cpu);  in sugov_get_util()
  285  boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);  in sugov_iowait_apply()
  309  if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)  in ignore_dl_rate_limit()
  348  if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&  in sugov_update_single_freq()
  398  if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&  in sugov_update_single_perf()
kernel/sched/deadline.c
  103  return &cpu_rq(i)->rd->dl_bw;  in dl_bw_of()
  108  struct root_domain *rd = cpu_rq(i)->rd;  in dl_bw_cpus()
  127  struct root_domain *rd = cpu_rq(i)->rd;  in __dl_bw_capacity()
  155  struct root_domain *rd = cpu_rq(cpu)->rd;  in dl_bw_visited()
  173  struct rq *rq = cpu_rq(i);  in __dl_update()
  181  return &cpu_rq(i)->dl.dl_bw;  in dl_bw_of()
  694  later_rq = cpu_rq(cpu);  in dl_task_offline_migration()
  1827 rq = cpu_rq(cpu);  in select_task_rq_dl()
  1858 cpu_rq(target)->dl.earliest_dl.curr) ||  in select_task_rq_dl()
  1859 (cpu_rq(target)->dl.dl_nr_running == 0)))  in select_task_rq_dl()
  [all …]
kernel/sched/fair.c
  1613 struct rq *rq = cpu_rq(cpu);  in update_numa_stats()
  1645 struct rq *rq = cpu_rq(env->dst_cpu);  in task_numa_assign()
  1660 rq = cpu_rq(env->dst_cpu);  in task_numa_assign()
  1675 rq = cpu_rq(env->best_cpu);  in task_numa_assign()
  1734 struct rq *dst_rq = cpu_rq(env->dst_cpu);  in task_numa_compare()
  2084 best_rq = cpu_rq(env.best_cpu);  in task_numa_migrate()
  2527 tsk = READ_ONCE(cpu_rq(cpu)->curr);  in task_numa_group()
  5324 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));  in sync_throttle()
  5649 return sched_idle_rq(cpu_rq(cpu));  in sched_idle_cpu()
  5924 return cpu_rq(cpu)->cpu_capacity;  in capacity_of()
  [all …]
kernel/sched/debug.c
  584  struct rq *rq = cpu_rq(cpu);  in print_cfs_rq()
  605  rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;  in print_cfs_rq()
  705  dl_bw = &cpu_rq(cpu)->rd->dl_bw;  in print_dl_rq()
  717  struct rq *rq = cpu_rq(cpu);  in print_cpu()
  1078 cpu, latency, cpu_rq(cpu)->ticks_without_resched);  in resched_latency_warn()
kernel/sched/sched.h
  1353 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))  macro
  1355 #define task_rq(p) cpu_rq(task_cpu(p))
  1356 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
  1721 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
  2743 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
  2835 return cpu_rq(cpu)->cpu_capacity_orig;  in capacity_orig_of()
  2904 cfs_rq = &cpu_rq(cpu)->cfs;  in cpu_util_cfs()
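The sched.h hits above are the definition behind every other result: cpu_rq() maps a CPU index to that CPU's per-CPU struct rq via per_cpu(), and task_rq()/cpu_curr() are thin wrappers over it. A minimal userspace sketch of the same accessor pattern, assuming a plain array in place of the kernel's per-CPU area and a hypothetical stub struct rq (not the kernel's):

    #include <stdio.h>

    #define NR_CPUS 4

    /* Hypothetical stub; the real struct rq is defined in kernel/sched/sched.h. */
    struct rq {
            int cpu;
            const char *curr;   /* stands in for struct task_struct *curr */
    };

    /* Plain array standing in for the kernel's per-CPU runqueues. */
    static struct rq runqueues[NR_CPUS];

    /* Same shape as the kernel macros: CPU index -> per-CPU rq -> field. */
    #define cpu_rq(cpu)     (&runqueues[(cpu)])
    #define cpu_curr(cpu)   (cpu_rq(cpu)->curr)

    int main(void)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    cpu_rq(cpu)->cpu = cpu;
                    cpu_rq(cpu)->curr = "swapper";
            }
            printf("cpu 2 runs: %s\n", cpu_curr(2));
            return 0;
    }

Because cpu_rq() expands to an address computation rather than a function call, callers in the hits above can freely compose it (cpu_rq(cpu)->rd, cpu_rq(cpu)->cfs, and so on) with no indirection cost.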
kernel/sched/rt.c
  218  struct rq *rq = cpu_rq(cpu);  in init_tg_rt_entry()
  706  return &cpu_rq(cpu)->rt;  in sched_rt_period_rt_rq()
  1613 rq = cpu_rq(cpu);  in select_task_rq_rt()
  1663 p->prio < cpu_rq(target)->rt.highest_prio.curr)  in select_task_rq_rt()
  1985 lowest_rq = cpu_rq(cpu);  in find_lock_lowest_rq()
  2383 src_rq = cpu_rq(cpu);  in pull_rt_task()
  2950 struct rt_rq *rt_rq = &cpu_rq(i)->rt;  in sched_rt_global_constraints()
  3057 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))  in print_rt_stats()
kernel/sched/stats.c
  132  rq = cpu_rq(cpu);  in show_schedstat()
kernel/sched/core_sched.c
  274  rq_i = cpu_rq(i);  in __sched_core_account_forceidle()
kernel/sched/topology.c
  377  struct root_domain *rd = cpu_rq(cpu)->rd;  in build_perf_domains()
  709  struct rq *rq = cpu_rq(cpu);  in cpu_attach_domain()
  2366 rq = cpu_rq(i);  in build_sched_domains()
  2569 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;  in partition_sched_domains_locked()
  2606 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {  in partition_sched_domains_locked()
kernel/sched/cputime.c
  972  rq = cpu_rq(cpu);  in kcpustat_field()
  1059 rq = cpu_rq(cpu);  in kcpustat_cpu_fetch()
tools/perf/Documentation/perf-probe.txt
  234  … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …