Searched refs: this_rq in linux-6.6.21 (results 1 – 10 of 10, sorted by relevance)
/linux-6.6.21/kernel/sched/
loadavg.c
      78  long calc_load_fold_active(struct rq *this_rq, long adjust)  [in calc_load_fold_active(), argument]
      82  nr_active = this_rq->nr_running - adjust;  [in calc_load_fold_active()]
      83  nr_active += (int)this_rq->nr_uninterruptible;  [in calc_load_fold_active()]
      85  if (nr_active != this_rq->calc_load_active) {  [in calc_load_fold_active()]
      86  delta = nr_active - this_rq->calc_load_active;  [in calc_load_fold_active()]
      87  this_rq->calc_load_active = nr_active;  [in calc_load_fold_active()]
     251  calc_load_nohz_fold(this_rq());  [in calc_load_nohz_start()]
     265  struct rq *this_rq = this_rq();  [in calc_load_nohz_stop(), local]
     270  this_rq->calc_load_update = READ_ONCE(calc_load_update);  [in calc_load_nohz_stop()]
     271  if (time_before(jiffies, this_rq->calc_load_update))  [in calc_load_nohz_stop()]
    [all …]
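The loadavg.c hits all belong to the fold step of the global load average: each runqueue tracks how many active tasks it last reported, and only the delta is pushed to the global counter. A minimal userspace sketch of that arithmetic, assuming a simplified struct rq that keeps only the three fields involved:

    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct rq: only the three
     * fields the folding arithmetic touches. */
    struct rq {
            long nr_running;
            long nr_uninterruptible;
            long calc_load_active;
    };

    /* Mirrors the logic visible at lines 78-87 above: count runnable
     * plus uninterruptible tasks, and return how much that count moved
     * since the last fold so the caller can apply just the delta to
     * the global active-task count. */
    static long calc_load_fold_active(struct rq *this_rq, long adjust)
    {
            long nr_active, delta = 0;

            nr_active = this_rq->nr_running - adjust;
            nr_active += (int)this_rq->nr_uninterruptible;

            if (nr_active != this_rq->calc_load_active) {
                    delta = nr_active - this_rq->calc_load_active;
                    this_rq->calc_load_active = nr_active;
            }
            return delta;
    }

    int main(void)
    {
            struct rq rq = { .nr_running = 3, .nr_uninterruptible = 1 };

            printf("first fold:  %ld\n", calc_load_fold_active(&rq, 0)); /* 4 */
            printf("second fold: %ld\n", calc_load_fold_active(&rq, 0)); /* 0 */
            return 0;
    }

Folding deltas instead of absolute counts lets each CPU update the global load without ever reading the other runqueues.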
sched.h
     114  extern void calc_global_load_tick(struct rq *this_rq);
     115  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
    1210  #define this_rq() this_cpu_ptr(&runqueues)  [macro]
    1731  rq = this_rq();  [in this_rq_lock_irq()]
    2257  void (*task_woken)(struct rq *this_rq, struct task_struct *task);
    2276  void (*switched_from)(struct rq *this_rq, struct task_struct *task);
    2277  void (*switched_to) (struct rq *this_rq, struct task_struct *task);
    2278  void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
    2681  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  [in _double_lock_balance(), argument]
    2682  __releases(this_rq->lock)  [in _double_lock_balance()]
    [all …]
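The central definition here is the this_rq() macro at line 1210, which resolves the per-CPU runqueues variable to the copy owned by the executing CPU. A userspace analogue, with this_cpu_ptr() modelled as array indexing and smp_processor_id() stubbed out (both are stand-ins for illustration, not the kernel API):

    #include <stdio.h>

    #define NR_CPUS 4

    struct rq { long nr_running; };

    /* One runqueue per CPU; the kernel declares this as a per-CPU
     * variable rather than a plain array. */
    static struct rq runqueues[NR_CPUS];

    /* Stand-in for smp_processor_id(); pinned to CPU 0 here. */
    static int smp_processor_id(void) { return 0; }

    /* Userspace analogue of sched.h line 1210:
     *   #define this_rq() this_cpu_ptr(&runqueues)
     * this_cpu_ptr() resolves a per-CPU variable to the executing
     * CPU's copy; indexing by CPU id models the same idea. */
    #define this_rq() (&runqueues[smp_processor_id()])

    int main(void)
    {
            this_rq()->nr_running = 2;
            printf("cpu%d nr_running=%ld\n", smp_processor_id(),
                   this_rq()->nr_running);
            return 0;
    }

Because the result depends on which CPU executes the expression, this_rq() is only meaningful with preemption disabled or the runqueue lock held, which is why so many of the hits below sit inside locked or IRQ-off sections.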
fair.c
    4715  static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
    5869  if (rq == this_rq()) {  [in __unthrottle_cfs_rq_async()]
    7256  struct rq *this_rq = this_rq();  [in select_idle_cpu(), local]
    7276  if (unlikely(this_rq->wake_stamp < now)) {  [in select_idle_cpu()]
    7277  while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {  [in select_idle_cpu()]
    7278  this_rq->wake_stamp++;  [in select_idle_cpu()]
    7279  this_rq->wake_avg_idle >>= 1;  [in select_idle_cpu()]
    7283  avg_idle = this_rq->wake_avg_idle;  [in select_idle_cpu()]
    7331  this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);  [in select_idle_cpu()]
    7456  this_rq()->nr_running <= 1 &&  [in select_idle_sibling()]
    [all …]
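The select_idle_cpu() hits (lines 7276-7283) implement a decay of the runqueue's cached average idle time: every jiffy that has passed since the last update halves wake_avg_idle, so a stale estimate loses weight quickly instead of inflating the idle-CPU search budget forever. A standalone sketch of just that loop, with simplified types:

    #include <stdio.h>

    /* Simplified rq: just the two fields select_idle_cpu() consults. */
    struct rq {
            unsigned long wake_stamp;    /* jiffies at last update */
            unsigned long wake_avg_idle; /* decayed average idle time */
    };

    /* Mirrors fair.c lines 7276-7283: for every jiffy elapsed since
     * wake_stamp, halve wake_avg_idle, stopping early once it reaches
     * zero. */
    static unsigned long decayed_wake_avg_idle(struct rq *this_rq,
                                               unsigned long now)
    {
            if (this_rq->wake_stamp < now) {
                    while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
                            this_rq->wake_stamp++;
                            this_rq->wake_avg_idle >>= 1;
                    }
            }
            return this_rq->wake_avg_idle;
    }

    int main(void)
    {
            struct rq rq = { .wake_stamp = 100, .wake_avg_idle = 8000 };

            /* three jiffies later the estimate has been halved thrice */
            printf("avg_idle=%lu\n", decayed_wake_avg_idle(&rq, 103)); /* 1000 */
            return 0;
    }

Line 7331 shows the other half of the scheme: the time actually spent scanning for an idle CPU is subtracted from the estimate, so expensive searches shrink the next search's budget.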
rt.c
     636  return this_rq()->rd->span;  [in sched_rt_period_mask()]
    2329  rq = this_rq();  [in rto_push_irq_work_func()]
    2359  static void pull_rt_task(struct rq *this_rq)  [in pull_rt_task(), argument]
    2361  int this_cpu = this_rq->cpu, cpu;  [in pull_rt_task()]
    2365  int rt_overload_count = rt_overloaded(this_rq);  [in pull_rt_task()]
    2378  cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))  [in pull_rt_task()]
    2383  tell_cpu_to_push(this_rq);  [in pull_rt_task()]
    2388  for_each_cpu(cpu, this_rq->rd->rto_mask) {  [in pull_rt_task()]
    2402  this_rq->rt.highest_prio.curr)  [in pull_rt_task()]
    2411  double_lock_balance(this_rq, src_rq);  [in pull_rt_task()]
    [all …]
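pull_rt_task() runs when this runqueue may be missing a higher-priority RT task queued elsewhere: it bails out unless some other runqueue is marked overloaded, then scans the root domain's rto_mask for pull candidates. The sketch below keeps only that control flow; the plain bitmask stands in for the kernel's cpumask, and the locking, priority comparison, and actual migration are condensed into a comment:

    #include <stdio.h>

    #define NR_CPUS 4

    /* Condensed view of pull_rt_task()'s control flow (rt.c lines
     * 2359-2388): do nothing unless another runqueue is overloaded,
     * then walk the root domain's rto_mask treating every CPU except
     * our own as a pull candidate. */
    static void pull_rt_task(int this_cpu, unsigned int rto_mask)
    {
            int cpu;

            if (!(rto_mask & ~(1u << this_cpu)))
                    return; /* no overloaded source runqueue to pull from */

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (cpu == this_cpu || !(rto_mask & (1u << cpu)))
                            continue;
                    /* the real code takes double_lock_balance(), compares
                     * the source queue's highest RT priority against
                     * this_rq->rt.highest_prio.curr, and migrates a task
                     * only if it would win */
                    printf("cpu%d: candidate source cpu%d\n", this_cpu, cpu);
            }
    }

    int main(void)
    {
            pull_rt_task(0, (1u << 2) | (1u << 3));
            return 0;
    }

The double_lock_balance() at line 2411 pairs with the _double_lock_balance() helper seen in sched.h above: two runqueue locks are taken in a fixed order so concurrent pullers cannot deadlock.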
cputime.c
     225  struct rq *rq = this_rq();  [in account_idle_time()]
     260  steal -= this_rq()->prev_steal_time;  [in steal_account_process_time()]
     263  this_rq()->prev_steal_time += steal;  [in steal_account_process_time()]
     403  } else if (p == this_rq()->idle) {  [in irqtime_account_process_tick()]
     509  else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))  [in account_process_tick()]
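steal_account_process_time() (lines 260 and 263) consumes a cumulative stolen-time counter exported by the hypervisor: it charges only the increase since the last tick, then advances the per-runqueue prev_steal_time watermark. A sketch of that delta pattern, with the paravirt counter read replaced by a plain parameter:

    #include <stdio.h>

    /* Simplified rq carrying just the bookkeeping field used at
     * cputime.c lines 260-263. */
    struct rq { unsigned long long prev_steal_time; };

    /* The hypervisor reports total time stolen from this vCPU since
     * boot.  Only the growth since the last accounting pass should be
     * charged, so subtract the previously accounted total and then
     * advance it by the newly charged amount. */
    static unsigned long long
    steal_account_process_time(struct rq *this_rq,
                               unsigned long long cumulative_steal)
    {
            unsigned long long steal = cumulative_steal;

            steal -= this_rq->prev_steal_time;
            this_rq->prev_steal_time += steal;

            return steal; /* stolen time to charge for this tick */
    }

    int main(void)
    {
            struct rq rq = { 0 };

            printf("%llu\n", steal_account_process_time(&rq, 150)); /* 150 */
            printf("%llu\n", steal_account_process_time(&rq, 180)); /* 30 */
            return 0;
    }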
deadline.c
    2373  static void pull_dl_task(struct rq *this_rq)  [in pull_dl_task(), argument]
    2375  int this_cpu = this_rq->cpu, cpu;  [in pull_dl_task()]
    2381  if (likely(!dl_overloaded(this_rq)))  [in pull_dl_task()]
    2390  for_each_cpu(cpu, this_rq->rd->dlo_mask) {  [in pull_dl_task()]
    2400  if (this_rq->dl.dl_nr_running &&  [in pull_dl_task()]
    2401  dl_time_before(this_rq->dl.earliest_dl.curr,  [in pull_dl_task()]
    2407  double_lock_balance(this_rq, src_rq);  [in pull_dl_task()]
    2424  dl_task_is_earliest_deadline(p, this_rq)) {  [in pull_dl_task()]
    2441  activate_task(this_rq, p, 0);  [in pull_dl_task()]
    2449  double_unlock_balance(this_rq, src_rq);  [in pull_dl_task()]
    [all …]
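pull_dl_task() is the deadline-class twin of pull_rt_task(): it only pulls when another runqueue holds an earlier deadline than this_rq->dl.earliest_dl.curr (lines 2400-2401). The comparison helper, dl_time_before(), mirrors the signed-difference idiom of time_before() on jiffies (seen in loadavg.c above), which keeps the ordering correct even if the unsigned clock wraps. A standalone version of that idiom:

    #include <stdio.h>
    #include <stdint.h>

    /* Compare two absolute deadlines held in unsigned nanosecond
     * counters.  Casting the difference to a signed type makes the
     * comparison wraparound-safe, provided the two values are less
     * than half the counter range apart. */
    static int dl_time_before(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
            /* pull_dl_task() skips a source runqueue when the local
             * earliest deadline already wins, as at lines 2400-2401 */
            uint64_t this_earliest = 1000, src_earliest = 1500;

            if (dl_time_before(this_earliest, src_earliest))
                    printf("skip pull: local deadline is earlier\n");
            return 0;
    }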
idle.c
      19  idle_set_state(this_rq(), idle_state);  [in sched_idle_set_state()]
membarrier.c
     238  struct rq *rq = this_rq();  [in membarrier_update_current_mm()]
core.c
     842  if (rq == this_rq())  [in hrtick_start()]
    2421  this_rq()->nr_pinned++;  [in migrate_disable()]
    2457  this_rq()->nr_pinned--;  [in migrate_enable()]
    2584  struct rq *rq = this_rq();  [in migration_cpu_stop()]
    2687  struct rq *lowest_rq = NULL, *rq = this_rq();  [in push_cpu_stop()]
    3730  rq = this_rq();  [in ttwu_stat()]
    3877  struct rq *rq = this_rq();  [in sched_ttwu_pending()]
    5218  struct rq *rq = this_rq();  [in finish_task_switch()]
    5963  schedstat_inc(this_rq()->sched_count);  [in schedule_debug()]
    8940  rq = this_rq();  [in yield_to()]
    [all …]
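Among the core.c hits, migrate_disable()/migrate_enable() (lines 2421 and 2457) show the simplest this_rq() use: the current CPU's runqueue keeps an nr_pinned count of tasks that must not be moved off the CPU. The sketch below keeps only the counter; the kernel versions also manage preemption, task flags, and affinity fixup:

    #include <stdio.h>

    struct rq { int nr_pinned; };

    /* Single-CPU stand-in for the per-CPU runqueue. */
    static struct rq this_rq_storage;
    #define this_rq() (&this_rq_storage)

    /* Pin/unpin the current task on its CPU by bumping a per-runqueue
     * counter, mirroring core.c lines 2421 and 2457; the load balancer
     * consults nr_pinned when deciding what it may migrate. */
    static void migrate_disable(void) { this_rq()->nr_pinned++; }
    static void migrate_enable(void)  { this_rq()->nr_pinned--; }

    int main(void)
    {
            migrate_disable();
            printf("nr_pinned=%d\n", this_rq()->nr_pinned); /* 1 */
            migrate_enable();
            printf("nr_pinned=%d\n", this_rq()->nr_pinned); /* 0 */
            return 0;
    }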
/linux-6.6.21/tools/testing/selftests/bpf/progs/
test_access_variable_array.c
      11  int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,  [in BPF_PROG(), argument]
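This selftest attaches an fentry program to a scheduler function whose leading arguments are (int this_cpu, struct rq *this_rq, ...). A sketch in that shape; the load_balance() hook point, the third sd argument, and the sd->span[0] access are assumptions inferred from the test's name (access to a variable-length array member), not confirmed by the snippet above:

    // SPDX-License-Identifier: GPL-2.0
    /* Assumed hook: the scheduler's load_balance(), whose first
     * arguments are (int this_cpu, struct rq *this_rq,
     * struct sched_domain *sd).  sd->span[] is a flexible array at the
     * tail of struct sched_domain, so reading it exercises the
     * verifier's handling of variable-sized struct members. */
    #include "vmlinux.h"
    #include <bpf/bpf_tracing.h>

    unsigned long span = 0;

    SEC("fentry/load_balance")
    int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
                 struct sched_domain *sd)
    {
            /* direct BTF-typed access; fentry programs may dereference
             * trusted pointer arguments without a probe-read helper */
            span = sd->span[0];
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";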