Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance
/linux-6.1.9/kernel/sched/
sched.h — all matches inside _double_lock_balance():

  2591  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  (argument)
  2593          __acquires(busiest->lock)
  2597          double_rq_lock(this_rq, busiest);
  2610  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  (argument)
  2612          __acquires(busiest->lock)
  2615          if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
  2616              likely(raw_spin_rq_trylock(busiest))) {
  2617                  double_rq_clock_clear_update(this_rq, busiest);
  2621          if (rq_order_less(this_rq, busiest)) {
  2622          raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
  [all …]
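The hits at 2591 and 2610 are the two configuration-dependent variants of _double_lock_balance() (selected by CONFIG_PREEMPTION in this kernel). The second variant avoids deadlock without always dropping this_rq's lock: it first trylocks busiest, and only when that fails falls back to a fixed global lock order via rq_order_less(). A minimal user-space sketch of that pattern, with pthread mutexes standing in for runqueue spinlocks and pointer order standing in for rq_order_less(); everything here is illustrative, not kernel API:

/*
 * Sketch of the trylock-then-ordered-lock pattern from the second
 * _double_lock_balance() variant above.  Two threads that each hold one
 * lock of the same pair cannot deadlock, because blocking acquisition
 * only ever happens in the globally agreed order.
 */
#include <pthread.h>
#include <stdint.h>

struct rq {
	pthread_mutex_t lock;
};

/* Fixed global order; the kernel orders runqueues, not addresses. */
static int rq_order_less(struct rq *a, struct rq *b)
{
	return (uintptr_t)a < (uintptr_t)b;
}

/*
 * Called with this_rq->lock held; returns with both locks held.
 * Returns 1 if this_rq->lock was dropped along the way, 0 otherwise.
 */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	/* Fast path: no contention, no ordering concern. */
	if (pthread_mutex_trylock(&busiest->lock) == 0)
		return 0;

	/* We hold the lower-ordered lock, so blocking cannot deadlock. */
	if (rq_order_less(this_rq, busiest)) {
		pthread_mutex_lock(&busiest->lock);
		return 0;
	}

	/* Wrong order: drop ours, then take both in the agreed order. */
	pthread_mutex_unlock(&this_rq->lock);
	pthread_mutex_lock(&busiest->lock);
	pthread_mutex_lock(&this_rq->lock);
	return 1;
}

The return value mirrors the kernel convention visible in the snippets: a nonzero result means this_rq's lock was released and reacquired, so the caller must assume its runqueue state may have changed in between.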
fair.c:

  8761          struct sched_group *busiest;    /* Busiest group in this sd */  (member)
  8782          .busiest = NULL,  (in init_sd_lb_stats())

  in update_sd_pick_busiest():
  9244          struct sg_lb_stats *busiest = &sds->busiest_stat;  (local)
  9262          if (sgs->group_type > busiest->group_type)
  9265          if (sgs->group_type < busiest->group_type)
  9276          if (sgs->avg_load <= busiest->avg_load)
  9289          if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
  9298          if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
  9313          if (sgs->avg_load <= busiest->avg_load)
  9325          if (sgs->idle_cpus > busiest->idle_cpus)
  [all …]
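Lines 9262–9325 show the shape of update_sd_pick_busiest(): a candidate group's statistics (sgs) are compared against the current busiest, a more severe group_type wins outright, and ties fall through to type-specific comparisons (average load, the asym-packing preference at 9289, misfit load, idle CPU count). A condensed reconstruction of that cascade follows; struct sg_stats and the enum values are simplified stand-ins for the kernel's struct sg_lb_stats and enum group_type (which has more states, including the asym-packing case omitted here), and only the inequality directions are taken from the quoted lines:

/*
 * Illustrative reconstruction of the update_sd_pick_busiest() decision
 * cascade.  Types and enum ordering are assumptions for the sketch.
 */
#include <stdbool.h>

enum group_type {		/* ordered least to most overloaded */
	GROUP_HAS_SPARE,
	GROUP_MISFIT_TASK,
	GROUP_OVERLOADED,
};

struct sg_stats {
	enum group_type group_type;
	unsigned long avg_load;
	unsigned long group_misfit_task_load;
	unsigned int idle_cpus;
};

/* Should the candidate group replace the current busiest? */
static bool pick_busiest(const struct sg_stats *sgs,
			 const struct sg_stats *busiest)
{
	/* A more severe classification wins outright (9262/9265)... */
	if (sgs->group_type > busiest->group_type)
		return true;
	if (sgs->group_type < busiest->group_type)
		return false;

	/* ...equal classifications use type-specific tie-breakers. */
	switch (sgs->group_type) {
	case GROUP_OVERLOADED:
		/* Higher average load is busier (9276/9313). */
		return sgs->avg_load > busiest->avg_load;
	case GROUP_MISFIT_TASK:
		/* The larger misfit task wins (9298). */
		return sgs->group_misfit_task_load >=
		       busiest->group_misfit_task_load;
	default:
		/* Fewer idle CPUs means busier (9325). */
		return sgs->idle_cpus < busiest->idle_cpus;
	}
}

The early group_type comparison is the key design point: the per-type heuristics only ever arbitrate between groups that are overloaded in the same way.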
/linux-6.1.9/Documentation/scheduler/
sched-domains.rst:

  48  Initially, load_balance() finds the busiest group in the current sched domain.
  49  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
  51  CPU's runqueue and the newly found busiest one and starts moving tasks from it
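The excerpt names the three phases of load_balance(): pick the busiest group in the domain, pick the busiest runqueue inside that group, then lock both runqueues and pull tasks. Below is a deliberately crude sketch of the first two phases using total load as the only metric; the real find_busiest_group()/find_busiest_queue() in fair.c (see the update_sd_pick_busiest() hits above) weigh group_type, capacity, and more, and phase three is where the _double_lock_balance() pattern from the sched.h hits comes in. All types here are simplified stand-ins:

/*
 * Phase 1 and 2 of the flow described in sched-domains.rst, reduced
 * to "most total load wins".  Not the kernel's selection logic.
 */
#include <stddef.h>

struct rq {
	unsigned long load;
};

struct sched_group {
	struct rq **rqs;	/* runqueues of the CPUs in this group */
	int nr_rqs;
};

struct sched_domain {
	struct sched_group *groups;
	int nr_groups;
};

/* Phase 1: the busiest group in the domain, by total load. */
static struct sched_group *busiest_group(struct sched_domain *sd)
{
	struct sched_group *best = NULL;
	unsigned long best_load = 0;

	for (int i = 0; i < sd->nr_groups; i++) {
		unsigned long load = 0;

		for (int j = 0; j < sd->groups[i].nr_rqs; j++)
			load += sd->groups[i].rqs[j]->load;
		if (load > best_load) {
			best_load = load;
			best = &sd->groups[i];
		}
	}
	return best;
}

/* Phase 2: the busiest runqueue inside that group. */
static struct rq *busiest_queue(struct sched_group *sg)
{
	struct rq *best = NULL;

	for (int j = 0; j < sg->nr_rqs; j++)
		if (!best || sg->rqs[j]->load > best->load)
			best = sg->rqs[j];
	return best;
}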