
Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance

/linux-6.6.21/kernel/sched/
fair.c
9336 struct sched_group *busiest; /* Busiest group in this sd */ member
9357 .busiest = NULL, in init_sd_lb_stats()
9701 struct sg_lb_stats *busiest, in sibling_imbalance() argument
9707 if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running) in sibling_imbalance()
9710 ncores_busiest = sds->busiest->cores; in sibling_imbalance()
9714 imbalance = busiest->sum_nr_running; in sibling_imbalance()
9720 imbalance = ncores_local * busiest->sum_nr_running; in sibling_imbalance()
9728 busiest->sum_nr_running > 1) in sibling_imbalance()
9856 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest() local
9874 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
[all …]
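
The sibling_imbalance() excerpt above compares two sched groups of possibly different sizes by cross-multiplying each group's task count with the other group's core count, so the nr_running/cores ratios can be compared without division. A minimal user-space sketch of that idea, using simplified stand-in types rather than the kernel's lb_env/sd_lb_stats structures, might look like:

#include <stdio.h>

/*
 * Hedged sketch of the per-core imbalance calculation seen in the
 * sibling_imbalance() lines above. The struct and field names are
 * simplified stand-ins, not the kernel's sd_lb_stats/sg_lb_stats.
 */
struct group_stats {
	unsigned int cores;      /* number of CPU cores in the group */
	unsigned int nr_running; /* runnable tasks queued on the group */
};

static long group_imbalance(const struct group_stats *busiest,
			    const struct group_stats *local)
{
	long imbalance;

	if (!busiest->nr_running)
		return 0;

	if (busiest->cores == local->cores) {
		/* Equal-sized groups: imbalance is the raw task-count gap. */
		imbalance = (long)busiest->nr_running - (long)local->nr_running;
		return imbalance > 0 ? imbalance : 0;
	}

	/*
	 * Different-sized groups: cross-multiply so the nr_running/cores
	 * ratios are compared without division, then normalize back to
	 * task units with rounding.
	 */
	imbalance = (long)local->cores * busiest->nr_running -
		    (long)busiest->cores * local->nr_running;
	if (imbalance < 0)
		imbalance = 0;
	imbalance = (2 * imbalance + local->cores + busiest->cores) /
		    (local->cores + busiest->cores);

	return imbalance;
}

int main(void)
{
	struct group_stats big   = { .cores = 4, .nr_running = 6 };
	struct group_stats small = { .cores = 2, .nr_running = 1 };

	/* 6/4 vs 1/2 tasks per core: expect a small positive imbalance. */
	printf("imbalance = %ld\n", group_imbalance(&big, &small));
	return 0;
}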
sched.h
2681 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance() argument
2683 __acquires(busiest->lock) in _double_lock_balance()
2687 double_rq_lock(this_rq, busiest); in _double_lock_balance()
2700 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance() argument
2702 __acquires(busiest->lock) in _double_lock_balance()
2705 if (__rq_lockp(this_rq) == __rq_lockp(busiest) || in _double_lock_balance()
2706 likely(raw_spin_rq_trylock(busiest))) { in _double_lock_balance()
2707 double_rq_clock_clear_update(this_rq, busiest); in _double_lock_balance()
2711 if (rq_order_less(this_rq, busiest)) { in _double_lock_balance()
2712 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); in _double_lock_balance()
[all …]
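
The second _double_lock_balance() variant above avoids deadlock by first trylocking the busiest runqueue and, if that fails, falling back to a fixed lock order, dropping this_rq's lock only when it would otherwise be taken out of order. A rough user-space sketch of that pattern with POSIX mutexes, where struct rq and its id ordering key are stand-ins rather than the kernel's raw_spin_rq_*()/rq_order_less() API, could be:

#include <pthread.h>

/* Stand-in runqueue: an ordering key plus a lock. */
struct rq {
	int id;                 /* global lock-ordering key */
	pthread_mutex_t lock;
};

/*
 * Called with this_rq->lock held; returns with both locks held.
 * Returns 1 if this_rq->lock was dropped and re-taken along the way,
 * so the caller may need to re-validate state it read earlier.
 * Assumes this_rq != busiest.
 */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	/* Fast path: the busiest lock is uncontended. */
	if (pthread_mutex_trylock(&busiest->lock) == 0)
		return 0;

	/*
	 * Contended. If this_rq already comes first in the global lock
	 * order, blocking on busiest's lock while holding this_rq's
	 * cannot deadlock, so just take it.
	 */
	if (this_rq->id < busiest->id) {
		pthread_mutex_lock(&busiest->lock);
		return 0;
	}

	/* Out of order: release this_rq, then take both locks in order. */
	pthread_mutex_unlock(&this_rq->lock);
	pthread_mutex_lock(&busiest->lock);
	pthread_mutex_lock(&this_rq->lock);
	return 1;
}

int main(void)
{
	struct rq a = { .id = 0, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct rq b = { .id = 1, .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&a.lock);   /* caller already holds its own rq */
	double_lock_balance(&a, &b);   /* both locks held on return */
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

A return value of 1 mirrors the kernel variant's contract: the caller's lock was released at some point, so anything derived from the runqueue before the call may be stale.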
/linux-6.6.21/Documentation/scheduler/
sched-domains.rst
48 Initially, load_balance() finds the busiest group in the current sched domain.
49 If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
51 CPU's runqueue and the newly found busiest one and starts moving tasks from it
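
The documentation lines above describe the overall flow: find the busiest group, then its busiest runqueue, lock both runqueues, and pull tasks toward the balancing CPU. As a toy, single-level illustration of that pull model only (no sched domains, groups, or locking, and hypothetical names throughout), one could write:

#include <stdio.h>

#define NR_CPUS 4

/* Toy model: per-CPU runnable task counts, nothing else. */
static int nr_running[NR_CPUS] = { 5, 1, 2, 0 };

/* Find the CPU with the most runnable tasks ("busiest runqueue"). */
static int find_busiest_cpu(void)
{
	int cpu, busiest = 0;

	for (cpu = 1; cpu < NR_CPUS; cpu++)
		if (nr_running[cpu] > nr_running[busiest])
			busiest = cpu;
	return busiest;
}

/* Pull tasks from the busiest CPU until the two are roughly even. */
static void load_balance_toy(int this_cpu)
{
	int busiest = find_busiest_cpu();

	if (busiest == this_cpu)
		return;
	while (nr_running[busiest] > nr_running[this_cpu] + 1) {
		nr_running[busiest]--;
		nr_running[this_cpu]++;
	}
}

int main(void)
{
	load_balance_toy(3);	/* idle CPU 3 pulls from the busiest CPU */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %d tasks\n", cpu, nr_running[cpu]);
	return 0;
}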