Lines matching refs: avg (kernel/sched/fair.c)

674 s64 avg = cfs_rq->avg_vruntime; in avg_vruntime() local
680 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
686 if (avg < 0) in avg_vruntime()
687 avg -= (load - 1); in avg_vruntime()
688 avg = div_s64(avg, load); in avg_vruntime()
691 return cfs_rq->min_vruntime + avg; in avg_vruntime()
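
The avg_vruntime() lines above return min_vruntime plus the load-weighted mean of the entity keys (vruntime offsets). div_s64() truncates toward zero, so the "avg -= (load - 1)" bias on a negative sum turns truncation into a floor. A minimal user-space sketch of just that rounding step, with int64_t standing in for s64:

    #include <assert.h>
    #include <stdint.h>

    /* Floor division for signed numerators: plain C division rounds
     * -7/4 to -1 (toward zero); biasing by (div - 1) yields the floor,
     * -2, matching lines 686-688 above. */
    static int64_t div_floor(int64_t num, int64_t div)
    {
        if (num < 0)
            num -= (div - 1);
        return num / div;   /* div_s64() in the kernel */
    }

    int main(void)
    {
        assert(div_floor(-7, 4) == -2); /* truncation alone gives -1 */
        assert(div_floor(7, 4) == 1);   /* positive path unchanged */
        return 0;
    }
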
741 s64 avg = cfs_rq->avg_vruntime; in entity_eligible() local
747 avg += entity_key(cfs_rq, curr) * weight; in entity_eligible()
751 return avg >= entity_key(cfs_rq, se) * load; in entity_eligible()
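
entity_eligible() reuses the same weighted sum but skips the division entirely: with k_j = v_j - min_vruntime and W the total load, v_i <= V is equivalent to k_i * W <= sum(w_j * k_j). A one-line restatement, names hypothetical:

    #include <stdint.h>

    /* Line 751 above, as a predicate: cross-multiplying by the total
     * weight avoids a per-check div_s64(). */
    static int key_is_eligible(int64_t key, int64_t weighted_key_sum,
                               int64_t total_weight)
    {
        return weighted_key_sum >= key * total_weight;
    }
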
1055 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1101 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1103 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1116 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1121 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1122 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
1123 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
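
Lines 1103-1123 seed a new entity's utilization: it gets a weight-proportional share of the runqueue's current util_avg, capped at half the spare capacity, and simply the cap if the runqueue is idle. A condensed sketch under those assumptions, names illustrative:

    /* Mirrors post_init_entity_util_avg()'s seeding arithmetic; the
     * kernel also stamps last_update_time for non-fair tasks (line
     * 1116) and initializes util_est, both elided here. */
    static long seed_util_avg(long rq_util, long rq_load,
                              long weight, long cpu_scale)
    {
        long cap = (cpu_scale - rq_util) / 2;
        long util;

        if (cap <= 0)
            return 0;       /* no spare capacity: leave it at zero */
        if (!rq_util)
            return cap;     /* idle runqueue: take the cap outright */
        util = rq_util * weight / (rq_load + 1);
        return util > cap ? cap : util;
    }
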
2708 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3609 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3610 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3616 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3617 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3619 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3620 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
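
dequeue_load_avg() (lines 3616-3620) removes an entity's contribution with two safeguards: sub_positive() clamps at zero instead of wrapping, and the sum is then re-floored at avg * PELT_MIN_DIVIDER so rounding can never leave a nonzero average backed by a too-small sum. A user-space restatement; the kernel's sub_positive() additionally uses READ_ONCE()/WRITE_ONCE(), and min_divider stands in for PELT_MIN_DIVIDER:

    #include <stdint.h>

    /* Clamp-at-zero subtraction for unsigned values: detect the wrap
     * by the result growing past the minuend. */
    #define sub_positive(ptr, val) do {         \
        uint64_t res = *(ptr) - (val);          \
        if (res > *(ptr))                       \
            res = 0;                            \
        *(ptr) = res;                           \
    } while (0)

    static void dequeue_load(uint64_t *load_avg, uint64_t *load_sum,
                             uint64_t se_avg, uint64_t se_weighted_sum,
                             uint32_t min_divider)
    {
        sub_positive(load_avg, se_avg);
        sub_positive(load_sum, se_weighted_sum);
        /* keep _sum consistent with _avg, as on lines 3619-3620 */
        if (*load_sum < *load_avg * min_divider)
            *load_sum = *load_avg * min_divider;
    }
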
3765 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3767 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
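
The reweight lines above lean on the basic PELT identity avg == weight * sum / divider, where get_pelt_divider() returns the largest sum attainable in the current accumulation window; a weight change therefore costs one divide instead of a replay of history. As a sketch:

    #include <stdint.h>

    /* The identity behind line 3767; div_u64() in the kernel. */
    static uint64_t pelt_avg(uint64_t weight, uint64_t sum,
                             uint32_t divider)
    {
        return weight * sum / divider;
    }
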
3883 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3990 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
4025 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
4050 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
4060 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
4085 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4092 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4165 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4176 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4180 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4181 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4182 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4183 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4186 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4187 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4190 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4191 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4197 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4208 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4211 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4212 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4213 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4214 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4217 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4218 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4220 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4221 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
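
update_tg_cfs_util() and update_tg_cfs_runnable() (lines 4165-4221) are the same pattern twice: copy the child runqueue's average into the group entity, rebuild the entity's sum from the divider, and push both deltas into the parent runqueue. A sketch of that shared shape with illustrative names; the kernel's add_positive() clamping and the PELT_MIN_DIVIDER re-floor are elided:

    #include <stdint.h>

    struct pelt_sig {
        long avg;
        int64_t sum;
    };

    static void propagate_avg(struct pelt_sig *se, struct pelt_sig *parent,
                              long child_rq_avg, uint32_t divider)
    {
        long delta_avg = child_rq_avg - se->avg;
        int64_t new_sum, delta_sum;

        if (!delta_avg)
            return;             /* nothing changed below us */

        /* the group entity adopts the child runqueue's signal */
        se->avg = child_rq_avg;
        new_sum = (int64_t)se->avg * divider;
        delta_sum = new_sum - se->sum;
        se->sum = new_sum;

        /* only the change propagates to the parent */
        parent->avg += delta_avg;
        parent->sum += delta_sum;
    }
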
4242 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4249 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4257 load_sum = div_u64(gcfs_rq->avg.load_sum, in update_tg_cfs_load()
4262 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4271 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4277 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4281 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4283 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4284 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4285 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4286 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4288 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4289 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
4338 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4377 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4471 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4476 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4545 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4554 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4555 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4563 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4565 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4567 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4568 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4569 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4571 se->avg.load_sum = 1; in attach_entity_load_avg()
4574 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4575 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4576 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4577 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4579 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4597 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4598 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4600 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4601 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4603 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4604 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4606 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4607 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4609 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
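
attach_entity_load_avg() (lines 4545-4585) synchronizes a joining entity with the runqueue clock and rebuilds its sums from its averages; load_sum is stored unweighted, hence the divide by the weight with 1 as the floor. detach_entity_load_avg() (lines 4597-4609) is the mirror image, subtracting via sub_positive() and re-flooring the runqueue sums. A sketch of the attach-side resync only, field names abbreviated:

    #include <stdint.h>

    struct sched_avg_sketch {
        uint64_t load_avg, load_sum;
        uint64_t util_avg, util_sum;
        uint64_t runnable_avg, runnable_sum;
    };

    /* Mirrors lines 4563-4571: sums follow from avg * divider, and
     * load_sum is scaled back to its unweighted form. */
    static void attach_resync(struct sched_avg_sketch *sa,
                              uint64_t weight, uint32_t divider)
    {
        sa->util_sum = sa->util_avg * divider;
        sa->runnable_sum = sa->runnable_avg * divider;
        sa->load_sum = sa->load_avg * divider;
        if (weight < sa->load_sum)
            sa->load_sum /= weight;
        else
            sa->load_sum = 1;
    }
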
4634 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4640 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4699 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4700 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4701 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
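
remove_entity_load_avg() (lines 4699-4701) runs in contexts that may not hold the runqueue lock (remote migration, exit), so instead of touching cfs_rq->avg directly it parks the departing averages in cfs_rq->removed, for update_cfs_rq_load_avg() to fold in from the runqueue's own context. A lock-elided sketch, names illustrative:

    /* Producer side of the deferred-removal buffer; the kernel guards
     * this with cfs_rq->removed.lock and a nr counter. */
    struct removed_sketch {
        unsigned long load, util, runnable;
        unsigned int nr;
    };

    static void park_removed(struct removed_sketch *r, unsigned long load_avg,
                             unsigned long util_avg, unsigned long runnable_avg)
    {
        r->util += util_avg;
        r->load += load_avg;
        r->runnable += runnable_avg;
        r->nr++;
    }
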
4707 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4712 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4719 return READ_ONCE(p->se.avg.util_avg); in task_util()
4724 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4743 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
4745 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
4759 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
4761 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_dequeue()
4802 ue = p->se.avg.util_est; in util_est_update()
4862 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
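
util_est keeps two values per task: the utilization it had when last dequeued (enqueued) and a slower EWMA; util_est_update() (lines 4802-4862) folds each dequeue into the latter. With UTIL_EST_WEIGHT_SHIFT == 2, the kernel's value but treated here as an assumption, the update is ewma <- (3*ewma + last) / 4, done with shifts:

    /* The shift dance from util_est_update(), unrolled. */
    static unsigned int util_est_ewma(unsigned int ewma, unsigned int last)
    {
        /* (ewma << 2) + (last - ewma), then >> 2: (3*ewma + last) / 4 */
        return ((ewma << 2) + last - ewma) >> 2;
    }
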
6806 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6810 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
6829 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6833 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
6836 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7559 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7563 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
7581 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util()
7646 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
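
cpu_load_without(), cpu_runnable_without(), and cpu_util_without() (lines 6806-6836 and 7646) share one shape: if p is not accounted on this CPU (different task_cpu(), or last_update_time == 0 meaning detached), the plain runqueue signal is the answer; otherwise p's own contribution is subtracted, clamped at zero via lsub_positive(), the local-variable variant of sub_positive(). Sketch:

    /* "What would this CPU's signal be without p?" */
    static unsigned long signal_without_task(unsigned long rq_signal,
                                             unsigned long task_signal,
                                             int task_accounted_here)
    {
        if (task_accounted_here)
            rq_signal -= task_signal < rq_signal ? task_signal : rq_signal;
        return rq_signal;
    }
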
8109 se->avg.last_update_time = 0; in migrate_task_rq_fair()
9106 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9109 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9252 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9265 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9283 return p->se.avg.load_avg; in task_h_load()
10035 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12592 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
12701 p->se.avg.last_update_time = 0; in task_change_group_fair()
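
The convention tying lines 4634-4640, 8109, 12592, and 12701 together: last_update_time == 0 marks an entity detached from any runqueue's PELT clock, so the next update_load_avg() attaches it (DO_ATTACH) rather than aging it. Aging across a migration would be wrong because each CPU's PELT clock advances independently; a toy decay model of the error the sentinel avoids (half-life and numbers illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define HALF_LIFE_MS 32     /* PELT's ~32 ms half-life, approximated */

    /* Geometric decay of a running average across elapsed wall time. */
    static uint64_t age(uint64_t avg, uint64_t last_ms, uint64_t now_ms)
    {
        for (uint64_t t = last_ms; t + HALF_LIFE_MS <= now_ms; t += HALF_LIFE_MS)
            avg /= 2;
        return avg;
    }

    int main(void)
    {
        uint64_t util = 512;
        /* decayed against a clock the task never ran under */
        printf("aged blindly: %llu\n", (unsigned long long)age(util, 0, 128));
        /* last_update_time == 0 path: attach, keep the signal intact */
        printf("attached:     %llu\n", (unsigned long long)util);
        return 0;
    }
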