Lines matching refs:dl in kernel/sched/deadline.c
57 return container_of(dl_se, struct task_struct, dl); in dl_task_of()
62 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
70 return &rq->dl; in dl_rq_of_se()
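
The three accessors above (dl_task_of(), rq_of_dl_rq(), dl_rq_of_se()) are all instances of the kernel's container_of() idiom: recover the enclosing structure from a pointer to one of its embedded members. A minimal userspace sketch of the same pattern, with the structs trimmed to just the fields the example needs:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: subtract the member's offset
 * from the member pointer to get back the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_dl_entity { unsigned long long deadline; };
struct task_struct     { int pid; struct sched_dl_entity dl; };

static struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

int main(void)
{
	struct task_struct t = { .pid = 42 };

	/* Recover &t from a pointer to its embedded dl entity. */
	printf("%d\n", dl_task_of(&t.dl)->pid); /* prints 42 */
	return 0;
}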
177 rq->dl.extra_bw += bw; in __dl_update()
183 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
204 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); in __dl_update() local
206 dl->extra_bw += bw; in __dl_update()
313 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
319 if (p->dl.dl_non_contending) { in dl_change_utilization()
320 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
321 p->dl.dl_non_contending = 0; in dl_change_utilization()
329 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
332 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
333 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
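
dl_change_utilization() above swaps a task's old bandwidth for a new one in the per-runqueue counters. A toy model of the two counters involved, under simplified assumptions (the kernel's this_bw/running_bw pair, minus locking, the inactive timer, and the SCHED_FLAG_SUGOV special case):

#include <stdint.h>
#include <stdio.h>

/* rq_bw ("this_bw") covers every deadline task attached to the
 * runqueue; running_bw only those still contending.  A task whose
 * dl_non_contending flag is set is still counted in running_bw
 * until its inactive timer fires, so it must be removed here. */
struct dl_rq_model {
	uint64_t rq_bw;
	uint64_t running_bw;
};

static void change_utilization(struct dl_rq_model *rq, uint64_t old_bw,
			       uint64_t new_bw, int non_contending)
{
	if (non_contending)
		rq->running_bw -= old_bw;
	rq->rq_bw -= old_bw;   /* swap the old bandwidth... */
	rq->rq_bw += new_bw;   /* ...for the new one */
}

int main(void)
{
	struct dl_rq_model rq = { .rq_bw = 100, .running_bw = 100 };

	change_utilization(&rq, 100, 50, 1);
	printf("rq_bw=%llu running_bw=%llu\n",
	       (unsigned long long)rq.rq_bw,
	       (unsigned long long)rq.running_bw);
	return 0;
}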
392 struct sched_dl_entity *dl_se = &p->dl; in task_non_contending()
431 sub_rq_bw(&p->dl, &rq->dl); in task_non_contending()
433 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
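
task_non_contending() decides whether a blocking task's bandwidth can be released at once or only at its "0-lag" time, the instant when consuming the remaining runtime at the reserved rate brings the task's lag to zero. A sketch of that computation as the kernel performs it (deadline - runtime * dl_period / dl_runtime), with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* If the 0-lag point is already in the past the bandwidth is
 * released immediately; otherwise the inactive timer is armed to
 * release it at exactly that point. */
static int64_t zerolag_time(uint64_t deadline, int64_t runtime,
			    uint64_t dl_period, uint64_t dl_runtime)
{
	return (int64_t)deadline -
	       (runtime * (int64_t)dl_period) / (int64_t)dl_runtime;
}

int main(void)
{
	/* 2ms of runtime left on a 4ms/10ms reservation: the 0-lag
	 * point lies 5ms before the current deadline (values in ns). */
	printf("%lld\n", (long long)zerolag_time(100000000, 2000000,
						 10000000, 4000000));
	return 0;
}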
485 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
599 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl); in __pushable_less()
613 &rq->dl.pushable_dl_tasks_root, in enqueue_pushable_dl_task()
616 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
621 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task()
630 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; in dequeue_pushable_dl_task()
637 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); in has_pushable_dl_tasks()
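
The pushable-tasks rbtree referenced above is ordered by absolute deadline via __pushable_less(), built on the kernel's wrap-safe dl_time_before() comparison, so the cached leftmost node is always the most urgent candidate to push away. A self-contained sketch, with the entity reduced to its deadline field:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Signed subtraction keeps the ordering correct even if the 64-bit
 * nanosecond clock ever wrapped. */
static inline bool dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

struct sched_dl_entity { uint64_t deadline; };

static bool pushable_less(const struct sched_dl_entity *a,
			  const struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

int main(void)
{
	struct sched_dl_entity early = { 100 }, late = { 200 };

	assert(pushable_less(&early, &late));
	/* Wrap-around: a deadline just before the wrap still sorts
	 * before one just after it. */
	assert(dl_time_before(UINT64_MAX - 1, 1));
	return 0;
}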
700 if (p->dl.dl_non_contending || p->dl.dl_throttled) { in dl_task_offline_migration()
707 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
708 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
710 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
711 add_running_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
713 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
714 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
724 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
729 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
1052 struct sched_dl_entity *dl_se = &p->dl; in start_dl_timer()
1283 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ in grub_reclaim()
1285 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
1295 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min) in grub_reclaim()
1298 u_act = BW_UNIT - u_inact - rq->dl.extra_bw; in grub_reclaim()
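
The grub_reclaim() lines above implement the GRUB reclaiming rule: the consumed time delta is scaled by the active utilization, clamped so a task never drops below its own reserved share. A standalone sketch of the same fixed-point arithmetic, taking u_act_min directly as a parameter (the kernel derives it from bw_ratio and RATIO_SHIFT) and with made-up driver values:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20
#define BW_UNIT  (1ULL << BW_SHIFT) /* fixed-point 1.0 */

static uint64_t grub_scale(uint64_t delta, uint64_t u_inact,
			   uint64_t extra_bw, uint64_t u_act_min)
{
	uint64_t u_act;

	/* Clamp so the task is charged at least its reserved share. */
	if (u_inact + extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}

int main(void)
{
	/* 1ms consumed while half the bandwidth is inactive is charged
	 * as only ~0.5ms of budget: the idle half is reclaimed. */
	printf("%llu\n", (unsigned long long)
	       grub_scale(1000000, BW_UNIT / 2, 0, BW_UNIT / 10));
	return 0;
}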
1310 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
1354 &curr->dl); in update_curr_dl()
1378 if (!is_leftmost(curr, &rq->dl)) in update_curr_dl()
1426 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1427 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1432 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1441 sub_running_bw(dl_se, &rq->dl); in inactive_task_timer()
1672 if (is_dl_boosted(&p->dl)) { in enqueue_task_dl()
1685 if (p->dl.dl_throttled) { in enqueue_task_dl()
1691 hrtimer_try_to_cancel(&p->dl.dl_timer); in enqueue_task_dl()
1692 p->dl.dl_throttled = 0; in enqueue_task_dl()
1704 p->dl.dl_throttled = 0; in enqueue_task_dl()
1718 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) in enqueue_task_dl()
1719 dl_check_constrained_dl(&p->dl); in enqueue_task_dl()
1722 add_rq_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1723 add_running_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1738 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { in enqueue_task_dl()
1740 task_contending(&p->dl, flags); in enqueue_task_dl()
1746 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); in enqueue_task_dl()
1748 enqueue_dl_entity(&p->dl, flags); in enqueue_task_dl()
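
The enqueue path above treats implicit-deadline tasks (dl_deadline == dl_period) and constrained-deadline ones differently: only the latter need the extra CBS check in dl_check_constrained_dl(). A sketch of the dl_is_implicit() test, with the entity trimmed to the two relevant fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sched_dl_entity { uint64_t dl_deadline, dl_period; };

/* Implicit: relative deadline equals the period.  Constrained:
 * deadline < period, which makes wakeups inside the old period
 * need the extra admission check. */
static bool dl_is_implicit(const struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

int main(void)
{
	struct sched_dl_entity implicit    = { 10000000, 10000000 };
	struct sched_dl_entity constrained = {  5000000, 10000000 };

	printf("%d %d\n", dl_is_implicit(&implicit),
	       dl_is_implicit(&constrained)); /* 1 0 */
	return 0;
}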
1756 update_stats_dequeue_dl(&rq->dl, &p->dl, flags); in __dequeue_task_dl()
1757 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
1767 sub_running_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1768 sub_rq_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1802 rq->curr->dl.dl_yielded = 1; in yield_task_dl()
1819 return (!rq->dl.dl_nr_running || in dl_task_is_earliest_deadline()
1820 dl_time_before(p->dl.deadline, in dl_task_is_earliest_deadline()
1821 rq->dl.earliest_dl.curr)); in dl_task_is_earliest_deadline()
1852 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1890 if (p->dl.dl_non_contending) { in migrate_task_rq_dl()
1892 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1893 p->dl.dl_non_contending = 0; in migrate_task_rq_dl()
1901 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in migrate_task_rq_dl()
1904 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1931 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
1954 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1964 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1973 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1983 struct sched_dl_entity *dl_se = &p->dl; in set_next_task_dl()
1984 struct dl_rq *dl_rq = &rq->dl; in set_next_task_dl()
1987 if (on_dl_rq(&p->dl)) in set_next_task_dl()
2018 struct dl_rq *dl_rq = &rq->dl; in pick_task_dl()
2044 struct sched_dl_entity *dl_se = &p->dl; in put_prev_task_dl()
2045 struct dl_rq *dl_rq = &rq->dl; in put_prev_task_dl()
2047 if (on_dl_rq(&p->dl)) in put_prev_task_dl()
2053 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2075 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2076 is_leftmost(p, &rq->dl)) in task_tick_dl()
2113 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); in pick_earliest_pushable_dl_task()
2279 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); in pick_next_pushable_dl_task()
2302 if (!rq->dl.overloaded) in push_dl_task()
2316 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && in push_dl_task()
2408 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2409 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
2410 src_rq->dl.earliest_dl.next)) in pull_dl_task()
2421 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
2431 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
2440 if (dl_time_before(p->dl.deadline, in pull_dl_task()
2441 src_rq->curr->dl.deadline)) in pull_dl_task()
2450 dmin = p->dl.deadline; in pull_dl_task()
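
The pull_dl_task() lines above scan the other runqueues and track the earliest pushable deadline found so far in dmin. A toy model of that selection loop, with an array standing in for the per-CPU runqueues and dmin seeded with a large signed value as the kernel's initializer is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline bool dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	/* Earliest pushable deadline per candidate source CPU. */
	uint64_t earliest_pushable[] = { 900, 300, 700 };
	uint64_t dmin = INT64_MAX;
	int best = -1;

	for (int cpu = 0; cpu < 3; cpu++) {
		if (dl_time_before(earliest_pushable[cpu], dmin)) {
			dmin = earliest_pushable[cpu];
			best = cpu;
		}
	}
	printf("pull from cpu %d (deadline %llu)\n",
	       best, (unsigned long long)dmin);
	return 0;
}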
2482 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2514 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2524 if (rq->dl.overloaded) in rq_online_dl()
2528 if (rq->dl.dl_nr_running > 0) in rq_online_dl()
2529 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
2535 if (rq->dl.overloaded) in rq_offline_dl()
2568 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2596 if (task_on_rq_queued(p) && p->dl.dl_runtime) in switched_from_dl()
2606 if (p->dl.dl_non_contending) in switched_from_dl()
2607 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2608 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2616 if (p->dl.dl_non_contending) in switched_from_dl()
2617 p->dl.dl_non_contending = 0; in switched_from_dl()
2624 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2636 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in switched_to_dl()
2641 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2648 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2675 if (!rq->dl.overloaded) in prio_changed_dl()
2683 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2696 DEFINE_SCHED_CLASS(dl) = {
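
DEFINE_SCHED_CLASS(dl) fills a struct sched_class with the dl_ hooks listed throughout this file and places it in a dedicated linker section so the scheduling classes end up sorted by priority. A minimal userspace model of the ops-table pattern behind it (hook signatures invented for the sketch):

#include <stdio.h>

struct sched_class_model {
	const char *name;
	void (*enqueue_task)(int cpu);
	void (*dequeue_task)(int cpu);
};

static void enqueue_task_dl_model(int cpu) { printf("enqueue on %d\n", cpu); }
static void dequeue_task_dl_model(int cpu) { printf("dequeue on %d\n", cpu); }

/* The core scheduler only ever calls through the table, so each
 * class plugs in its own policy without touching the callers. */
static const struct sched_class_model dl_sched_class_model = {
	.name         = "dl",
	.enqueue_task = enqueue_task_dl_model,
	.dequeue_task = dequeue_task_dl_model,
};

int main(void)
{
	dl_sched_class_model.enqueue_task(0);
	return 0;
}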
2811 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); in sched_dl_do_global()
2837 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2851 if (hrtimer_active(&p->dl.inactive_timer)) in sched_dl_overflow()
2852 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2856 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2864 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
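
sched_dl_overflow() admits a parameter change only if replacing the task's old bandwidth with the new one keeps the root domain within its allowed share. A sketch of that admission test, modeling the limit as a flat fraction of one CPU (the kernel additionally scales by CPU capacity and count):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20
#define BW_UNIT  (1ULL << BW_SHIFT)

/* Overflow iff the total after swapping old_bw for new_bw would
 * exceed the configured limit. */
static bool dl_overflow(uint64_t total_bw, uint64_t bw_limit,
			uint64_t old_bw, uint64_t new_bw)
{
	return total_bw - old_bw + new_bw > bw_limit;
}

int main(void)
{
	uint64_t limit = (BW_UNIT * 95) / 100; /* default 95% share */

	/* A second 0.5 reservation on top of an existing 0.5 would
	 * exceed a 95% limit, so admission fails. */
	printf("%d\n", dl_overflow(BW_UNIT / 2, limit, 0, BW_UNIT / 2));
	return 0;
}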
2891 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
2903 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
2973 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
2994 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
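
__setparam_dl() above derives the fixed-point bandwidth stored in p->dl.dl_bw by dividing runtime by period in 20-bit fixed point (the kernel helper to_ratio()). A sketch of that conversion, omitting the kernel's infinite-runtime special case; the period defaults to the deadline when the caller left sched_period at zero:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t runtime  = 2500000;  /* 2.5ms */
	uint64_t deadline = 10000000; /* 10ms  */
	uint64_t period   = 0;        /* unset: defaults to deadline */

	if (!period)
		period = deadline;
	/* 0.25 in fixed point: 262144 == (1 << 20) / 4 */
	printf("%llu\n", (unsigned long long)to_ratio(period, runtime));
	return 0;
}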
3035 overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0); in dl_cpu_busy()
3044 __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu)); in dl_cpu_busy()
3057 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); in print_dl_stats()