Lines matching references to td in block/blk-throttle.c
124 return tg->td; in sq_to_td()
137 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) in throtl_adjusted_limit() argument
140 if (td->scale < 4096 && time_after_eq(jiffies, in throtl_adjusted_limit()
141 td->low_upgrade_time + td->scale * td->throtl_slice)) in throtl_adjusted_limit()
142 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; in throtl_adjusted_limit()
144 return low + (low >> 1) * td->scale; in throtl_adjusted_limit()
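
The lines above (137-144) implement the post-upgrade ramp: once the state machine moves to LIMIT_MAX, the effective limit starts at the configured low limit and grows by half of it for every throtl_slice elapsed since low_upgrade_time, with td->scale no longer recomputed once it reaches 4096. A minimal standalone model of that rule (names are illustrative, not the kernel's API):

    #include <stdint.h>

    /* Model of throtl_adjusted_limit(): limit = low * (1 + scale / 2),
     * where scale counts throtl_slice periods since the last upgrade
     * and stops being recomputed once it reaches 4096. */
    static uint64_t adjusted_limit(uint64_t low, uint64_t slices_since_upgrade)
    {
            uint64_t scale = slices_since_upgrade > 4096 ? 4096
                                                         : slices_since_upgrade;

            return low + (low >> 1) * scale;
    }

    /* Example: low = 10 MB/s; after four slices the cap is 10 + 5 * 4 = 30 MB/s. */
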
150 struct throtl_data *td; in tg_bps_limit() local
156 td = tg->td; in tg_bps_limit()
157 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
158 if (ret == 0 && td->limit_index == LIMIT_LOW) { in tg_bps_limit()
161 tg->iops[rw][td->limit_index]) in tg_bps_limit()
167 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
171 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
180 struct throtl_data *td; in tg_iops_limit() local
186 td = tg->td; in tg_iops_limit()
187 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
188 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
191 tg->bps[rw][td->limit_index]) in tg_iops_limit()
197 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
201 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
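
When a group sits at LIMIT_LOW without a low limit configured for the direction being queried (the ret == 0 branches at 158 and 188), the lookup does not simply return zero. In the surrounding kernel code the fallback returns an effectively unlimited value for intermediate nodes, or when the sibling limit is set (the iops check at 161 and bps check at 191), and a small floor otherwise. The sketch below is a hedged reconstruction of that decision, with an assumed floor constant:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hedged sketch of the ret == 0 fallback in tg_bps_limit(); the
     * floor constant is an assumed stand-in for the kernel's minimum. */
    #define ASSUMED_MIN_BPS         1024ULL

    static uint64_t low_bps_fallback(bool has_child_cgroups,
                                     unsigned int iops_low_limit)
    {
            if (has_child_cgroups || iops_low_limit)
                    return UINT64_MAX;      /* effectively unthrottled */
            return ASSUMED_MIN_BPS;         /* keep the group progressing */
    }
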
390 struct throtl_data *td = blkg->q->td; in throtl_pd_init() local
407 sq->parent_sq = &td->service_queue; in throtl_pd_init()
410 tg->td = td; in throtl_pd_init()
421 struct throtl_data *td = tg->td; in tg_update_has_rules() local
427 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
431 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
447 static void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
454 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
465 td->limit_valid[LIMIT_LOW] = low_valid; in blk_throtl_update_limit_valid()
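
blk_throtl_update_limit_valid() (447-465) walks every descendant group and records whether any low limit remains anywhere in the hierarchy; only the walk and the final assignment match td here. A hedged model of the predicate it computes, with illustrative types:

    #include <stdbool.h>
    #include <stdint.h>

    enum { R = 0, W = 1 };

    struct tg_model {
            uint64_t     bps_low[2];
            unsigned int iops_low[2];
    };

    /* Model of blk_throtl_update_limit_valid(): LIMIT_LOW stays
     * selectable only while at least one group still has a low bps or
     * iops limit in either direction. */
    static bool low_limit_valid(const struct tg_model *tgs, int n)
    {
            for (int i = 0; i < n; i++)
                    if (tgs[i].bps_low[R] || tgs[i].bps_low[W] ||
                        tgs[i].iops_low[R] || tgs[i].iops_low[W])
                            return true;
            return false;
    }
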
468 static inline void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
473 static void throtl_upgrade_state(struct throtl_data *td);
483 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
485 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
486 throtl_upgrade_state(tg->td); in throtl_pd_offline()
650 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
663 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
678 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
760 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
763 tg->td->throtl_slice); in throtl_trim_slice()
791 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice, in throtl_trim_slice()
845 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_within_iops_limit()
874 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_within_bps_limit()
876 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_within_bps_limit()
937 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
939 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
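
Lines 650-939 show the slice bookkeeping: each group accounts its I/O within a rolling window of td->throtl_slice jiffies, throtl_set_slice_end() rounds the window end up to a slice boundary (678), and tg_may_dispatch() extends the window so at least one full slice of runway remains (937-939). A standalone model of that arithmetic:

    /* Model of throtl_set_slice_end() (678) and the tg_may_dispatch()
     * extension (937-939): window ends land on slice boundaries and
     * dispatch always keeps at least one full slice of runway. */
    static unsigned long slice_roundup(unsigned long jiffy_end,
                                       unsigned long throtl_slice)
    {
            return ((jiffy_end + throtl_slice - 1) / throtl_slice) * throtl_slice;
    }

    static unsigned long maybe_extend(unsigned long slice_end, unsigned long now,
                                      unsigned long throtl_slice)
    {
            if (slice_end < now + throtl_slice)     /* time_before() */
                    slice_end = slice_roundup(now + throtl_slice, throtl_slice);
            return slice_end;
    }
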
1078 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1079 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1154 static bool throtl_can_upgrade(struct throtl_data *td,
1175 struct throtl_data *td = sq_to_td(sq); in throtl_pending_timer_fn() local
1185 q = td->queue; in throtl_pending_timer_fn()
1192 if (throtl_can_upgrade(td, NULL)) in throtl_pending_timer_fn()
1193 throtl_upgrade_state(td); in throtl_pending_timer_fn()
1235 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1251 struct throtl_data *td = container_of(work, struct throtl_data, in blk_throtl_dispatch_work_fn() local
1253 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1254 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1332 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1667 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1668 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1670 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1672 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1674 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
1703 struct throtl_data *td = q->td; in throtl_shutdown_wq() local
1705 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1825 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1855 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1873 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
1879 if (td->limit_index != LIMIT_LOW) in throtl_can_upgrade()
1882 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) in throtl_can_upgrade()
1886 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1906 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1909 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1915 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1918 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1919 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1922 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
1927 throtl_log(&td->service_queue, "upgrade to max"); in throtl_upgrade_state()
1928 td->limit_index = LIMIT_MAX; in throtl_upgrade_state()
1929 td->low_upgrade_time = jiffies; in throtl_upgrade_state()
1930 td->scale = 0; in throtl_upgrade_state()
1932 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
1941 throtl_select_dispatch(&td->service_queue); in throtl_upgrade_state()
1942 throtl_schedule_next_dispatch(&td->service_queue, true); in throtl_upgrade_state()
1943 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_upgrade_state()
1946 static void throtl_downgrade_state(struct throtl_data *td) in throtl_downgrade_state() argument
1948 td->scale /= 2; in throtl_downgrade_state()
1950 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); in throtl_downgrade_state()
1951 if (td->scale) { in throtl_downgrade_state()
1952 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; in throtl_downgrade_state()
1956 td->limit_index = LIMIT_LOW; in throtl_downgrade_state()
1957 td->low_downgrade_time = jiffies; in throtl_downgrade_state()
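
Lines 1922-1957 are the two halves of the LIMIT_LOW/LIMIT_MAX state machine: an upgrade zeroes td->scale and stamps low_upgrade_time, while a downgrade halves td->scale and, while any scale remains, merely backdates low_upgrade_time so the queue stays at LIMIT_MAX under a smaller ramp, dropping back to LIMIT_LOW only once scale is exhausted. A compact model of both transitions:

    /* Compact model of throtl_upgrade_state()/throtl_downgrade_state();
     * the struct mirrors only the fields these lines touch. */
    enum { MODEL_LIMIT_LOW, MODEL_LIMIT_MAX };

    struct td_model {
            int           limit_index;
            unsigned int  scale;
            unsigned long low_upgrade_time;
            unsigned long low_downgrade_time;
    };

    static void upgrade(struct td_model *td, unsigned long now)
    {
            td->limit_index = MODEL_LIMIT_MAX;
            td->low_upgrade_time = now;     /* ramp restarts from the low limit */
            td->scale = 0;
    }

    static void downgrade(struct td_model *td, unsigned long now,
                          unsigned long throtl_slice)
    {
            td->scale /= 2;
            if (td->scale) {
                    /* stay at LIMIT_MAX, resuming from half the headroom */
                    td->low_upgrade_time = now - td->scale * throtl_slice;
                    return;
            }
            td->limit_index = MODEL_LIMIT_LOW;
            td->low_downgrade_time = now;
    }
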
1962 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade() local
1970 td->throtl_slice) && in throtl_tg_can_downgrade()
1979 struct throtl_data *td = tg->td; in throtl_hierarchy_can_downgrade() local
1981 if (time_before(jiffies, td->low_upgrade_time + td->throtl_slice)) in throtl_hierarchy_can_downgrade()
2001 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
2002 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
2006 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
2013 tg->td->throtl_slice)) in throtl_downgrade_check()
2047 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
2072 static void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2079 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW]) in throtl_update_latency_buckets()
2081 if (time_before(jiffies, td->last_calculate_time + HZ)) in throtl_update_latency_buckets()
2083 td->last_calculate_time = jiffies; in throtl_update_latency_buckets()
2088 struct latency_bucket *tmp = &td->tmp_buckets[rw][i]; in throtl_update_latency_buckets()
2094 bucket = per_cpu_ptr(td->latency_buckets[rw], in throtl_update_latency_buckets()
2120 if (td->avg_buckets[rw][i].latency < last_latency[rw]) in throtl_update_latency_buckets()
2121 td->avg_buckets[rw][i].latency = in throtl_update_latency_buckets()
2126 if (!td->avg_buckets[rw][i].valid) in throtl_update_latency_buckets()
2129 latency[rw] = (td->avg_buckets[rw][i].latency * 7 + in throtl_update_latency_buckets()
2132 td->avg_buckets[rw][i].latency = max(latency[rw], in throtl_update_latency_buckets()
2134 td->avg_buckets[rw][i].valid = true; in throtl_update_latency_buckets()
2135 last_latency[rw] = td->avg_buckets[rw][i].latency; in throtl_update_latency_buckets()
2140 throtl_log(&td->service_queue, in throtl_update_latency_buckets()
2143 td->avg_buckets[READ][i].latency, in throtl_update_latency_buckets()
2144 td->avg_buckets[READ][i].valid, in throtl_update_latency_buckets()
2145 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2146 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
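
Lines 2120-2135 maintain the per-size-bucket latency baselines: each bucket's estimate is an exponential moving average weighted 7/8 toward the old value (the shift completing line 2129 is >> 3 in the kernel source), clamped so estimates never decrease from one size bucket to the next. A self-contained model of a single bucket update:

    /* Model of one avg_buckets[rw][i] update: EWMA with 1/8 weight on
     * the new sample, kept non-decreasing across size buckets. */
    static unsigned long update_bucket(unsigned long avg, int valid,
                                       unsigned long sample,
                                       unsigned long prev_bucket_latency)
    {
            unsigned long lat = valid ? (avg * 7 + sample) >> 3 : sample;

            if (lat < prev_bucket_latency)  /* larger I/O is never faster */
                    lat = prev_bucket_latency;
            return lat;
    }
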
2149 static inline void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2165 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
2171 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
2185 struct throtl_data *td = tg->td; in __blk_throtl_bio() local
2191 throtl_update_latency_buckets(td); in __blk_throtl_bio()
2210 if (throtl_can_upgrade(td, tg)) { in __blk_throtl_bio()
2211 throtl_upgrade_state(td); in __blk_throtl_bio()
2257 td->nr_queued[rw]++; in __blk_throtl_bio()
2274 if (throttled || !td->track_bio_latency) in __blk_throtl_bio()
2284 static void throtl_track_latency(struct throtl_data *td, sector_t size, in throtl_track_latency() argument
2291 if (!td || td->limit_index != LIMIT_LOW || in throtl_track_latency()
2293 !blk_queue_nonrot(td->queue)) in throtl_track_latency()
2298 latency = get_cpu_ptr(td->latency_buckets[rw]); in throtl_track_latency()
2301 put_cpu_ptr(td->latency_buckets[rw]); in throtl_track_latency()
2307 struct throtl_data *td = q->td; in blk_throtl_stat_add() local
2309 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq), in blk_throtl_stat_add()
2327 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2341 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2344 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2349 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2361 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
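
In blk_throtl_bio_endio() (2327-2361) a completed bio counts as "bad" when its latency exceeds the per-bucket baseline plus the group's latency target, and the counters are re-armed one throtl_slice after jiffies (2361); in the surrounding kernel code the counters decay by half each window, which the model below assumes. throtl_tg_is_idle() (1825) then treats a group as meeting its target while bad completions stay a small fraction of the total.

    #include <stdint.h>

    /* Field names are illustrative; decay-by-half per window is assumed. */
    struct lat_counters {
            unsigned long bio_cnt, bad_bio_cnt, reset_time;
    };

    static void account_completion(struct lat_counters *s, uint64_t lat,
                                   uint64_t bucket_baseline, uint64_t target,
                                   unsigned long now, unsigned long throtl_slice)
    {
            if (lat > bucket_baseline + target)
                    s->bad_bio_cnt++;
            s->bio_cnt++;

            if (now > s->reset_time && s->bio_cnt) {
                    s->reset_time = now + throtl_slice;     /* line 2361 */
                    s->bio_cnt /= 2;                        /* decay, assumed */
                    s->bad_bio_cnt /= 2;
            }
    }
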
2371 struct throtl_data *td; in blk_throtl_init() local
2374 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
2375 if (!td) in blk_throtl_init()
2377 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2379 if (!td->latency_buckets[READ]) { in blk_throtl_init()
2380 kfree(td); in blk_throtl_init()
2383 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2385 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2386 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2387 kfree(td); in blk_throtl_init()
2391 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
2392 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
2394 q->td = td; in blk_throtl_init()
2395 td->queue = q; in blk_throtl_init()
2397 td->limit_valid[LIMIT_MAX] = true; in blk_throtl_init()
2398 td->limit_index = LIMIT_MAX; in blk_throtl_init()
2399 td->low_upgrade_time = jiffies; in blk_throtl_init()
2400 td->low_downgrade_time = jiffies; in blk_throtl_init()
2405 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2406 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2407 kfree(td); in blk_throtl_init()
2416 BUG_ON(!q->td); in blk_throtl_exit()
2417 del_timer_sync(&q->td->service_queue.pending_timer); in blk_throtl_exit()
2420 free_percpu(q->td->latency_buckets[READ]); in blk_throtl_exit()
2421 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2422 kfree(q->td); in blk_throtl_exit()
2428 struct throtl_data *td; in blk_throtl_register() local
2431 td = q->td; in blk_throtl_register()
2432 BUG_ON(!td); in blk_throtl_register()
2435 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_register()
2436 td->filtered_latency = LATENCY_FILTERED_SSD; in blk_throtl_register()
2438 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register()
2439 td->filtered_latency = LATENCY_FILTERED_HD; in blk_throtl_register()
2441 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register()
2442 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register()
2447 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register()
2450 td->track_bio_latency = !queue_is_mq(q); in blk_throtl_register()
2451 if (!td->track_bio_latency) in blk_throtl_register()
2459 if (!q->td) in blk_throtl_sample_time_show()
2461 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice)); in blk_throtl_sample_time_show()
2470 if (!q->td) in blk_throtl_sample_time_store()
2477 q->td->throtl_slice = t; in blk_throtl_sample_time_store()
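
The final pair of handlers expose td->throtl_slice through sysfs (with CONFIG_BLK_DEV_THROTTLING_LOW this is the queue's throttle_sample_time attribute): reads convert jiffies to milliseconds, writes go the other way and are bounds-checked. A model of the store path; the one-second upper bound stands in for the kernel's MAX_THROTL_SLICE and is an assumption:

    #include <errno.h>

    /* Model of blk_throtl_sample_time_store(): input in milliseconds,
     * stored in jiffies; the upper bound is an assumed cap. */
    static int set_sample_time_ms(unsigned long *throtl_slice_jiffies,
                                  unsigned long msecs, unsigned long hz)
    {
            unsigned long t = msecs * hz / 1000;    /* msecs_to_jiffies() */

            if (t == 0 || t > hz)                   /* assumed cap: one second */
                    return -EINVAL;
            *throtl_slice_jiffies = t;
            return 0;
    }
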