1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Interface for controlling IO bandwidth on a request queue
4 *
5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include "blk.h"
14 #include "blk-cgroup-rwstat.h"
15 #include "blk-stat.h"
16 #include "blk-throttle.h"
17
18 /* Max dispatch from a group in 1 round */
19 #define THROTL_GRP_QUANTUM 8
20
21 /* Total max dispatch from all groups in one round */
22 #define THROTL_QUANTUM 32
23
24 /* Throttling is performed over a slice; after that, the slice is renewed */
25 #define DFL_THROTL_SLICE_HD (HZ / 10)
26 #define DFL_THROTL_SLICE_SSD (HZ / 50)
27 #define MAX_THROTL_SLICE (HZ)
28 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
29 #define MIN_THROTL_BPS (320 * 1024)
30 #define MIN_THROTL_IOPS (10)
31 #define DFL_LATENCY_TARGET (-1L)
32 #define DFL_IDLE_THRESHOLD (0)
33 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
34 #define LATENCY_FILTERED_SSD (0)
35 /*
36 * For HD, very small latency comes from sequential IO. Such IO doesn't help
37 * determine whether it is being impacted by other IO, hence we ignore it
38 */
39 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
40
41 /* A workqueue to queue throttle related work */
42 static struct workqueue_struct *kthrotld_workqueue;
43
44 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
45
46 /* We measure latency for request size from <= 4k to >= 1M */
47 #define LATENCY_BUCKET_SIZE 9
48
49 struct latency_bucket {
50 unsigned long total_latency; /* ns / 1024 */
51 int samples;
52 };
53
54 struct avg_latency_bucket {
55 unsigned long latency; /* ns / 1024 */
56 bool valid;
57 };
58
59 struct throtl_data
60 {
61 /* service tree for active throtl groups */
62 struct throtl_service_queue service_queue;
63
64 struct request_queue *queue;
65
66 /* Total Number of queued bios on READ and WRITE lists */
67 unsigned int nr_queued[2];
68
69 unsigned int throtl_slice;
70
71 /* Work for dispatching throttled bios */
72 struct work_struct dispatch_work;
73 unsigned int limit_index;
74 bool limit_valid[LIMIT_CNT];
75
76 unsigned long low_upgrade_time;
77 unsigned long low_downgrade_time;
78
79 unsigned int scale;
80
81 struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
82 struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
83 struct latency_bucket __percpu *latency_buckets[2];
84 unsigned long last_calculate_time;
85 unsigned long filtered_latency;
86
87 bool track_bio_latency;
88 };
89
90 static void throtl_pending_timer_fn(struct timer_list *t);
91
92 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
93 {
94 return pd_to_blkg(&tg->pd);
95 }
96
97 /**
98 * sq_to_tg - return the throtl_grp the specified service queue belongs to
99 * @sq: the throtl_service_queue of interest
100 *
101 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
102 * embedded in throtl_data, %NULL is returned.
103 */
104 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
105 {
106 if (sq && sq->parent_sq)
107 return container_of(sq, struct throtl_grp, service_queue);
108 else
109 return NULL;
110 }
111
112 /**
113 * sq_to_td - return throtl_data the specified service queue belongs to
114 * @sq: the throtl_service_queue of interest
115 *
116 * A service_queue can be embedded in either a throtl_grp or throtl_data.
117 * Determine the associated throtl_data accordingly and return it.
118 */
119 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
120 {
121 struct throtl_grp *tg = sq_to_tg(sq);
122
123 if (tg)
124 return tg->td;
125 else
126 return container_of(sq, struct throtl_data, service_queue);
127 }
128
129 /*
130 * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. This scaling
131 * makes the IO dispatch smoother.
132 * Scale up: scale up linearly according to the time elapsed since the upgrade.
133 * For every throtl_slice, the limit scales up by 1/2 of the .low limit
134 * until it hits the .max limit.
135 * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit
136 */
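/*
 * Illustrative example: with .low = 10 MB/s and td->scale = 4 (i.e. four
 * throtl_slice periods since the upgrade), the adjusted limit is
 * 10 + (10 / 2) * 4 = 30 MB/s. The callers cap the result at the .max limit.
 */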
137 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
138 {
139 /* arbitrary value to avoid too big scale */
140 if (td->scale < 4096 && time_after_eq(jiffies,
141 td->low_upgrade_time + td->scale * td->throtl_slice))
142 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
143
144 return low + (low >> 1) * td->scale;
145 }
146
147 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
148 {
149 struct blkcg_gq *blkg = tg_to_blkg(tg);
150 struct throtl_data *td;
151 uint64_t ret;
152
153 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
154 return U64_MAX;
155
156 td = tg->td;
157 ret = tg->bps[rw][td->limit_index];
158 if (ret == 0 && td->limit_index == LIMIT_LOW) {
159 /* intermediate node or iops isn't 0 */
160 if (!list_empty(&blkg->blkcg->css.children) ||
161 tg->iops[rw][td->limit_index])
162 return U64_MAX;
163 else
164 return MIN_THROTL_BPS;
165 }
166
167 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
168 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
169 uint64_t adjusted;
170
171 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
172 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
173 }
174 return ret;
175 }
176
177 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
178 {
179 struct blkcg_gq *blkg = tg_to_blkg(tg);
180 struct throtl_data *td;
181 unsigned int ret;
182
183 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
184 return UINT_MAX;
185
186 td = tg->td;
187 ret = tg->iops[rw][td->limit_index];
188 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
189 /* intermediate node or bps isn't 0 */
190 if (!list_empty(&blkg->blkcg->css.children) ||
191 tg->bps[rw][td->limit_index])
192 return UINT_MAX;
193 else
194 return MIN_THROTL_IOPS;
195 }
196
197 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
198 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
199 uint64_t adjusted;
200
201 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
202 if (adjusted > UINT_MAX)
203 adjusted = UINT_MAX;
204 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
205 }
206 return ret;
207 }
208
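/*
 * Map a request size (in sectors) to a latency bucket index. For example,
 * a 4k request (8 sectors) maps to bucket 0 and a 1M request (2048 sectors)
 * maps to bucket 8 (LATENCY_BUCKET_SIZE - 1).
 */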
209 #define request_bucket_index(sectors) \
210 clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
211
212 /**
213 * throtl_log - log debug message via blktrace
214 * @sq: the service_queue being reported
215 * @fmt: printf format string
216 * @args: printf args
217 *
218 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
219 * throtl_grp; otherwise, just "throtl".
220 */
221 #define throtl_log(sq, fmt, args...) do { \
222 struct throtl_grp *__tg = sq_to_tg((sq)); \
223 struct throtl_data *__td = sq_to_td((sq)); \
224 \
225 (void)__td; \
226 if (likely(!blk_trace_note_message_enabled(__td->queue))) \
227 break; \
228 if ((__tg)) { \
229 blk_add_cgroup_trace_msg(__td->queue, \
230 &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
231 } else { \
232 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
233 } \
234 } while (0)
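
/*
 * Example: throtl_log(sq, "bios disp=%u", nr_disp) emits a blktrace message
 * prefixed with "throtl" (plus the blkg name when @sq belongs to a
 * throtl_grp), and only when blktrace note messages are enabled on the queue.
 */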
235
236 static inline unsigned int throtl_bio_data_size(struct bio *bio)
237 {
238 /* assume it's one sector */
239 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
240 return 512;
241 return bio->bi_iter.bi_size;
242 }
243
244 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
245 {
246 INIT_LIST_HEAD(&qn->node);
247 bio_list_init(&qn->bios);
248 qn->tg = tg;
249 }
250
251 /**
252 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
253 * @bio: bio being added
254 * @qn: qnode to add bio to
255 * @queued: the service_queue->queued[] list @qn belongs to
256 *
257 * Add @bio to @qn and put @qn on @queued if it's not already on.
258 * @qn->tg's reference count is bumped when @qn is activated. See the
259 * comment on top of throtl_qnode definition for details.
260 */
261 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
262 struct list_head *queued)
263 {
264 bio_list_add(&qn->bios, bio);
265 if (list_empty(&qn->node)) {
266 list_add_tail(&qn->node, queued);
267 blkg_get(tg_to_blkg(qn->tg));
268 }
269 }
270
271 /**
272 * throtl_peek_queued - peek the first bio on a qnode list
273 * @queued: the qnode list to peek
274 */
275 static struct bio *throtl_peek_queued(struct list_head *queued)
276 {
277 struct throtl_qnode *qn;
278 struct bio *bio;
279
280 if (list_empty(queued))
281 return NULL;
282
283 qn = list_first_entry(queued, struct throtl_qnode, node);
284 bio = bio_list_peek(&qn->bios);
285 WARN_ON_ONCE(!bio);
286 return bio;
287 }
288
289 /**
290 * throtl_pop_queued - pop the first bio from a qnode list
291 * @queued: the qnode list to pop a bio from
292 * @tg_to_put: optional out argument for throtl_grp to put
293 *
294 * Pop the first bio from the qnode list @queued. After popping, the first
295 * qnode is removed from @queued if empty or moved to the end of @queued so
296 * that the popping order is round-robin.
297 *
298 * When the first qnode is removed, its associated throtl_grp should be put
299 * too. If @tg_to_put is NULL, this function automatically puts it;
300 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
301 * responsible for putting it.
302 */
303 static struct bio *throtl_pop_queued(struct list_head *queued,
304 struct throtl_grp **tg_to_put)
305 {
306 struct throtl_qnode *qn;
307 struct bio *bio;
308
309 if (list_empty(queued))
310 return NULL;
311
312 qn = list_first_entry(queued, struct throtl_qnode, node);
313 bio = bio_list_pop(&qn->bios);
314 WARN_ON_ONCE(!bio);
315
316 if (bio_list_empty(&qn->bios)) {
317 list_del_init(&qn->node);
318 if (tg_to_put)
319 *tg_to_put = qn->tg;
320 else
321 blkg_put(tg_to_blkg(qn->tg));
322 } else {
323 list_move_tail(&qn->node, queued);
324 }
325
326 return bio;
327 }
328
329 /* init a service_queue, assumes the caller zeroed it */
330 static void throtl_service_queue_init(struct throtl_service_queue *sq)
331 {
332 INIT_LIST_HEAD(&sq->queued[READ]);
333 INIT_LIST_HEAD(&sq->queued[WRITE]);
334 sq->pending_tree = RB_ROOT_CACHED;
335 timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
336 }
337
338 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
339 struct request_queue *q,
340 struct blkcg *blkcg)
341 {
342 struct throtl_grp *tg;
343 int rw;
344
345 tg = kzalloc_node(sizeof(*tg), gfp, q->node);
346 if (!tg)
347 return NULL;
348
349 if (blkg_rwstat_init(&tg->stat_bytes, gfp))
350 goto err_free_tg;
351
352 if (blkg_rwstat_init(&tg->stat_ios, gfp))
353 goto err_exit_stat_bytes;
354
355 throtl_service_queue_init(&tg->service_queue);
356
357 for (rw = READ; rw <= WRITE; rw++) {
358 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
359 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
360 }
361
362 RB_CLEAR_NODE(&tg->rb_node);
363 tg->bps[READ][LIMIT_MAX] = U64_MAX;
364 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
365 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
366 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
367 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
368 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
369 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
370 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
371 /* LIMIT_LOW will have default value 0 */
372
373 tg->latency_target = DFL_LATENCY_TARGET;
374 tg->latency_target_conf = DFL_LATENCY_TARGET;
375 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
376 tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
377
378 return &tg->pd;
379
380 err_exit_stat_bytes:
381 blkg_rwstat_exit(&tg->stat_bytes);
382 err_free_tg:
383 kfree(tg);
384 return NULL;
385 }
386
387 static void throtl_pd_init(struct blkg_policy_data *pd)
388 {
389 struct throtl_grp *tg = pd_to_tg(pd);
390 struct blkcg_gq *blkg = tg_to_blkg(tg);
391 struct throtl_data *td = blkg->q->td;
392 struct throtl_service_queue *sq = &tg->service_queue;
393
394 /*
395 * If on the default hierarchy, we switch to properly hierarchical
396 * behavior where limits on a given throtl_grp are applied to the
397 * whole subtree rather than just the group itself. e.g. If 16M
398 * read_bps limit is set on the root group, the whole system can't
399 * exceed 16M for the device.
400 *
401 * If not on the default hierarchy, the broken flat hierarchy
402 * behavior is retained where all throtl_grps are treated as if
403 * they're all separate root groups right below throtl_data.
404 * Limits of a group don't interact with limits of other groups
405 * regardless of the position of the group in the hierarchy.
406 */
407 sq->parent_sq = &td->service_queue;
408 if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
409 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
410 tg->td = td;
411 }
412
413 /*
414 * Set has_rules[] if @tg or any of its parents have limits configured.
415 * This doesn't require walking up to the top of the hierarchy as the
416 * parent's has_rules[] is guaranteed to be correct.
417 */
418 static void tg_update_has_rules(struct throtl_grp *tg)
419 {
420 struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
421 struct throtl_data *td = tg->td;
422 int rw;
423
424 for (rw = READ; rw <= WRITE; rw++) {
425 tg->has_rules_iops[rw] =
426 (parent_tg && parent_tg->has_rules_iops[rw]) ||
427 (td->limit_valid[td->limit_index] &&
428 tg_iops_limit(tg, rw) != UINT_MAX);
429 tg->has_rules_bps[rw] =
430 (parent_tg && parent_tg->has_rules_bps[rw]) ||
431 (td->limit_valid[td->limit_index] &&
432 (tg_bps_limit(tg, rw) != U64_MAX));
433 }
434 }
435
436 static void throtl_pd_online(struct blkg_policy_data *pd)
437 {
438 struct throtl_grp *tg = pd_to_tg(pd);
439 /*
440 * We don't want new groups to escape the limits of their ancestors.
441 * Update has_rules[] after a new group is brought online.
442 */
443 tg_update_has_rules(tg);
444 }
445
446 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
447 static void blk_throtl_update_limit_valid(struct throtl_data *td)
448 {
449 struct cgroup_subsys_state *pos_css;
450 struct blkcg_gq *blkg;
451 bool low_valid = false;
452
453 rcu_read_lock();
454 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
455 struct throtl_grp *tg = blkg_to_tg(blkg);
456
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
459 low_valid = true;
460 break;
461 }
462 }
463 rcu_read_unlock();
464
465 td->limit_valid[LIMIT_LOW] = low_valid;
466 }
467 #else
468 static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
469 {
470 }
471 #endif
472
473 static void throtl_upgrade_state(struct throtl_data *td);
474 static void throtl_pd_offline(struct blkg_policy_data *pd)
475 {
476 struct throtl_grp *tg = pd_to_tg(pd);
477
478 tg->bps[READ][LIMIT_LOW] = 0;
479 tg->bps[WRITE][LIMIT_LOW] = 0;
480 tg->iops[READ][LIMIT_LOW] = 0;
481 tg->iops[WRITE][LIMIT_LOW] = 0;
482
483 blk_throtl_update_limit_valid(tg->td);
484
485 if (!tg->td->limit_valid[tg->td->limit_index])
486 throtl_upgrade_state(tg->td);
487 }
488
489 static void throtl_pd_free(struct blkg_policy_data *pd)
490 {
491 struct throtl_grp *tg = pd_to_tg(pd);
492
493 del_timer_sync(&tg->service_queue.pending_timer);
494 blkg_rwstat_exit(&tg->stat_bytes);
495 blkg_rwstat_exit(&tg->stat_ios);
496 kfree(tg);
497 }
498
499 static struct throtl_grp *
500 throtl_rb_first(struct throtl_service_queue *parent_sq)
501 {
502 struct rb_node *n;
503
504 n = rb_first_cached(&parent_sq->pending_tree);
505 WARN_ON_ONCE(!n);
506 if (!n)
507 return NULL;
508 return rb_entry_tg(n);
509 }
510
511 static void throtl_rb_erase(struct rb_node *n,
512 struct throtl_service_queue *parent_sq)
513 {
514 rb_erase_cached(n, &parent_sq->pending_tree);
515 RB_CLEAR_NODE(n);
516 }
517
518 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
519 {
520 struct throtl_grp *tg;
521
522 tg = throtl_rb_first(parent_sq);
523 if (!tg)
524 return;
525
526 parent_sq->first_pending_disptime = tg->disptime;
527 }
528
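/*
 * Insert @tg into its parent's pending_tree, keyed by tg->disptime. The
 * cached-rbtree leftmost pointer is maintained so throtl_rb_first() can
 * find the earliest-expiring group cheaply.
 */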
529 static void tg_service_queue_add(struct throtl_grp *tg)
530 {
531 struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
532 struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
533 struct rb_node *parent = NULL;
534 struct throtl_grp *__tg;
535 unsigned long key = tg->disptime;
536 bool leftmost = true;
537
538 while (*node != NULL) {
539 parent = *node;
540 __tg = rb_entry_tg(parent);
541
542 if (time_before(key, __tg->disptime))
543 node = &parent->rb_left;
544 else {
545 node = &parent->rb_right;
546 leftmost = false;
547 }
548 }
549
550 rb_link_node(&tg->rb_node, parent, node);
551 rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
552 leftmost);
553 }
554
555 static void throtl_enqueue_tg(struct throtl_grp *tg)
556 {
557 if (!(tg->flags & THROTL_TG_PENDING)) {
558 tg_service_queue_add(tg);
559 tg->flags |= THROTL_TG_PENDING;
560 tg->service_queue.parent_sq->nr_pending++;
561 }
562 }
563
564 static void throtl_dequeue_tg(struct throtl_grp *tg)
565 {
566 if (tg->flags & THROTL_TG_PENDING) {
567 struct throtl_service_queue *parent_sq =
568 tg->service_queue.parent_sq;
569
570 throtl_rb_erase(&tg->rb_node, parent_sq);
571 --parent_sq->nr_pending;
572 tg->flags &= ~THROTL_TG_PENDING;
573 }
574 }
575
576 /* Call with queue lock held */
577 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
578 unsigned long expires)
579 {
580 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
581
582 /*
583 * Since we are adjusting the throttle limit dynamically, the sleep
584 * time calculated according to the previous limit might be invalid. It's
585 * possible that the cgroup's sleep time is very long and no other cgroup
586 * has IO running, so nothing would notify it of the limit change. Cap the
587 * sleep time so the cgroup doesn't miss that notification.
588 */
589 if (time_after(expires, max_expire))
590 expires = max_expire;
591 mod_timer(&sq->pending_timer, expires);
592 throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
593 expires - jiffies, jiffies);
594 }
595
596 /**
597 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
598 * @sq: the service_queue to schedule dispatch for
599 * @force: force scheduling
600 *
601 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
602 * dispatch time of the first pending child. Returns %true if either timer
603 * is armed or there's no pending child left. %false if the current
604 * dispatch window is still open and the caller should continue
605 * dispatching.
606 *
607 * If @force is %true, the dispatch timer is always scheduled and this
608 * function is guaranteed to return %true. This is to be used when the
609 * caller can't dispatch itself and needs to invoke pending_timer
610 * unconditionally. Note that forced scheduling is likely to induce short
611 * delay before dispatch starts even if @sq->first_pending_disptime is not
612 * in the future and thus shouldn't be used in hot paths.
613 */
614 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
615 bool force)
616 {
617 /* any pending children left? */
618 if (!sq->nr_pending)
619 return true;
620
621 update_min_dispatch_time(sq);
622
623 /* is the next dispatch time in the future? */
624 if (force || time_after(sq->first_pending_disptime, jiffies)) {
625 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
626 return true;
627 }
628
629 /* tell the caller to continue dispatching */
630 return false;
631 }
632
633 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
634 bool rw, unsigned long start)
635 {
636 tg->bytes_disp[rw] = 0;
637 tg->io_disp[rw] = 0;
638 tg->carryover_bytes[rw] = 0;
639 tg->carryover_ios[rw] = 0;
640
641 /*
642 * Previous slice has expired. We must have trimmed it after last
643 * bio dispatch. That means since start of last slice, we never used
644 * that bandwidth. Do try to make use of that bandwidth while giving
645 * credit.
646 */
647 if (time_after_eq(start, tg->slice_start[rw]))
648 tg->slice_start[rw] = start;
649
650 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
651 throtl_log(&tg->service_queue,
652 "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
653 rw == READ ? 'R' : 'W', tg->slice_start[rw],
654 tg->slice_end[rw], jiffies);
655 }
656
657 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
658 bool clear_carryover)
659 {
660 tg->bytes_disp[rw] = 0;
661 tg->io_disp[rw] = 0;
662 tg->slice_start[rw] = jiffies;
663 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
664 if (clear_carryover) {
665 tg->carryover_bytes[rw] = 0;
666 tg->carryover_ios[rw] = 0;
667 }
668
669 throtl_log(&tg->service_queue,
670 "[%c] new slice start=%lu end=%lu jiffies=%lu",
671 rw == READ ? 'R' : 'W', tg->slice_start[rw],
672 tg->slice_end[rw], jiffies);
673 }
674
675 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
676 unsigned long jiffy_end)
677 {
678 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
679 }
680
681 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
682 unsigned long jiffy_end)
683 {
684 throtl_set_slice_end(tg, rw, jiffy_end);
685 throtl_log(&tg->service_queue,
686 "[%c] extend slice start=%lu end=%lu jiffies=%lu",
687 rw == READ ? 'R' : 'W', tg->slice_start[rw],
688 tg->slice_end[rw], jiffies);
689 }
690
691 /* Determine if previously allocated or extended slice is complete or not */
692 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
693 {
694 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
695 return false;
696
697 return true;
698 }
699
700 /* Trim the used slices and adjust slice start accordingly */
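/*
 * Illustrative numbers: with a 1 MB/s bps limit and throtl_slice = HZ / 10,
 * each fully elapsed slice trims roughly 100 KB from bytes_disp and
 * advances slice_start by one throtl_slice.
 */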
701 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
702 {
703 unsigned long nr_slices, time_elapsed, io_trim;
704 u64 bytes_trim, tmp;
705
706 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
707
708 /*
709 * If bps is unlimited (-1), the time slice doesn't get
710 * renewed. Don't try to trim the slice if it has already been used up. A new
711 * slice will start when appropriate.
712 */
713 if (throtl_slice_used(tg, rw))
714 return;
715
716 /*
717 * A bio has been dispatched. Also adjust slice_end. It might happen
718 * that the cgroup limit was initially very low, resulting in a high
719 * slice_end, but the limit was later bumped up and the bio was dispatched
720 * sooner; then we need to reduce slice_end. A bogus high slice_end
721 * is bad because it does not allow a new slice to start.
722 */
723
724 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
725
726 time_elapsed = jiffies - tg->slice_start[rw];
727
728 nr_slices = time_elapsed / tg->td->throtl_slice;
729
730 if (!nr_slices)
731 return;
732 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
733 do_div(tmp, HZ);
734 bytes_trim = tmp;
735
736 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
737 HZ;
738
739 if (!bytes_trim && !io_trim)
740 return;
741
742 if (tg->bytes_disp[rw] >= bytes_trim)
743 tg->bytes_disp[rw] -= bytes_trim;
744 else
745 tg->bytes_disp[rw] = 0;
746
747 if (tg->io_disp[rw] >= io_trim)
748 tg->io_disp[rw] -= io_trim;
749 else
750 tg->io_disp[rw] = 0;
751
752 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
753
754 throtl_log(&tg->service_queue,
755 "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
756 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
757 tg->slice_start[rw], tg->slice_end[rw], jiffies);
758 }
759
760 static unsigned int calculate_io_allowed(u32 iops_limit,
761 unsigned long jiffy_elapsed)
762 {
763 unsigned int io_allowed;
764 u64 tmp;
765
766 /*
767 * jiffy_elapsed should not be a big value: the minimum iops can be 1,
768 * so at most jiffy_elapsed should be equivalent to 1 second, as we
769 * will allow dispatch after 1 second and after that the slice should
770 * have been trimmed.
771 */
772
773 tmp = (u64)iops_limit * jiffy_elapsed;
774 do_div(tmp, HZ);
775
776 if (tmp > UINT_MAX)
777 io_allowed = UINT_MAX;
778 else
779 io_allowed = tmp;
780
781 return io_allowed;
782 }
783
784 static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
785 {
786 return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
787 }
788
789 static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
790 {
791 unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
792 u64 bps_limit = tg_bps_limit(tg, rw);
793 u32 iops_limit = tg_iops_limit(tg, rw);
794
795 /*
796 * If the config is updated while bios are still throttled, calculate and
797 * accumulate how many bytes/ios have been waited for across the change.
798 * carryover_bytes/ios will then be used to calculate the wait time under
799 * the new configuration.
800 */
801 if (bps_limit != U64_MAX)
802 tg->carryover_bytes[rw] +=
803 calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
804 tg->bytes_disp[rw];
805 if (iops_limit != UINT_MAX)
806 tg->carryover_ios[rw] +=
807 calculate_io_allowed(iops_limit, jiffy_elapsed) -
808 tg->io_disp[rw];
809 }
810
811 static void tg_update_carryover(struct throtl_grp *tg)
812 {
813 if (tg->service_queue.nr_queued[READ])
814 __tg_update_carryover(tg, READ);
815 if (tg->service_queue.nr_queued[WRITE])
816 __tg_update_carryover(tg, WRITE);
817
818 /* see comments in struct throtl_grp for meaning of these fields. */
819 throtl_log(&tg->service_queue, "%s: %llu %llu %u %u\n", __func__,
820 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
821 tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
822 }
823
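/*
 * Check whether one more IO fits within @iops_limit for the current slice.
 * Returns true if it does; otherwise *wait (if non-NULL) is set to the
 * approximate number of jiffies until the next slice boundary.
 */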
824 static bool tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
825 u32 iops_limit, unsigned long *wait)
826 {
827 bool rw = bio_data_dir(bio);
828 unsigned int io_allowed;
829 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
830
831 if (iops_limit == UINT_MAX) {
832 if (wait)
833 *wait = 0;
834 return true;
835 }
836
837 jiffy_elapsed = jiffies - tg->slice_start[rw];
838
839 /* Round up to the next throttle slice, wait time must be nonzero */
840 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
841 io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
842 tg->carryover_ios[rw];
843 if (tg->io_disp[rw] + 1 <= io_allowed) {
844 if (wait)
845 *wait = 0;
846 return true;
847 }
848
849 /* Calc approx time to dispatch */
850 jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
851
852 if (wait)
853 *wait = jiffy_wait;
854 return false;
855 }
856
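/*
 * Check whether @bio's bytes fit within @bps_limit for the current slice.
 * Returns true if they do; otherwise *wait (if non-NULL) is set to the
 * approximate time needed for the extra bytes to become allowed, plus the
 * time added by rounding the elapsed time up to a whole slice.
 */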
857 static bool tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
858 u64 bps_limit, unsigned long *wait)
859 {
860 bool rw = bio_data_dir(bio);
861 u64 bytes_allowed, extra_bytes;
862 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
863 unsigned int bio_size = throtl_bio_data_size(bio);
864
865 /* no need to throttle if this bio's bytes have been accounted */
866 if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
867 if (wait)
868 *wait = 0;
869 return true;
870 }
871
872 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
873
874 /* Slice has just started. Consider one slice interval */
875 if (!jiffy_elapsed)
876 jiffy_elapsed_rnd = tg->td->throtl_slice;
877
878 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
879 bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
880 tg->carryover_bytes[rw];
881 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
882 if (wait)
883 *wait = 0;
884 return true;
885 }
886
887 /* Calc approx time to dispatch */
888 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
889 jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
890
891 if (!jiffy_wait)
892 jiffy_wait = 1;
893
894 /*
895 * This wait time is without taking into consideration the rounding
896 * up we did. Add that time also.
897 */
898 jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
899 if (wait)
900 *wait = jiffy_wait;
901 return false;
902 }
903
904 /*
905 * Returns whether one can dispatch a bio or not. Also returns the approx
906 * number of jiffies to wait before this bio is within the IO rate and can be dispatched
907 */
908 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
909 unsigned long *wait)
910 {
911 bool rw = bio_data_dir(bio);
912 unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
913 u64 bps_limit = tg_bps_limit(tg, rw);
914 u32 iops_limit = tg_iops_limit(tg, rw);
915
916 /*
917 * Currently the whole state machine of the group depends on the first bio
918 * queued in the group's bio list. So one should not be calling
919 * this function with a different bio if there are other bios
920 * queued.
921 */
922 BUG_ON(tg->service_queue.nr_queued[rw] &&
923 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
924
925 /* If tg->bps = -1, then BW is unlimited */
926 if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
927 tg->flags & THROTL_TG_CANCELING) {
928 if (wait)
929 *wait = 0;
930 return true;
931 }
932
933 /*
934 * If the previous slice expired, start a new one; otherwise renew/extend the
935 * existing slice to make sure it is at least throtl_slice interval
936 * long from now. A new slice is started only for an empty throttle group.
937 * If there is a queued bio, there should already be an active
938 * slice and it should be extended instead.
939 */
940 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
941 throtl_start_new_slice(tg, rw, true);
942 else {
943 if (time_before(tg->slice_end[rw],
944 jiffies + tg->td->throtl_slice))
945 throtl_extend_slice(tg, rw,
946 jiffies + tg->td->throtl_slice);
947 }
948
949 if (tg_within_bps_limit(tg, bio, bps_limit, &bps_wait) &&
950 tg_within_iops_limit(tg, bio, iops_limit, &iops_wait)) {
951 if (wait)
952 *wait = 0;
953 return true;
954 }
955
956 max_wait = max(bps_wait, iops_wait);
957
958 if (wait)
959 *wait = max_wait;
960
961 if (time_before(tg->slice_end[rw], jiffies + max_wait))
962 throtl_extend_slice(tg, rw, jiffies + max_wait);
963
964 return false;
965 }
966
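/*
 * Charge @bio against @tg's budget for the current slice. Bios flagged
 * BIO_BPS_THROTTLED were already byte-accounted when they were first
 * dispatched at a lower level, so only the io count is charged for them.
 */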
967 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
968 {
969 bool rw = bio_data_dir(bio);
970 unsigned int bio_size = throtl_bio_data_size(bio);
971
972 /* Charge the bio to the group */
973 if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
974 tg->bytes_disp[rw] += bio_size;
975 tg->last_bytes_disp[rw] += bio_size;
976 }
977
978 tg->io_disp[rw]++;
979 tg->last_io_disp[rw]++;
980 }
981
982 /**
983 * throtl_add_bio_tg - add a bio to the specified throtl_grp
984 * @bio: bio to add
985 * @qn: qnode to use
986 * @tg: the target throtl_grp
987 *
988 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
989 * tg->qnode_on_self[] is used.
990 */
991 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
992 struct throtl_grp *tg)
993 {
994 struct throtl_service_queue *sq = &tg->service_queue;
995 bool rw = bio_data_dir(bio);
996
997 if (!qn)
998 qn = &tg->qnode_on_self[rw];
999
1000 /*
1001 * If @tg doesn't currently have any bios queued in the same
1002 * direction, queueing @bio can change when @tg should be
1003 * dispatched. Mark that @tg was empty. This is automatically
1004 * cleared on the next tg_update_disptime().
1005 */
1006 if (!sq->nr_queued[rw])
1007 tg->flags |= THROTL_TG_WAS_EMPTY;
1008
1009 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1010
1011 sq->nr_queued[rw]++;
1012 throtl_enqueue_tg(tg);
1013 }
1014
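/*
 * Recompute @tg->disptime as the earliest time at which either its first
 * queued READ or first queued WRITE bio may be dispatched, and re-sort
 * @tg in its parent's pending_tree accordingly.
 */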
1015 static void tg_update_disptime(struct throtl_grp *tg)
1016 {
1017 struct throtl_service_queue *sq = &tg->service_queue;
1018 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1019 struct bio *bio;
1020
1021 bio = throtl_peek_queued(&sq->queued[READ]);
1022 if (bio)
1023 tg_may_dispatch(tg, bio, &read_wait);
1024
1025 bio = throtl_peek_queued(&sq->queued[WRITE]);
1026 if (bio)
1027 tg_may_dispatch(tg, bio, &write_wait);
1028
1029 min_wait = min(read_wait, write_wait);
1030 disptime = jiffies + min_wait;
1031
1032 /* Update dispatch time */
1033 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
1034 tg->disptime = disptime;
1035 tg_service_queue_add(tg);
1036
1037 /* see throtl_add_bio_tg() */
1038 tg->flags &= ~THROTL_TG_WAS_EMPTY;
1039 }
1040
1041 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1042 struct throtl_grp *parent_tg, bool rw)
1043 {
1044 if (throtl_slice_used(parent_tg, rw)) {
1045 throtl_start_new_slice_with_credit(parent_tg, rw,
1046 child_tg->slice_start[rw]);
1047 }
1048
1049 }
1050
1051 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1052 {
1053 struct throtl_service_queue *sq = &tg->service_queue;
1054 struct throtl_service_queue *parent_sq = sq->parent_sq;
1055 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1056 struct throtl_grp *tg_to_put = NULL;
1057 struct bio *bio;
1058
1059 /*
1060 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1061 * from @tg may put its reference and @parent_sq might end up
1062 * getting released prematurely. Remember the tg to put and put it
1063 * after @bio is transferred to @parent_sq.
1064 */
1065 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1066 sq->nr_queued[rw]--;
1067
1068 throtl_charge_bio(tg, bio);
1069 bio_set_flag(bio, BIO_BPS_THROTTLED);
1070
1071 /*
1072 * If our parent is another tg, we just need to transfer @bio to
1073 * the parent using throtl_add_bio_tg(). If our parent is
1074 * @td->service_queue, @bio is ready to be issued. Put it on its
1075 * bio_lists[] and decrease total number queued. The caller is
1076 * responsible for issuing these bios.
1077 */
1078 if (parent_tg) {
1079 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1080 start_parent_slice_with_credit(tg, parent_tg, rw);
1081 } else {
1082 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1083 &parent_sq->queued[rw]);
1084 BUG_ON(tg->td->nr_queued[rw] <= 0);
1085 tg->td->nr_queued[rw]--;
1086 }
1087
1088 throtl_trim_slice(tg, rw);
1089
1090 if (tg_to_put)
1091 blkg_put(tg_to_blkg(tg_to_put));
1092 }
1093
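/*
 * Dispatch up to THROTL_GRP_QUANTUM bios from @tg, biased roughly 75%
 * reads to 25% writes (6 reads and 2 writes with the default quantum of 8).
 * Returns the number of bios dispatched.
 */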
1094 static int throtl_dispatch_tg(struct throtl_grp *tg)
1095 {
1096 struct throtl_service_queue *sq = &tg->service_queue;
1097 unsigned int nr_reads = 0, nr_writes = 0;
1098 unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
1099 unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
1100 struct bio *bio;
1101
1102 /* Try to dispatch 75% READS and 25% WRITES */
1103
1104 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1105 tg_may_dispatch(tg, bio, NULL)) {
1106
1107 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1108 nr_reads++;
1109
1110 if (nr_reads >= max_nr_reads)
1111 break;
1112 }
1113
1114 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1115 tg_may_dispatch(tg, bio, NULL)) {
1116
1117 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1118 nr_writes++;
1119
1120 if (nr_writes >= max_nr_writes)
1121 break;
1122 }
1123
1124 return nr_reads + nr_writes;
1125 }
1126
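/*
 * Walk @parent_sq's pending_tree in dispatch-time order and dispatch from
 * every group whose disptime has passed, stopping once THROTL_QUANTUM bios
 * have been dispatched in this round. Returns the total dispatched.
 */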
1127 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1128 {
1129 unsigned int nr_disp = 0;
1130
1131 while (1) {
1132 struct throtl_grp *tg;
1133 struct throtl_service_queue *sq;
1134
1135 if (!parent_sq->nr_pending)
1136 break;
1137
1138 tg = throtl_rb_first(parent_sq);
1139 if (!tg)
1140 break;
1141
1142 if (time_before(jiffies, tg->disptime))
1143 break;
1144
1145 nr_disp += throtl_dispatch_tg(tg);
1146
1147 sq = &tg->service_queue;
1148 if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
1149 tg_update_disptime(tg);
1150 else
1151 throtl_dequeue_tg(tg);
1152
1153 if (nr_disp >= THROTL_QUANTUM)
1154 break;
1155 }
1156
1157 return nr_disp;
1158 }
1159
1160 static bool throtl_can_upgrade(struct throtl_data *td,
1161 struct throtl_grp *this_tg);
1162 /**
1163 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1164 * @t: the pending_timer member of the throtl_service_queue being serviced
1165 *
1166 * This timer is armed when a child throtl_grp with active bios becomes
1167 * pending and is queued on the service_queue's pending_tree, and expires when
1168 * the first child throtl_grp should be dispatched. This function
1169 * dispatches bios from the children throtl_grps to the parent
1170 * service_queue.
1171 *
1172 * If the parent's parent is another throtl_grp, dispatching is propagated
1173 * by either arming its pending_timer or repeating dispatch directly. If
1174 * the top-level service_tree is reached, throtl_data->dispatch_work is
1175 * kicked so that the ready bios are issued.
1176 */
1177 static void throtl_pending_timer_fn(struct timer_list *t)
1178 {
1179 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1180 struct throtl_grp *tg = sq_to_tg(sq);
1181 struct throtl_data *td = sq_to_td(sq);
1182 struct throtl_service_queue *parent_sq;
1183 struct request_queue *q;
1184 bool dispatched;
1185 int ret;
1186
1187 /* throtl_data may be gone, so figure out request queue by blkg */
1188 if (tg)
1189 q = tg->pd.blkg->q;
1190 else
1191 q = td->queue;
1192
1193 spin_lock_irq(&q->queue_lock);
1194
1195 if (!q->root_blkg)
1196 goto out_unlock;
1197
1198 if (throtl_can_upgrade(td, NULL))
1199 throtl_upgrade_state(td);
1200
1201 again:
1202 parent_sq = sq->parent_sq;
1203 dispatched = false;
1204
1205 while (true) {
1206 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1207 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1208 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1209
1210 ret = throtl_select_dispatch(sq);
1211 if (ret) {
1212 throtl_log(sq, "bios disp=%u", ret);
1213 dispatched = true;
1214 }
1215
1216 if (throtl_schedule_next_dispatch(sq, false))
1217 break;
1218
1219 /* this dispatch window is still open, relax and repeat */
1220 spin_unlock_irq(&q->queue_lock);
1221 cpu_relax();
1222 spin_lock_irq(&q->queue_lock);
1223 }
1224
1225 if (!dispatched)
1226 goto out_unlock;
1227
1228 if (parent_sq) {
1229 /* @parent_sq is another throtl_grp, propagate dispatch */
1230 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1231 tg_update_disptime(tg);
1232 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1233 /* window is already open, repeat dispatching */
1234 sq = parent_sq;
1235 tg = sq_to_tg(sq);
1236 goto again;
1237 }
1238 }
1239 } else {
1240 /* reached the top-level, queue issuing */
1241 queue_work(kthrotld_workqueue, &td->dispatch_work);
1242 }
1243 out_unlock:
1244 spin_unlock_irq(&q->queue_lock);
1245 }
1246
1247 /**
1248 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1249 * @work: work item being executed
1250 *
1251 * This function is queued for execution when bios reach the bio_lists[]
1252 * of throtl_data->service_queue. Those bios are ready and issued by this
1253 * function.
1254 */
1255 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1256 {
1257 struct throtl_data *td = container_of(work, struct throtl_data,
1258 dispatch_work);
1259 struct throtl_service_queue *td_sq = &td->service_queue;
1260 struct request_queue *q = td->queue;
1261 struct bio_list bio_list_on_stack;
1262 struct bio *bio;
1263 struct blk_plug plug;
1264 int rw;
1265
1266 bio_list_init(&bio_list_on_stack);
1267
1268 spin_lock_irq(&q->queue_lock);
1269 for (rw = READ; rw <= WRITE; rw++)
1270 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1271 bio_list_add(&bio_list_on_stack, bio);
1272 spin_unlock_irq(&q->queue_lock);
1273
1274 if (!bio_list_empty(&bio_list_on_stack)) {
1275 blk_start_plug(&plug);
1276 while ((bio = bio_list_pop(&bio_list_on_stack)))
1277 submit_bio_noacct_nocheck(bio);
1278 blk_finish_plug(&plug);
1279 }
1280 }
1281
1282 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1283 int off)
1284 {
1285 struct throtl_grp *tg = pd_to_tg(pd);
1286 u64 v = *(u64 *)((void *)tg + off);
1287
1288 if (v == U64_MAX)
1289 return 0;
1290 return __blkg_prfill_u64(sf, pd, v);
1291 }
1292
1293 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1294 int off)
1295 {
1296 struct throtl_grp *tg = pd_to_tg(pd);
1297 unsigned int v = *(unsigned int *)((void *)tg + off);
1298
1299 if (v == UINT_MAX)
1300 return 0;
1301 return __blkg_prfill_u64(sf, pd, v);
1302 }
1303
1304 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1305 {
1306 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1307 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1308 return 0;
1309 }
1310
1311 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1312 {
1313 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1314 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1315 return 0;
1316 }
1317
1318 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1319 {
1320 struct throtl_service_queue *sq = &tg->service_queue;
1321 struct cgroup_subsys_state *pos_css;
1322 struct blkcg_gq *blkg;
1323
1324 throtl_log(&tg->service_queue,
1325 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1326 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1327 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1328
1329 /*
1330 * Update has_rules[] flags for the updated tg's subtree. A tg is
1331 * considered to have rules if either the tg itself or any of its
1332 * ancestors has rules. This identifies groups without any
1333 * restrictions in the whole hierarchy and allows them to bypass
1334 * blk-throttle.
1335 */
1336 blkg_for_each_descendant_pre(blkg, pos_css,
1337 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1338 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1339 struct throtl_grp *parent_tg;
1340
1341 tg_update_has_rules(this_tg);
1342 /* ignore root/second level */
1343 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1344 !blkg->parent->parent)
1345 continue;
1346 parent_tg = blkg_to_tg(blkg->parent);
1347 /*
1348 * make sure all children have a lower idle time threshold and a
1349 * higher latency target
1350 */
1351 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1352 parent_tg->idletime_threshold);
1353 this_tg->latency_target = max(this_tg->latency_target,
1354 parent_tg->latency_target);
1355 }
1356
1357 /*
1358 * We're already holding queue_lock and know @tg is valid. Let's
1359 * apply the new config directly.
1360 *
1361 * Restart the slices for both READ and WRITE. It might happen
1362 * that a group's limits are dropped suddenly and we don't want to
1363 * account recently dispatched IO against the new, lower rate.
1364 */
1365 throtl_start_new_slice(tg, READ, false);
1366 throtl_start_new_slice(tg, WRITE, false);
1367
1368 if (tg->flags & THROTL_TG_PENDING) {
1369 tg_update_disptime(tg);
1370 throtl_schedule_next_dispatch(sq->parent_sq, true);
1371 }
1372 }
1373
1374 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1375 char *buf, size_t nbytes, loff_t off, bool is_u64)
1376 {
1377 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1378 struct blkg_conf_ctx ctx;
1379 struct throtl_grp *tg;
1380 int ret;
1381 u64 v;
1382
1383 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1384 if (ret)
1385 return ret;
1386
1387 ret = -EINVAL;
1388 if (sscanf(ctx.body, "%llu", &v) != 1)
1389 goto out_finish;
1390 if (!v)
1391 v = U64_MAX;
1392
1393 tg = blkg_to_tg(ctx.blkg);
1394 tg_update_carryover(tg);
1395
1396 if (is_u64)
1397 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1398 else
1399 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1400
1401 tg_conf_updated(tg, false);
1402 ret = 0;
1403 out_finish:
1404 blkg_conf_finish(&ctx);
1405 return ret ?: nbytes;
1406 }
1407
1408 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1409 char *buf, size_t nbytes, loff_t off)
1410 {
1411 return tg_set_conf(of, buf, nbytes, off, true);
1412 }
1413
1414 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1415 char *buf, size_t nbytes, loff_t off)
1416 {
1417 return tg_set_conf(of, buf, nbytes, off, false);
1418 }
1419
1420 static int tg_print_rwstat(struct seq_file *sf, void *v)
1421 {
1422 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1423 blkg_prfill_rwstat, &blkcg_policy_throtl,
1424 seq_cft(sf)->private, true);
1425 return 0;
1426 }
1427
1428 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1429 struct blkg_policy_data *pd, int off)
1430 {
1431 struct blkg_rwstat_sample sum;
1432
1433 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1434 &sum);
1435 return __blkg_prfill_rwstat(sf, pd, &sum);
1436 }
1437
1438 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1439 {
1440 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1441 tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1442 seq_cft(sf)->private, true);
1443 return 0;
1444 }
1445
1446 static struct cftype throtl_legacy_files[] = {
1447 {
1448 .name = "throttle.read_bps_device",
1449 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1450 .seq_show = tg_print_conf_u64,
1451 .write = tg_set_conf_u64,
1452 },
1453 {
1454 .name = "throttle.write_bps_device",
1455 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1456 .seq_show = tg_print_conf_u64,
1457 .write = tg_set_conf_u64,
1458 },
1459 {
1460 .name = "throttle.read_iops_device",
1461 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1462 .seq_show = tg_print_conf_uint,
1463 .write = tg_set_conf_uint,
1464 },
1465 {
1466 .name = "throttle.write_iops_device",
1467 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1468 .seq_show = tg_print_conf_uint,
1469 .write = tg_set_conf_uint,
1470 },
1471 {
1472 .name = "throttle.io_service_bytes",
1473 .private = offsetof(struct throtl_grp, stat_bytes),
1474 .seq_show = tg_print_rwstat,
1475 },
1476 {
1477 .name = "throttle.io_service_bytes_recursive",
1478 .private = offsetof(struct throtl_grp, stat_bytes),
1479 .seq_show = tg_print_rwstat_recursive,
1480 },
1481 {
1482 .name = "throttle.io_serviced",
1483 .private = offsetof(struct throtl_grp, stat_ios),
1484 .seq_show = tg_print_rwstat,
1485 },
1486 {
1487 .name = "throttle.io_serviced_recursive",
1488 .private = offsetof(struct throtl_grp, stat_ios),
1489 .seq_show = tg_print_rwstat_recursive,
1490 },
1491 { } /* terminate */
1492 };
1493
1494 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1495 int off)
1496 {
1497 struct throtl_grp *tg = pd_to_tg(pd);
1498 const char *dname = blkg_dev_name(pd->blkg);
1499 char bufs[4][21] = { "max", "max", "max", "max" };
1500 u64 bps_dft;
1501 unsigned int iops_dft;
1502 char idle_time[26] = "";
1503 char latency_time[26] = "";
1504
1505 if (!dname)
1506 return 0;
1507
1508 if (off == LIMIT_LOW) {
1509 bps_dft = 0;
1510 iops_dft = 0;
1511 } else {
1512 bps_dft = U64_MAX;
1513 iops_dft = UINT_MAX;
1514 }
1515
1516 if (tg->bps_conf[READ][off] == bps_dft &&
1517 tg->bps_conf[WRITE][off] == bps_dft &&
1518 tg->iops_conf[READ][off] == iops_dft &&
1519 tg->iops_conf[WRITE][off] == iops_dft &&
1520 (off != LIMIT_LOW ||
1521 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1522 tg->latency_target_conf == DFL_LATENCY_TARGET)))
1523 return 0;
1524
1525 if (tg->bps_conf[READ][off] != U64_MAX)
1526 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1527 tg->bps_conf[READ][off]);
1528 if (tg->bps_conf[WRITE][off] != U64_MAX)
1529 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1530 tg->bps_conf[WRITE][off]);
1531 if (tg->iops_conf[READ][off] != UINT_MAX)
1532 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1533 tg->iops_conf[READ][off]);
1534 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1535 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1536 tg->iops_conf[WRITE][off]);
1537 if (off == LIMIT_LOW) {
1538 if (tg->idletime_threshold_conf == ULONG_MAX)
1539 strcpy(idle_time, " idle=max");
1540 else
1541 snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1542 tg->idletime_threshold_conf);
1543
1544 if (tg->latency_target_conf == ULONG_MAX)
1545 strcpy(latency_time, " latency=max");
1546 else
1547 snprintf(latency_time, sizeof(latency_time),
1548 " latency=%lu", tg->latency_target_conf);
1549 }
1550
1551 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1552 dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1553 latency_time);
1554 return 0;
1555 }
1556
1557 static int tg_print_limit(struct seq_file *sf, void *v)
1558 {
1559 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1560 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1561 return 0;
1562 }
1563
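/*
 * Parse a "MAJ:MIN key=value ..." line written to the io.max (or io.low)
 * cgroup file, e.g. "8:16 rbps=2097152 wiops=max". Keys are rbps, wbps,
 * riops and wiops; io.low additionally accepts idle= and latency=. "max"
 * means no limit.
 */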
1564 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1565 char *buf, size_t nbytes, loff_t off)
1566 {
1567 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1568 struct blkg_conf_ctx ctx;
1569 struct throtl_grp *tg;
1570 u64 v[4];
1571 unsigned long idle_time;
1572 unsigned long latency_time;
1573 int ret;
1574 int index = of_cft(of)->private;
1575
1576 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1577 if (ret)
1578 return ret;
1579
1580 tg = blkg_to_tg(ctx.blkg);
1581 tg_update_carryover(tg);
1582
1583 v[0] = tg->bps_conf[READ][index];
1584 v[1] = tg->bps_conf[WRITE][index];
1585 v[2] = tg->iops_conf[READ][index];
1586 v[3] = tg->iops_conf[WRITE][index];
1587
1588 idle_time = tg->idletime_threshold_conf;
1589 latency_time = tg->latency_target_conf;
1590 while (true) {
1591 char tok[27]; /* wiops=18446744073709551616 */
1592 char *p;
1593 u64 val = U64_MAX;
1594 int len;
1595
1596 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1597 break;
1598 if (tok[0] == '\0')
1599 break;
1600 ctx.body += len;
1601
1602 ret = -EINVAL;
1603 p = tok;
1604 strsep(&p, "=");
1605 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1606 goto out_finish;
1607
1608 ret = -ERANGE;
1609 if (!val)
1610 goto out_finish;
1611
1612 ret = -EINVAL;
1613 if (!strcmp(tok, "rbps") && val > 1)
1614 v[0] = val;
1615 else if (!strcmp(tok, "wbps") && val > 1)
1616 v[1] = val;
1617 else if (!strcmp(tok, "riops") && val > 1)
1618 v[2] = min_t(u64, val, UINT_MAX);
1619 else if (!strcmp(tok, "wiops") && val > 1)
1620 v[3] = min_t(u64, val, UINT_MAX);
1621 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1622 idle_time = val;
1623 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1624 latency_time = val;
1625 else
1626 goto out_finish;
1627 }
1628
1629 tg->bps_conf[READ][index] = v[0];
1630 tg->bps_conf[WRITE][index] = v[1];
1631 tg->iops_conf[READ][index] = v[2];
1632 tg->iops_conf[WRITE][index] = v[3];
1633
1634 if (index == LIMIT_MAX) {
1635 tg->bps[READ][index] = v[0];
1636 tg->bps[WRITE][index] = v[1];
1637 tg->iops[READ][index] = v[2];
1638 tg->iops[WRITE][index] = v[3];
1639 }
1640 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1641 tg->bps_conf[READ][LIMIT_MAX]);
1642 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1643 tg->bps_conf[WRITE][LIMIT_MAX]);
1644 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1645 tg->iops_conf[READ][LIMIT_MAX]);
1646 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1647 tg->iops_conf[WRITE][LIMIT_MAX]);
1648 tg->idletime_threshold_conf = idle_time;
1649 tg->latency_target_conf = latency_time;
1650
1651 /* force user to configure all settings for low limit */
1652 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1653 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1654 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1655 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1656 tg->bps[READ][LIMIT_LOW] = 0;
1657 tg->bps[WRITE][LIMIT_LOW] = 0;
1658 tg->iops[READ][LIMIT_LOW] = 0;
1659 tg->iops[WRITE][LIMIT_LOW] = 0;
1660 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1661 tg->latency_target = DFL_LATENCY_TARGET;
1662 } else if (index == LIMIT_LOW) {
1663 tg->idletime_threshold = tg->idletime_threshold_conf;
1664 tg->latency_target = tg->latency_target_conf;
1665 }
1666
1667 blk_throtl_update_limit_valid(tg->td);
1668 if (tg->td->limit_valid[LIMIT_LOW]) {
1669 if (index == LIMIT_LOW)
1670 tg->td->limit_index = LIMIT_LOW;
1671 } else
1672 tg->td->limit_index = LIMIT_MAX;
1673 tg_conf_updated(tg, index == LIMIT_LOW &&
1674 tg->td->limit_valid[LIMIT_LOW]);
1675 ret = 0;
1676 out_finish:
1677 blkg_conf_finish(&ctx);
1678 return ret ?: nbytes;
1679 }
1680
1681 static struct cftype throtl_files[] = {
1682 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1683 {
1684 .name = "low",
1685 .flags = CFTYPE_NOT_ON_ROOT,
1686 .seq_show = tg_print_limit,
1687 .write = tg_set_limit,
1688 .private = LIMIT_LOW,
1689 },
1690 #endif
1691 {
1692 .name = "max",
1693 .flags = CFTYPE_NOT_ON_ROOT,
1694 .seq_show = tg_print_limit,
1695 .write = tg_set_limit,
1696 .private = LIMIT_MAX,
1697 },
1698 { } /* terminate */
1699 };
1700
1701 static void throtl_shutdown_wq(struct request_queue *q)
1702 {
1703 struct throtl_data *td = q->td;
1704
1705 cancel_work_sync(&td->dispatch_work);
1706 }
1707
1708 struct blkcg_policy blkcg_policy_throtl = {
1709 .dfl_cftypes = throtl_files,
1710 .legacy_cftypes = throtl_legacy_files,
1711
1712 .pd_alloc_fn = throtl_pd_alloc,
1713 .pd_init_fn = throtl_pd_init,
1714 .pd_online_fn = throtl_pd_online,
1715 .pd_offline_fn = throtl_pd_offline,
1716 .pd_free_fn = throtl_pd_free,
1717 };
1718
1719 void blk_throtl_cancel_bios(struct gendisk *disk)
1720 {
1721 struct request_queue *q = disk->queue;
1722 struct cgroup_subsys_state *pos_css;
1723 struct blkcg_gq *blkg;
1724
1725 spin_lock_irq(&q->queue_lock);
1726 /*
1727 * queue_lock is held, so technically the RCU read lock is not needed
1728 * here. However, it is still taken to emphasize that the following
1729 * path needs RCU protection and to avoid a lockdep warning.
1730 */
1731 rcu_read_lock();
1732 blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
1733 struct throtl_grp *tg = blkg_to_tg(blkg);
1734 struct throtl_service_queue *sq = &tg->service_queue;
1735
1736 /*
1737 * Set the flag to make sure throtl_pending_timer_fn() won't
1738 * stop until all throttled bios are dispatched.
1739 */
1740 blkg_to_tg(blkg)->flags |= THROTL_TG_CANCELING;
1741 /*
1742 * Update disptime after setting the above flag to make sure
1743 * throtl_select_dispatch() won't exit without dispatching.
1744 */
1745 tg_update_disptime(tg);
1746
1747 throtl_schedule_pending_timer(sq, jiffies + 1);
1748 }
1749 rcu_read_unlock();
1750 spin_unlock_irq(&q->queue_lock);
1751 }
1752
1753 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1754 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1755 {
1756 unsigned long rtime = jiffies, wtime = jiffies;
1757
1758 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1759 rtime = tg->last_low_overflow_time[READ];
1760 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1761 wtime = tg->last_low_overflow_time[WRITE];
1762 return min(rtime, wtime);
1763 }
1764
1765 /* tg should not be an intermediate node */
1766 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1767 {
1768 struct throtl_service_queue *parent_sq;
1769 struct throtl_grp *parent = tg;
1770 unsigned long ret = __tg_last_low_overflow_time(tg);
1771
1772 while (true) {
1773 parent_sq = parent->service_queue.parent_sq;
1774 parent = sq_to_tg(parent_sq);
1775 if (!parent)
1776 break;
1777
1778 /*
1779 * If the parent doesn't have a low limit, it always reaches its low
1780 * limit, so its overflow time is useless for its children.
1781 */
1782 if (!parent->bps[READ][LIMIT_LOW] &&
1783 !parent->iops[READ][LIMIT_LOW] &&
1784 !parent->bps[WRITE][LIMIT_LOW] &&
1785 !parent->iops[WRITE][LIMIT_LOW])
1786 continue;
1787 if (time_after(__tg_last_low_overflow_time(parent), ret))
1788 ret = __tg_last_low_overflow_time(parent);
1789 }
1790 return ret;
1791 }
1792
1793 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1794 {
1795 /*
1796 * cgroup is idle if:
1797 * - a single idle period is too long: longer than a fixed cap (in case
1798 *   the user configures too big a threshold) or 4 times the idletime threshold
1799 * - the average think time is above the threshold
1800 * - IO latency is largely below the threshold
1801 */
1802 unsigned long time;
1803 bool ret;
1804
1805 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1806 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1807 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1808 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1809 tg->avg_idletime > tg->idletime_threshold ||
1810 (tg->latency_target && tg->bio_cnt &&
1811 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1812 throtl_log(&tg->service_queue,
1813 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1814 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1815 tg->bio_cnt, ret, tg->td->scale);
1816 return ret;
1817 }
1818
1819 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1820 {
1821 struct throtl_service_queue *sq = &tg->service_queue;
1822 bool read_limit, write_limit;
1823
1824 /*
1825 * If the cgroup reaches its low limit (a low limit of 0 is always
1826 * reached), it's ok to upgrade to the next limit.
1827 */
1828 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1829 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1830 if (!read_limit && !write_limit)
1831 return true;
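	/*
	 * If bios are queued in every direction that has a low limit
	 * configured, the group has hit its low limit and may upgrade.
	 */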
1832 if (read_limit && sq->nr_queued[READ] &&
1833 (!write_limit || sq->nr_queued[WRITE]))
1834 return true;
1835 if (write_limit && sq->nr_queued[WRITE] &&
1836 (!read_limit || sq->nr_queued[READ]))
1837 return true;
1838
1839 if (time_after_eq(jiffies,
1840 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1841 throtl_tg_is_idle(tg))
1842 return true;
1843 return false;
1844 }
1845
1846 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1847 {
1848 while (true) {
1849 if (throtl_tg_can_upgrade(tg))
1850 return true;
1851 tg = sq_to_tg(tg->service_queue.parent_sq);
1852 if (!tg || !tg_to_blkg(tg)->parent)
1853 return false;
1854 }
1855 return false;
1856 }
1857
1858 static bool throtl_can_upgrade(struct throtl_data *td,
1859 struct throtl_grp *this_tg)
1860 {
1861 struct cgroup_subsys_state *pos_css;
1862 struct blkcg_gq *blkg;
1863
1864 if (td->limit_index != LIMIT_LOW)
1865 return false;
1866
1867 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1868 return false;
1869
1870 rcu_read_lock();
1871 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1872 struct throtl_grp *tg = blkg_to_tg(blkg);
1873
1874 if (tg == this_tg)
1875 continue;
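		/* only leaf cgroups are checked; throtl_hierarchy_can_upgrade() walks up from them */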
1876 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1877 continue;
1878 if (!throtl_hierarchy_can_upgrade(tg)) {
1879 rcu_read_unlock();
1880 return false;
1881 }
1882 }
1883 rcu_read_unlock();
1884 return true;
1885 }
1886
1887 static void throtl_upgrade_check(struct throtl_grp *tg)
1888 {
1889 unsigned long now = jiffies;
1890
1891 if (tg->td->limit_index != LIMIT_LOW)
1892 return;
1893
1894 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1895 return;
1896
1897 tg->last_check_time = now;
1898
1899 if (!time_after_eq(now,
1900 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1901 return;
1902
1903 if (throtl_can_upgrade(tg->td, NULL))
1904 throtl_upgrade_state(tg->td);
1905 }
1906
1907 static void throtl_upgrade_state(struct throtl_data *td)
1908 {
1909 struct cgroup_subsys_state *pos_css;
1910 struct blkcg_gq *blkg;
1911
1912 throtl_log(&td->service_queue, "upgrade to max");
1913 td->limit_index = LIMIT_MAX;
1914 td->low_upgrade_time = jiffies;
1915 td->scale = 0;
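	/* kick every group so bios queued under the low limit get re-dispatched */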
1916 rcu_read_lock();
1917 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1918 struct throtl_grp *tg = blkg_to_tg(blkg);
1919 struct throtl_service_queue *sq = &tg->service_queue;
1920
1921 tg->disptime = jiffies - 1;
1922 throtl_select_dispatch(sq);
1923 throtl_schedule_next_dispatch(sq, true);
1924 }
1925 rcu_read_unlock();
1926 throtl_select_dispatch(&td->service_queue);
1927 throtl_schedule_next_dispatch(&td->service_queue, true);
1928 queue_work(kthrotld_workqueue, &td->dispatch_work);
1929 }
1930
1931 static void throtl_downgrade_state(struct throtl_data *td)
1932 {
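	/*
	 * Halve the scale first; while it is still non-zero, only pull
	 * low_upgrade_time back instead of dropping to LIMIT_LOW right away.
	 */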
1933 td->scale /= 2;
1934
1935 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1936 if (td->scale) {
1937 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1938 return;
1939 }
1940
1941 td->limit_index = LIMIT_LOW;
1942 td->low_downgrade_time = jiffies;
1943 }
1944
1945 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1946 {
1947 struct throtl_data *td = tg->td;
1948 unsigned long now = jiffies;
1949
1950 /*
1951 * If cgroup is below low limit, consider downgrade and throttle other
1952 * cgroups
1953 */
1954 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1955 time_after_eq(now, tg_last_low_overflow_time(tg) +
1956 td->throtl_slice) &&
1957 (!throtl_tg_is_idle(tg) ||
1958 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1959 return true;
1960 return false;
1961 }
1962
1963 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1964 {
1965 while (true) {
1966 if (!throtl_tg_can_downgrade(tg))
1967 return false;
1968 tg = sq_to_tg(tg->service_queue.parent_sq);
1969 if (!tg || !tg_to_blkg(tg)->parent)
1970 break;
1971 }
1972 return true;
1973 }
1974
1975 static void throtl_downgrade_check(struct throtl_grp *tg)
1976 {
1977 uint64_t bps;
1978 unsigned int iops;
1979 unsigned long elapsed_time;
1980 unsigned long now = jiffies;
1981
1982 if (tg->td->limit_index != LIMIT_MAX ||
1983 !tg->td->limit_valid[LIMIT_LOW])
1984 return;
1985 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1986 return;
1987 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1988 return;
1989
1990 elapsed_time = now - tg->last_check_time;
1991 tg->last_check_time = now;
1992
1993 if (time_before(now, tg_last_low_overflow_time(tg) +
1994 tg->td->throtl_slice))
1995 return;
1996
1997 if (tg->bps[READ][LIMIT_LOW]) {
1998 bps = tg->last_bytes_disp[READ] * HZ;
1999 do_div(bps, elapsed_time);
2000 if (bps >= tg->bps[READ][LIMIT_LOW])
2001 tg->last_low_overflow_time[READ] = now;
2002 }
2003
2004 if (tg->bps[WRITE][LIMIT_LOW]) {
2005 bps = tg->last_bytes_disp[WRITE] * HZ;
2006 do_div(bps, elapsed_time);
2007 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2008 tg->last_low_overflow_time[WRITE] = now;
2009 }
2010
2011 if (tg->iops[READ][LIMIT_LOW]) {
2012 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2013 if (iops >= tg->iops[READ][LIMIT_LOW])
2014 tg->last_low_overflow_time[READ] = now;
2015 }
2016
2017 if (tg->iops[WRITE][LIMIT_LOW]) {
2018 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2019 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2020 tg->last_low_overflow_time[WRITE] = now;
2021 }
2022
2023 /*
2024 * If cgroup is below low limit, consider downgrade and throttle other
2025 * cgroups
2026 */
2027 if (throtl_hierarchy_can_downgrade(tg))
2028 throtl_downgrade_state(tg->td);
2029
2030 tg->last_bytes_disp[READ] = 0;
2031 tg->last_bytes_disp[WRITE] = 0;
2032 tg->last_io_disp[READ] = 0;
2033 tg->last_io_disp[WRITE] = 0;
2034 }
2035
2036 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2037 {
2038 unsigned long now;
2039 unsigned long last_finish_time = tg->last_finish_time;
2040
2041 if (last_finish_time == 0)
2042 return;
2043
2044 now = ktime_get_ns() >> 10;
2045 if (now <= last_finish_time ||
2046 last_finish_time == tg->checked_last_finish_time)
2047 return;
2048
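	/* EWMA: keep 7/8 of the old average, add 1/8 of the latest idle period */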
2049 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2050 tg->checked_last_finish_time = last_finish_time;
2051 }
2052
2053 static void throtl_update_latency_buckets(struct throtl_data *td)
2054 {
2055 struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2056 int i, cpu, rw;
2057 unsigned long last_latency[2] = { 0 };
2058 unsigned long latency[2];
2059
2060 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
2061 return;
2062 if (time_before(jiffies, td->last_calculate_time + HZ))
2063 return;
2064 td->last_calculate_time = jiffies;
2065
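	/*
	 * Fold the per-cpu samples into tmp_buckets and compute a per-bucket
	 * average once at least 32 samples have accumulated.
	 */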
2066 memset(avg_latency, 0, sizeof(avg_latency));
2067 for (rw = READ; rw <= WRITE; rw++) {
2068 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2069 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2070
2071 for_each_possible_cpu(cpu) {
2072 struct latency_bucket *bucket;
2073
2074 /* this isn't race free, but ok in practice */
2075 bucket = per_cpu_ptr(td->latency_buckets[rw],
2076 cpu);
2077 tmp->total_latency += bucket[i].total_latency;
2078 tmp->samples += bucket[i].samples;
2079 bucket[i].total_latency = 0;
2080 bucket[i].samples = 0;
2081 }
2082
2083 if (tmp->samples >= 32) {
2084 int samples = tmp->samples;
2085
2086 latency[rw] = tmp->total_latency;
2087
2088 tmp->total_latency = 0;
2089 tmp->samples = 0;
2090 latency[rw] /= samples;
2091 if (latency[rw] == 0)
2092 continue;
2093 avg_latency[rw][i].latency = latency[rw];
2094 }
2095 }
2096 }
2097
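	/*
	 * Smooth each bucket with a 7/8 weighted average and keep the
	 * estimates monotonically non-decreasing across bucket sizes.
	 */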
2098 for (rw = READ; rw <= WRITE; rw++) {
2099 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2100 if (!avg_latency[rw][i].latency) {
2101 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2102 td->avg_buckets[rw][i].latency =
2103 last_latency[rw];
2104 continue;
2105 }
2106
2107 if (!td->avg_buckets[rw][i].valid)
2108 latency[rw] = avg_latency[rw][i].latency;
2109 else
2110 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2111 avg_latency[rw][i].latency) >> 3;
2112
2113 td->avg_buckets[rw][i].latency = max(latency[rw],
2114 last_latency[rw]);
2115 td->avg_buckets[rw][i].valid = true;
2116 last_latency[rw] = td->avg_buckets[rw][i].latency;
2117 }
2118 }
2119
2120 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2121 throtl_log(&td->service_queue,
2122 "Latency bucket %d: read latency=%ld, read valid=%d, "
2123 "write latency=%ld, write valid=%d", i,
2124 td->avg_buckets[READ][i].latency,
2125 td->avg_buckets[READ][i].valid,
2126 td->avg_buckets[WRITE][i].latency,
2127 td->avg_buckets[WRITE][i].valid);
2128 }
2129 #else
2130 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2131 {
2132 }
2133
2134 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2135 {
2136 }
2137
2138 static void throtl_downgrade_check(struct throtl_grp *tg)
2139 {
2140 }
2141
2142 static void throtl_upgrade_check(struct throtl_grp *tg)
2143 {
2144 }
2145
2146 static bool throtl_can_upgrade(struct throtl_data *td,
2147 struct throtl_grp *this_tg)
2148 {
2149 return false;
2150 }
2151
2152 static void throtl_upgrade_state(struct throtl_data *td)
2153 {
2154 }
2155 #endif
2156
2157 bool __blk_throtl_bio(struct bio *bio)
2158 {
2159 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2160 struct blkcg_gq *blkg = bio->bi_blkg;
2161 struct throtl_qnode *qn = NULL;
2162 struct throtl_grp *tg = blkg_to_tg(blkg);
2163 struct throtl_service_queue *sq;
2164 bool rw = bio_data_dir(bio);
2165 bool throttled = false;
2166 struct throtl_data *td = tg->td;
2167
2168 rcu_read_lock();
2169
2170 if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2171 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2172 bio->bi_iter.bi_size);
2173 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2174 }
2175
2176 spin_lock_irq(&q->queue_lock);
2177
2178 throtl_update_latency_buckets(td);
2179
2180 blk_throtl_update_idletime(tg);
2181
2182 sq = &tg->service_queue;
2183
2184 again:
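	/*
	 * Walk up the hierarchy: at each level either charge the bio and
	 * climb to the parent, or queue it at the first level that is over
	 * its limit.
	 */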
2185 while (true) {
2186 if (tg->last_low_overflow_time[rw] == 0)
2187 tg->last_low_overflow_time[rw] = jiffies;
2188 throtl_downgrade_check(tg);
2189 throtl_upgrade_check(tg);
2190 /* throtl is FIFO - if bios are already queued, should queue */
2191 if (sq->nr_queued[rw])
2192 break;
2193
2194 /* if above limits, break to queue */
2195 if (!tg_may_dispatch(tg, bio, NULL)) {
2196 tg->last_low_overflow_time[rw] = jiffies;
2197 if (throtl_can_upgrade(td, tg)) {
2198 throtl_upgrade_state(td);
2199 goto again;
2200 }
2201 break;
2202 }
2203
2204 /* within limits, let's charge and dispatch directly */
2205 throtl_charge_bio(tg, bio);
2206
2207 /*
2208 * We need to trim slice even when bios are not being queued
2209 * otherwise it might happen that a bio is not queued for
2210 * a long time and slice keeps on extending and trim is not
2211 * called for a long time. Now if limits are reduced suddenly
2212 * we take into account all the IO dispatched so far at the new
2213 * low rate and newly queued IO gets a really long dispatch
2214 * time.
2215 *
2216 * So keep on trimming slice even if bio is not queued.
2217 */
2218 throtl_trim_slice(tg, rw);
2219
2220 /*
2221 * @bio passed through this layer without being throttled.
2222 * Climb up the ladder. If we're already at the top, it
2223 * can be executed directly.
2224 */
2225 qn = &tg->qnode_on_parent[rw];
2226 sq = sq->parent_sq;
2227 tg = sq_to_tg(sq);
2228 if (!tg) {
2229 bio_set_flag(bio, BIO_BPS_THROTTLED);
2230 goto out_unlock;
2231 }
2232 }
2233
2234 /* out-of-limit, queue to @tg */
2235 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2236 rw == READ ? 'R' : 'W',
2237 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2238 tg_bps_limit(tg, rw),
2239 tg->io_disp[rw], tg_iops_limit(tg, rw),
2240 sq->nr_queued[READ], sq->nr_queued[WRITE]);
2241
2242 tg->last_low_overflow_time[rw] = jiffies;
2243
2244 td->nr_queued[rw]++;
2245 throtl_add_bio_tg(bio, qn, tg);
2246 throttled = true;
2247
2248 /*
2249 * Update @tg's dispatch time and force schedule dispatch if @tg
2250 * was empty before @bio. The forced scheduling isn't likely to
2251 * cause undue delay as @bio is likely to be dispatched directly if
2252 * its @tg's disptime is not in the future.
2253 */
2254 if (tg->flags & THROTL_TG_WAS_EMPTY) {
2255 tg_update_disptime(tg);
2256 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2257 }
2258
2259 out_unlock:
2260 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2261 if (throttled || !td->track_bio_latency)
2262 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2263 #endif
2264 spin_unlock_irq(&q->queue_lock);
2265
2266 rcu_read_unlock();
2267 return throttled;
2268 }
2269
2270 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2271 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2272 enum req_op op, unsigned long time)
2273 {
2274 const bool rw = op_is_write(op);
2275 struct latency_bucket *latency;
2276 int index;
2277
2278 if (!td || td->limit_index != LIMIT_LOW ||
2279 !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2280 !blk_queue_nonrot(td->queue))
2281 return;
2282
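	/* map the request size to one of the LATENCY_BUCKET_SIZE buckets */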
2283 index = request_bucket_index(size);
2284
2285 latency = get_cpu_ptr(td->latency_buckets[rw]);
2286 latency[index].total_latency += time;
2287 latency[index].samples++;
2288 put_cpu_ptr(td->latency_buckets[rw]);
2289 }
2290
2291 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2292 {
2293 struct request_queue *q = rq->q;
2294 struct throtl_data *td = q->td;
2295
2296 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2297 time_ns >> 10);
2298 }
2299
2300 void blk_throtl_bio_endio(struct bio *bio)
2301 {
2302 struct blkcg_gq *blkg;
2303 struct throtl_grp *tg;
2304 u64 finish_time_ns;
2305 unsigned long finish_time;
2306 unsigned long start_time;
2307 unsigned long lat;
2308 int rw = bio_data_dir(bio);
2309
2310 blkg = bio->bi_blkg;
2311 if (!blkg)
2312 return;
2313 tg = blkg_to_tg(blkg);
2314 if (!tg->td->limit_valid[LIMIT_LOW])
2315 return;
2316
2317 finish_time_ns = ktime_get_ns();
2318 tg->last_finish_time = finish_time_ns >> 10;
2319
2320 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2321 finish_time = __bio_issue_time(finish_time_ns) >> 10;
2322 if (!start_time || finish_time <= start_time)
2323 return;
2324
2325 lat = finish_time - start_time;
2326 	/* this is only for bio-based drivers */
2327 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2328 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2329 bio_op(bio), lat);
2330
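	/*
	 * A bio is "bad" when its latency exceeds the per-bucket average plus
	 * the cgroup's latency target; the bad/total ratio feeds
	 * throtl_tg_is_idle().
	 */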
2331 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2332 int bucket;
2333 unsigned int threshold;
2334
2335 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2336 threshold = tg->td->avg_buckets[rw][bucket].latency +
2337 tg->latency_target;
2338 if (lat > threshold)
2339 tg->bad_bio_cnt++;
2340 /*
2341 * Not race free, so the count could be slightly wrong, which means
2342 * cgroups may end up being throttled unnecessarily.
2343 */
2344 tg->bio_cnt++;
2345 }
2346
2347 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2348 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2349 tg->bio_cnt /= 2;
2350 tg->bad_bio_cnt /= 2;
2351 }
2352 }
2353 #endif
2354
2355 int blk_throtl_init(struct gendisk *disk)
2356 {
2357 struct request_queue *q = disk->queue;
2358 struct throtl_data *td;
2359 int ret;
2360
2361 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2362 if (!td)
2363 return -ENOMEM;
2364 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2365 LATENCY_BUCKET_SIZE, __alignof__(u64));
2366 if (!td->latency_buckets[READ]) {
2367 kfree(td);
2368 return -ENOMEM;
2369 }
2370 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2371 LATENCY_BUCKET_SIZE, __alignof__(u64));
2372 if (!td->latency_buckets[WRITE]) {
2373 free_percpu(td->latency_buckets[READ]);
2374 kfree(td);
2375 return -ENOMEM;
2376 }
2377
2378 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2379 throtl_service_queue_init(&td->service_queue);
2380
2381 q->td = td;
2382 td->queue = q;
2383
2384 td->limit_valid[LIMIT_MAX] = true;
2385 td->limit_index = LIMIT_MAX;
2386 td->low_upgrade_time = jiffies;
2387 td->low_downgrade_time = jiffies;
2388
2389 /* activate policy */
2390 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2391 if (ret) {
2392 free_percpu(td->latency_buckets[READ]);
2393 free_percpu(td->latency_buckets[WRITE]);
2394 kfree(td);
2395 }
2396 return ret;
2397 }
2398
2399 void blk_throtl_exit(struct gendisk *disk)
2400 {
2401 struct request_queue *q = disk->queue;
2402
2403 BUG_ON(!q->td);
2404 del_timer_sync(&q->td->service_queue.pending_timer);
2405 throtl_shutdown_wq(q);
2406 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2407 free_percpu(q->td->latency_buckets[READ]);
2408 free_percpu(q->td->latency_buckets[WRITE]);
2409 kfree(q->td);
2410 }
2411
2412 void blk_throtl_register(struct gendisk *disk)
2413 {
2414 struct request_queue *q = disk->queue;
2415 struct throtl_data *td;
2416 int i;
2417
2418 td = q->td;
2419 BUG_ON(!td);
2420
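	/* pick slice and latency-filter defaults based on the device type */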
2421 if (blk_queue_nonrot(q)) {
2422 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2423 td->filtered_latency = LATENCY_FILTERED_SSD;
2424 } else {
2425 td->throtl_slice = DFL_THROTL_SLICE_HD;
2426 td->filtered_latency = LATENCY_FILTERED_HD;
2427 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2428 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2429 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2430 }
2431 }
2432 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2433 /* if no low limit, use previous default */
2434 td->throtl_slice = DFL_THROTL_SLICE_HD;
2435 #endif
2436
2437 td->track_bio_latency = !queue_is_mq(q);
2438 if (!td->track_bio_latency)
2439 blk_stat_enable_accounting(q);
2440 }
2441
2442 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2443 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2444 {
2445 if (!q->td)
2446 return -EINVAL;
2447 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2448 }
2449
2450 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2451 const char *page, size_t count)
2452 {
2453 unsigned long v;
2454 unsigned long t;
2455
2456 if (!q->td)
2457 return -EINVAL;
2458 if (kstrtoul(page, 10, &v))
2459 return -EINVAL;
2460 t = msecs_to_jiffies(v);
2461 if (t == 0 || t > MAX_THROTL_SLICE)
2462 return -EINVAL;
2463 q->td->throtl_slice = t;
2464 return count;
2465 }
2466 #endif
2467
2468 static int __init throtl_init(void)
2469 {
2470 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2471 if (!kthrotld_workqueue)
2472 panic("Failed to create kthrotld\n");
2473
2474 return blkcg_policy_register(&blkcg_policy_throtl);
2475 }
2476
2477 module_init(throtl_init);
2478