1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * cgroups support for the BFQ I/O scheduler.
4 */
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/blkdev.h>
8 #include <linux/cgroup.h>
9 #include <linux/ktime.h>
10 #include <linux/rbtree.h>
11 #include <linux/ioprio.h>
12 #include <linux/sbitmap.h>
13 #include <linux/delay.h>
14
15 #include "elevator.h"
16 #include "bfq-iosched.h"
17
18 #ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
20 {
21 int ret;
22
23 ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
24 if (ret)
25 return ret;
26
27 atomic64_set(&stat->aux_cnt, 0);
28 return 0;
29 }
30
static void bfq_stat_exit(struct bfq_stat *stat)
32 {
33 percpu_counter_destroy(&stat->cpu_cnt);
34 }
35
36 /**
37 * bfq_stat_add - add a value to a bfq_stat
38 * @stat: target bfq_stat
39 * @val: value to add
40 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
43 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
45 {
46 percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
47 }
48
49 /**
50 * bfq_stat_read - read the current value of a bfq_stat
51 * @stat: bfq_stat to read
52 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
54 {
55 return percpu_counter_sum_positive(&stat->cpu_cnt);
56 }
57
58 /**
59 * bfq_stat_reset - reset a bfq_stat
60 * @stat: bfq_stat to reset
61 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
63 {
64 percpu_counter_set(&stat->cpu_cnt, 0);
65 atomic64_set(&stat->aux_cnt, 0);
66 }
67
68 /**
69 * bfq_stat_add_aux - add a bfq_stat into another's aux count
70 * @to: the destination bfq_stat
71 * @from: the source
72 *
73 * Add @from's count including the aux one to @to's aux count.
74 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
77 {
78 atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
79 &to->aux_cnt);
80 }
81
82 /**
83 * blkg_prfill_stat - prfill callback for bfq_stat
84 * @sf: seq_file to print to
85 * @pd: policy private data of interest
86 * @off: offset to the bfq_stat in @pd
87 *
88 * prfill callback for printing a bfq_stat.
89 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
92 {
93 return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
94 }
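
/*
 * A sketch of how @off is supplied: the cftype entries below store
 * offsetof(struct bfq_group, stats.<counter>) in their .private field,
 * e.g. .private = offsetof(struct bfq_group, stats.time), and
 * bfqg_print_stat() passes that value here, so that (void *)pd + off
 * points at the embedded bfq_stat to print.
 */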
95
96 /* bfqg stats flags */
97 enum bfqg_stats_flags {
98 BFQG_stats_waiting = 0,
99 BFQG_stats_idling,
100 BFQG_stats_empty,
101 };
102
103 #define BFQG_FLAG_FNS(name) \
104 static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \
105 { \
106 stats->flags |= (1 << BFQG_stats_##name); \
107 } \
108 static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \
109 { \
110 stats->flags &= ~(1 << BFQG_stats_##name); \
111 } \
112 static int bfqg_stats_##name(struct bfqg_stats *stats) \
113 { \
114 return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
115 } \
116
BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
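
/*
 * For reference, a sketch of what BFQG_FLAG_FNS(waiting) above expands to:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags |= (1 << BFQG_stats_waiting);
 *	}
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{
 *		stats->flags &= ~(1 << BFQG_stats_waiting);
 *	}
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{
 *		return (stats->flags & (1 << BFQG_stats_waiting)) != 0;
 *	}
 */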
121
122 /* This should be called with the scheduler lock held. */
123 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
124 {
125 u64 now;
126
127 if (!bfqg_stats_waiting(stats))
128 return;
129
130 now = ktime_get_ns();
131 if (now > stats->start_group_wait_time)
132 bfq_stat_add(&stats->group_wait_time,
133 now - stats->start_group_wait_time);
134 bfqg_stats_clear_waiting(stats);
135 }
136
137 /* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
140 {
141 struct bfqg_stats *stats = &bfqg->stats;
142
143 if (bfqg_stats_waiting(stats))
144 return;
145 if (bfqg == curr_bfqg)
146 return;
147 stats->start_group_wait_time = ktime_get_ns();
148 bfqg_stats_mark_waiting(stats);
149 }
150
151 /* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
153 {
154 u64 now;
155
156 if (!bfqg_stats_empty(stats))
157 return;
158
159 now = ktime_get_ns();
160 if (now > stats->start_empty_time)
161 bfq_stat_add(&stats->empty_time,
162 now - stats->start_empty_time);
163 bfqg_stats_clear_empty(stats);
164 }
165
void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
167 {
168 bfq_stat_add(&bfqg->stats.dequeue, 1);
169 }
170
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
172 {
173 struct bfqg_stats *stats = &bfqg->stats;
174
175 if (blkg_rwstat_total(&stats->queued))
176 return;
177
178 /*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in the parent group and moved to this group while being
	 * added to the service tree. Just ignore the event and move on.
182 */
183 if (bfqg_stats_empty(stats))
184 return;
185
186 stats->start_empty_time = ktime_get_ns();
187 bfqg_stats_mark_empty(stats);
188 }
189
void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
191 {
192 struct bfqg_stats *stats = &bfqg->stats;
193
194 if (bfqg_stats_idling(stats)) {
195 u64 now = ktime_get_ns();
196
197 if (now > stats->start_idle_time)
198 bfq_stat_add(&stats->idle_time,
199 now - stats->start_idle_time);
200 bfqg_stats_clear_idling(stats);
201 }
202 }
203
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
205 {
206 struct bfqg_stats *stats = &bfqg->stats;
207
208 stats->start_idle_time = ktime_get_ns();
209 bfqg_stats_mark_idling(stats);
210 }
211
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
213 {
214 struct bfqg_stats *stats = &bfqg->stats;
215
216 bfq_stat_add(&stats->avg_queue_size_sum,
217 blkg_rwstat_total(&stats->queued));
218 bfq_stat_add(&stats->avg_queue_size_samples, 1);
219 bfqg_stats_update_group_wait_time(stats);
220 }
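
/*
 * The two counters updated above feed the avg_queue_size statistic
 * exported below (see bfqg_prfill_avg_queue_size()): the reported value
 * is avg_queue_size_sum / avg_queue_size_samples. For example, samples
 * taken with 2, 4 and 3 queued requests give sum = 9, samples = 3, and
 * an average queue size of 3.
 */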
221
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
224 {
225 blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
226 bfqg_stats_end_empty_time(&bfqg->stats);
227 if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
228 bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
229 }
230
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
232 {
233 blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
234 }
235
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
237 {
238 blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
239 }
240
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
243 {
244 struct bfqg_stats *stats = &bfqg->stats;
245 u64 now = ktime_get_ns();
246
247 if (now > io_start_time_ns)
248 blkg_rwstat_add(&stats->service_time, opf,
249 now - io_start_time_ns);
250 if (io_start_time_ns > start_time_ns)
251 blkg_rwstat_add(&stats->wait_time, opf,
252 io_start_time_ns - start_time_ns);
253 }
254
255 #else /* CONFIG_BFQ_CGROUP_DEBUG */
256
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
263
264 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
265
266 #ifdef CONFIG_BFQ_GROUP_IOSCHED
267
268 /*
269 * blk-cgroup policy-related handlers
270 * The following functions help in converting between blk-cgroup
271 * internal structures and BFQ-specific structures.
272 */
273
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
275 {
276 return pd ? container_of(pd, struct bfq_group, pd) : NULL;
277 }
278
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
280 {
281 return pd_to_blkg(&bfqg->pd);
282 }
283
static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
285 {
286 return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
287 }
288
289 /*
290 * bfq_group handlers
291 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
294 */
295
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
297 {
298 struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;
299
300 return pblkg ? blkg_to_bfqg(pblkg) : NULL;
301 }
302
struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
304 {
305 struct bfq_entity *group_entity = bfqq->entity.parent;
306
307 return group_entity ? container_of(group_entity, struct bfq_group,
308 entity) :
309 bfqq->bfqd->root_group;
310 }
311
312 /*
313 * The following two functions handle get and put of a bfq_group by
314 * wrapping the related blk-cgroup hooks.
315 */
316
static void bfqg_get(struct bfq_group *bfqg)
318 {
319 bfqg->ref++;
320 }
321
static void bfqg_put(struct bfq_group *bfqg)
323 {
324 bfqg->ref--;
325
326 if (bfqg->ref == 0)
327 kfree(bfqg);
328 }
329
static void bfqg_and_blkg_get(struct bfq_group *bfqg)
331 {
332 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
333 bfqg_get(bfqg);
334
335 blkg_get(bfqg_to_blkg(bfqg));
336 }
337
void bfqg_and_blkg_put(struct bfq_group *bfqg)
339 {
340 blkg_put(bfqg_to_blkg(bfqg));
341
342 bfqg_put(bfqg);
343 }
344
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
346 {
347 struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
348
349 if (!bfqg)
350 return;
351
352 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
353 blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
354 }
355
356 /* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
358 {
359 #ifdef CONFIG_BFQ_CGROUP_DEBUG
360 /* queued stats shouldn't be cleared */
361 blkg_rwstat_reset(&stats->merged);
362 blkg_rwstat_reset(&stats->service_time);
363 blkg_rwstat_reset(&stats->wait_time);
364 bfq_stat_reset(&stats->time);
365 bfq_stat_reset(&stats->avg_queue_size_sum);
366 bfq_stat_reset(&stats->avg_queue_size_samples);
367 bfq_stat_reset(&stats->dequeue);
368 bfq_stat_reset(&stats->group_wait_time);
369 bfq_stat_reset(&stats->idle_time);
370 bfq_stat_reset(&stats->empty_time);
371 #endif
372 }
373
374 /* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
376 {
377 if (!to || !from)
378 return;
379
380 #ifdef CONFIG_BFQ_CGROUP_DEBUG
381 /* queued stats shouldn't be cleared */
382 blkg_rwstat_add_aux(&to->merged, &from->merged);
383 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
384 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
386 bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
387 bfq_stat_add_aux(&to->avg_queue_size_samples,
388 &from->avg_queue_size_samples);
389 bfq_stat_add_aux(&to->dequeue, &from->dequeue);
390 bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
391 bfq_stat_add_aux(&to->idle_time, &from->idle_time);
392 bfq_stat_add_aux(&to->empty_time, &from->empty_time);
393 #endif
394 }
395
396 /*
397 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
398 * recursive stats can still account for the amount used by this bfqg after
399 * it's gone.
400 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
402 {
403 struct bfq_group *parent;
404
405 if (!bfqg) /* root_group */
406 return;
407
408 parent = bfqg_parent(bfqg);
409
410 lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
411
412 if (unlikely(!parent))
413 return;
414
415 bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
416 bfqg_stats_reset(&bfqg->stats);
417 }
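
/*
 * The aux counters filled in above are what keeps recursive statistics
 * stable across group removal: bfqg_prfill_stat_recursive() below sums,
 * for each descendant, bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt),
 * so the contribution of a removed child survives in its parent.
 */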
418
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
420 {
421 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
422
423 entity->weight = entity->new_weight;
424 entity->orig_weight = entity->new_weight;
425 if (bfqq) {
426 bfqq->ioprio = bfqq->new_ioprio;
427 bfqq->ioprio_class = bfqq->new_ioprio_class;
428 /*
429 * Make sure that bfqg and its associated blkg do not
430 * disappear before entity.
431 */
432 bfqg_and_blkg_get(bfqg);
433 }
434 entity->parent = bfqg->my_entity; /* NULL for root group */
435 entity->sched_data = &bfqg->sched_data;
436 }
437
static void bfqg_stats_exit(struct bfqg_stats *stats)
439 {
440 blkg_rwstat_exit(&stats->bytes);
441 blkg_rwstat_exit(&stats->ios);
442 #ifdef CONFIG_BFQ_CGROUP_DEBUG
443 blkg_rwstat_exit(&stats->merged);
444 blkg_rwstat_exit(&stats->service_time);
445 blkg_rwstat_exit(&stats->wait_time);
446 blkg_rwstat_exit(&stats->queued);
447 bfq_stat_exit(&stats->time);
448 bfq_stat_exit(&stats->avg_queue_size_sum);
449 bfq_stat_exit(&stats->avg_queue_size_samples);
450 bfq_stat_exit(&stats->dequeue);
451 bfq_stat_exit(&stats->group_wait_time);
452 bfq_stat_exit(&stats->idle_time);
453 bfq_stat_exit(&stats->empty_time);
454 #endif
455 }
456
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
458 {
459 if (blkg_rwstat_init(&stats->bytes, gfp) ||
460 blkg_rwstat_init(&stats->ios, gfp))
461 goto error;
462
463 #ifdef CONFIG_BFQ_CGROUP_DEBUG
464 if (blkg_rwstat_init(&stats->merged, gfp) ||
465 blkg_rwstat_init(&stats->service_time, gfp) ||
466 blkg_rwstat_init(&stats->wait_time, gfp) ||
467 blkg_rwstat_init(&stats->queued, gfp) ||
468 bfq_stat_init(&stats->time, gfp) ||
469 bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
470 bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
471 bfq_stat_init(&stats->dequeue, gfp) ||
472 bfq_stat_init(&stats->group_wait_time, gfp) ||
473 bfq_stat_init(&stats->idle_time, gfp) ||
474 bfq_stat_init(&stats->empty_time, gfp))
475 goto error;
476 #endif
477
478 return 0;
479
480 error:
481 bfqg_stats_exit(stats);
482 return -ENOMEM;
483 }
484
static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
486 {
487 return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
488 }
489
static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
491 {
492 return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
493 }
494
static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
496 {
497 struct bfq_group_data *bgd;
498
499 bgd = kzalloc(sizeof(*bgd), gfp);
500 if (!bgd)
501 return NULL;
502 return &bgd->pd;
503 }
504
static void bfq_cpd_init(struct blkcg_policy_data *cpd)
506 {
507 struct bfq_group_data *d = cpd_to_bfqgd(cpd);
508
509 d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
510 CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
511 }
512
static void bfq_cpd_free(struct blkcg_policy_data *cpd)
514 {
515 kfree(cpd_to_bfqgd(cpd));
516 }
517
static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
520 {
521 struct bfq_group *bfqg;
522
523 bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
524 if (!bfqg)
525 return NULL;
526
527 if (bfqg_stats_init(&bfqg->stats, gfp)) {
528 kfree(bfqg);
529 return NULL;
530 }
531
532 /* see comments in bfq_bic_update_cgroup for why refcounting */
533 bfqg_get(bfqg);
534 return &bfqg->pd;
535 }
536
static void bfq_pd_init(struct blkg_policy_data *pd)
538 {
539 struct blkcg_gq *blkg = pd_to_blkg(pd);
540 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
541 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
542 struct bfq_entity *entity = &bfqg->entity;
543 struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
544
545 entity->orig_weight = entity->weight = entity->new_weight = d->weight;
546 entity->my_sched_data = &bfqg->sched_data;
547 entity->last_bfqq_created = NULL;
548
549 bfqg->my_entity = entity; /*
550 * the root_group's will be set to NULL
551 * in bfq_init_queue()
552 */
553 bfqg->bfqd = bfqd;
554 bfqg->active_entities = 0;
555 bfqg->online = true;
556 bfqg->rq_pos_tree = RB_ROOT;
557 }
558
static void bfq_pd_free(struct blkg_policy_data *pd)
560 {
561 struct bfq_group *bfqg = pd_to_bfqg(pd);
562
563 bfqg_stats_exit(&bfqg->stats);
564 bfqg_put(bfqg);
565 }
566
static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
568 {
569 struct bfq_group *bfqg = pd_to_bfqg(pd);
570
571 bfqg_stats_reset(&bfqg->stats);
572 }
573
static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
576 {
577 struct bfq_entity *entity;
578
579 entity = &bfqg->entity;
580 entity->parent = parent->my_entity;
581 entity->sched_data = &parent->sched_data;
582 }
583
static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
585 {
586 struct bfq_group *parent;
587 struct bfq_entity *entity;
588
589 /*
590 * Update chain of bfq_groups as we might be handling a leaf group
591 * which, along with some of its relatives, has not been hooked yet
592 * to the private hierarchy of BFQ.
593 */
594 entity = &bfqg->entity;
595 for_each_entity(entity) {
596 struct bfq_group *curr_bfqg = container_of(entity,
597 struct bfq_group, entity);
598 if (curr_bfqg != bfqd->root_group) {
599 parent = bfqg_parent(curr_bfqg);
600 if (!parent)
601 parent = bfqd->root_group;
602 bfq_group_set_parent(curr_bfqg, parent);
603 }
604 }
605 }
606
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
608 {
609 struct blkcg_gq *blkg = bio->bi_blkg;
610 struct bfq_group *bfqg;
611
612 while (blkg) {
613 if (!blkg->online) {
614 blkg = blkg->parent;
615 continue;
616 }
617 bfqg = blkg_to_bfqg(blkg);
618 if (bfqg->online) {
619 bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
620 return bfqg;
621 }
622 blkg = blkg->parent;
623 }
624 bio_associate_blkg_from_css(bio,
625 &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
626 return bfqd->root_group;
627 }
628
629 /**
630 * bfq_bfqq_move - migrate @bfqq to @bfqg.
631 * @bfqd: queue descriptor.
632 * @bfqq: the queue to move.
633 * @bfqg: the group to move to.
634 *
635 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
636 * it on the new one. Avoid putting the entity on the old group idle tree.
637 *
638 * Must be called under the scheduler lock, to make sure that the blkg
639 * owning @bfqg does not disappear (see comments in
640 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
641 * objects).
642 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
645 {
646 struct bfq_entity *entity = &bfqq->entity;
647 struct bfq_group *old_parent = bfqq_group(bfqq);
648
649 /*
	 * There is no point in moving bfqq to the same group, which can
	 * happen when the root group is offlined
652 */
653 if (old_parent == bfqg)
654 return;
655
656 /*
	 * oom_bfqq is not allowed to move: it holds a reference to the
	 * root_group until elevator exit.
659 */
660 if (bfqq == &bfqd->oom_bfqq)
661 return;
662 /*
663 * Get extra reference to prevent bfqq from being freed in
664 * next possible expire or deactivate.
665 */
666 bfqq->ref++;
667
668 /* If bfqq is empty, then bfq_bfqq_expire also invokes
669 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
670 * from data structures related to current group. Otherwise we
671 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
672 * we do below.
673 */
674 if (bfqq == bfqd->in_service_queue)
675 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
676 false, BFQQE_PREEMPTED);
677
678 if (bfq_bfqq_busy(bfqq))
679 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
680 else if (entity->on_st_or_in_serv)
681 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
682 bfqg_and_blkg_put(old_parent);
683
684 if (entity->parent &&
685 entity->parent->last_bfqq_created == bfqq)
686 entity->parent->last_bfqq_created = NULL;
687 else if (bfqd->last_bfqq_created == bfqq)
688 bfqd->last_bfqq_created = NULL;
689
690 entity->parent = bfqg->my_entity;
691 entity->sched_data = &bfqg->sched_data;
692 /* pin down bfqg and its associated blkg */
693 bfqg_and_blkg_get(bfqg);
694
695 if (bfq_bfqq_busy(bfqq)) {
696 if (unlikely(!bfqd->nonrot_with_queueing))
697 bfq_pos_tree_add_move(bfqd, bfqq);
698 bfq_activate_bfqq(bfqd, bfqq);
699 }
700
701 if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
702 bfq_schedule_dispatch(bfqd);
703 /* release extra ref taken above, bfqq may happen to be freed now */
704 bfq_put_queue(bfqq);
705 }
706
707 /**
708 * __bfq_bic_change_cgroup - move @bic to @bfqg.
709 * @bfqd: the queue descriptor.
710 * @bic: the bic to move.
711 * @bfqg: the group to move to.
712 *
 * Move bic to blkcg, assuming that bfqd->lock is held; this makes sure
 * that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
716 */
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
				     struct bfq_io_cq *bic,
				     struct bfq_group *bfqg)
720 {
721 struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
722 struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
723 struct bfq_entity *entity;
724
725 if (async_bfqq) {
726 entity = &async_bfqq->entity;
727
728 if (entity->sched_data != &bfqg->sched_data) {
729 bic_set_bfqq(bic, NULL, 0);
730 bfq_release_process_ref(bfqd, async_bfqq);
731 }
732 }
733
734 if (sync_bfqq) {
735 if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
736 /* We are the only user of this bfqq, just move it */
737 if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
738 bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
739 } else {
740 struct bfq_queue *bfqq;
741
742 /*
743 * The queue was merged to a different queue. Check
744 * that the merge chain still belongs to the same
745 * cgroup.
746 */
747 for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
748 if (bfqq->entity.sched_data !=
749 &bfqg->sched_data)
750 break;
751 if (bfqq) {
752 /*
753 * Some queue changed cgroup so the merge is
754 * not valid anymore. We cannot easily just
755 * cancel the merge (by clearing new_bfqq) as
756 * there may be other processes using this
757 * queue and holding refs to all queues below
758 * sync_bfqq->new_bfqq. Similarly if the merge
759 * already happened, we need to detach from
760 * bfqq now so that we cannot merge bio to a
761 * request from the old cgroup.
762 */
763 bfq_put_cooperator(sync_bfqq);
764 bfq_release_process_ref(bfqd, sync_bfqq);
765 bic_set_bfqq(bic, NULL, 1);
766 }
767 }
768 }
769
770 return bfqg;
771 }
772
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
774 {
775 struct bfq_data *bfqd = bic_to_bfqd(bic);
776 struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
777 uint64_t serial_nr;
778
779 serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
780
781 /*
782 * Check whether blkcg has changed. The condition may trigger
783 * spuriously on a newly created cic but there's no harm.
784 */
785 if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
786 return;
787
788 /*
789 * New cgroup for this process. Make sure it is linked to bfq internal
790 * cgroup hierarchy.
791 */
792 bfq_link_bfqg(bfqd, bfqg);
793 __bfq_bic_change_cgroup(bfqd, bic, bfqg);
794 /*
795 * Update blkg_path for bfq_log_* functions. We cache this
796 * path, and update it here, for the following
797 * reasons. Operations on blkg objects in blk-cgroup are
798 * protected with the request_queue lock, and not with the
799 * lock that protects the instances of this scheduler
800 * (bfqd->lock). This exposes BFQ to the following sort of
801 * race.
802 *
803 * The blkg_lookup performed in bfq_get_queue, protected
804 * through rcu, may happen to return the address of a copy of
805 * the original blkg. If this is the case, then the
806 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
807 * the blkg, is useless: it does not prevent blk-cgroup code
808 * from destroying both the original blkg and all objects
809 * directly or indirectly referred by the copy of the
810 * blkg.
811 *
812 * On the bright side, destroy operations on a blkg invoke, as
813 * a first step, hooks of the scheduler associated with the
814 * blkg. And these hooks are executed with bfqd->lock held for
815 * BFQ. As a consequence, for any blkg associated with the
816 * request queue this instance of the scheduler is attached
817 * to, we are guaranteed that such a blkg is not destroyed, and
818 * that all the pointers it contains are consistent, while we
819 * are holding bfqd->lock. A blkg_lookup performed with
820 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
822 *
823 * Thanks to the last fact, and to the fact that: (1) bfqg has
824 * been obtained through a blkg_lookup in the above
825 * assignment, and (2) bfqd->lock is being held, here we can
826 * safely use the policy data for the involved blkg (i.e., the
827 * field bfqg->pd) to get to the blkg associated with bfqg,
828 * and then we can safely use any field of blkg. After we
829 * release bfqd->lock, even just getting blkg through this
830 * bfqg may cause dangling references to be traversed, as
831 * bfqg->pd may not exist any more.
832 *
833 * In view of the above facts, here we cache, in the bfqg, any
834 * blkg data we may need for this bic, and for its associated
835 * bfq_queue. As of now, we need to cache only the path of the
836 * blkg, which is used in the bfq_log_* functions.
837 *
838 * Finally, note that bfqg itself needs to be protected from
839 * destruction on the blkg_free of the original blkg (which
840 * invokes bfq_pd_free). We use an additional private
841 * refcounter for bfqg, to let it disappear only after no
842 * bfq_queue refers to it any longer.
843 */
844 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
845 bic->blkcg_serial_nr = serial_nr;
846 }
847
848 /**
849 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
850 * @st: the service tree being flushed.
851 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
853 {
854 struct bfq_entity *entity = st->first_idle;
855
856 for (; entity ; entity = st->first_idle)
857 __bfq_deactivate_entity(entity, false);
858 }
859
860 /**
861 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
862 * @bfqd: the device data structure with the root group.
863 * @entity: the entity to move, if entity is a leaf; or the parent entity
864 * of an active leaf entity to move, if entity is not a leaf.
865 * @ioprio_class: I/O priority class to reparent.
866 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
870 {
871 struct bfq_queue *bfqq;
872 struct bfq_entity *child_entity = entity;
873
874 while (child_entity->my_sched_data) { /* leaf not reached yet */
875 struct bfq_sched_data *child_sd = child_entity->my_sched_data;
876 struct bfq_service_tree *child_st = child_sd->service_tree +
877 ioprio_class;
878 struct rb_root *child_active = &child_st->active;
879
880 child_entity = bfq_entity_of(rb_first(child_active));
881
882 if (!child_entity)
883 child_entity = child_sd->in_service_entity;
884 }
885
886 bfqq = bfq_entity_to_bfqq(child_entity);
887 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
888 }
889
890 /**
891 * bfq_reparent_active_queues - move to the root group all active queues.
892 * @bfqd: the device data structure with the root group.
893 * @bfqg: the group to move from.
894 * @st: the service tree to start the search from.
895 * @ioprio_class: I/O priority class to reparent.
896 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
901 {
902 struct rb_root *active = &st->active;
903 struct bfq_entity *entity;
904
905 while ((entity = bfq_entity_of(rb_first(active))))
906 bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
907
908 if (bfqg->sched_data.in_service_entity)
909 bfq_reparent_leaf_entity(bfqd,
910 bfqg->sched_data.in_service_entity,
911 ioprio_class);
912 }
913
914 /**
915 * bfq_pd_offline - deactivate the entity associated with @pd,
916 * and reparent its children entities.
917 * @pd: descriptor of the policy going offline.
918 *
919 * blkio already grabs the queue_lock for us, so no need to use
920 * RCU-based magic
921 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
923 {
924 struct bfq_service_tree *st;
925 struct bfq_group *bfqg = pd_to_bfqg(pd);
926 struct bfq_data *bfqd = bfqg->bfqd;
927 struct bfq_entity *entity = bfqg->my_entity;
928 unsigned long flags;
929 int i;
930
931 spin_lock_irqsave(&bfqd->lock, flags);
932
933 if (!entity) /* root group */
934 goto put_async_queues;
935
936 /*
937 * Empty all service_trees belonging to this group before
938 * deactivating the group itself.
939 */
940 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
941 st = bfqg->sched_data.service_tree + i;
942
943 /*
944 * It may happen that some queues are still active
945 * (busy) upon group destruction (if the corresponding
946 * processes have been forced to terminate). We move
947 * all the leaf entities corresponding to these queues
948 * to the root_group.
949 * Also, it may happen that the group has an entity
950 * in service, which is disconnected from the active
951 * tree: it must be moved, too.
952 * There is no need to put the sync queues, as the
953 * scheduler has taken no reference.
954 */
955 bfq_reparent_active_queues(bfqd, bfqg, st, i);
956
957 /*
958 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks because they never
960 * migrated to a different cgroup from the one being
961 * destroyed now. In addition, even
962 * bfq_reparent_active_queues() may happen to add some
963 * entities to the idle tree. It happens if, in some
964 * of the calls to bfq_bfqq_move() performed by
965 * bfq_reparent_active_queues(), the queue to move is
966 * empty and gets expired.
967 */
968 bfq_flush_idle_tree(st);
969 }
970
971 __bfq_deactivate_entity(entity, false);
972
973 put_async_queues:
974 bfq_put_async_queues(bfqd, bfqg);
975 bfqg->online = false;
976
977 spin_unlock_irqrestore(&bfqd->lock, flags);
978 /*
979 * @blkg is going offline and will be ignored by
980 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
981 * that they don't get lost. If IOs complete after this point, the
982 * stats for them will be lost. Oh well...
983 */
984 bfqg_stats_xfer_dead(bfqg);
985 }
986
void bfq_end_wr_async(struct bfq_data *bfqd)
988 {
989 struct blkcg_gq *blkg;
990
991 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
992 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
993
994 bfq_end_wr_async_queues(bfqd, bfqg);
995 }
996 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
997 }
998
static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
1000 {
1001 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1002 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1003 unsigned int val = 0;
1004
1005 if (bfqgd)
1006 val = bfqgd->weight;
1007
1008 seq_printf(sf, "%u\n", val);
1009
1010 return 0;
1011 }
1012
static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
1015 {
1016 struct bfq_group *bfqg = pd_to_bfqg(pd);
1017
1018 if (!bfqg->entity.dev_weight)
1019 return 0;
1020 return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
1021 }
1022
static int bfq_io_show_weight(struct seq_file *sf, void *v)
1024 {
1025 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1026 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1027
1028 seq_printf(sf, "default %u\n", bfqgd->weight);
1029 blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
1030 &blkcg_policy_bfq, 0, false);
1031 return 0;
1032 }
1033
static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
1035 {
1036 weight = dev_weight ?: weight;
1037
1038 bfqg->entity.dev_weight = dev_weight;
1039 /*
1040 * Setting the prio_changed flag of the entity
1041 * to 1 with new_weight == weight would re-set
1042 * the value of the weight to its ioprio mapping.
1043 * Set the flag only if necessary.
1044 */
1045 if ((unsigned short)weight != bfqg->entity.new_weight) {
1046 bfqg->entity.new_weight = (unsigned short)weight;
1047 /*
1048 * Make sure that the above new value has been
1049 * stored in bfqg->entity.new_weight before
1050 * setting the prio_changed flag. In fact,
1051 * this flag may be read asynchronously (in
1052 * critical sections protected by a different
1053 * lock than that held here), and finding this
1054 * flag set may cause the execution of the code
1055 * for updating parameters whose value may
1056 * depend also on bfqg->entity.new_weight (in
1057 * __bfq_entity_update_weight_prio).
1058 * This barrier makes sure that the new value
1059 * of bfqg->entity.new_weight is correctly
1060 * seen in that code.
1061 */
1062 smp_wmb();
1063 bfqg->entity.prio_changed = 1;
1064 }
1065 }
1066
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
1070 {
1071 struct blkcg *blkcg = css_to_blkcg(css);
1072 struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
1073 struct blkcg_gq *blkg;
1074 int ret = -ERANGE;
1075
1076 if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
1077 return ret;
1078
1079 ret = 0;
1080 spin_lock_irq(&blkcg->lock);
1081 bfqgd->weight = (unsigned short)val;
1082 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1083 struct bfq_group *bfqg = blkg_to_bfqg(blkg);
1084
1085 if (bfqg)
1086 bfq_group_set_weight(bfqg, val, 0);
1087 }
1088 spin_unlock_irq(&blkcg->lock);
1089
1090 return ret;
1091 }
1092
static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
1096 {
1097 int ret;
1098 struct blkg_conf_ctx ctx;
1099 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1100 struct bfq_group *bfqg;
1101 u64 v;
1102
1103 ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
1104 if (ret)
1105 return ret;
1106
1107 if (sscanf(ctx.body, "%llu", &v) == 1) {
1108 /* require "default" on dfl */
1109 ret = -ERANGE;
1110 if (!v)
1111 goto out;
1112 } else if (!strcmp(strim(ctx.body), "default")) {
1113 v = 0;
1114 } else {
1115 ret = -EINVAL;
1116 goto out;
1117 }
1118
1119 bfqg = blkg_to_bfqg(ctx.blkg);
1120
1121 ret = -ERANGE;
1122 if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
1123 bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
1124 ret = 0;
1125 }
1126 out:
1127 blkg_conf_finish(&ctx);
1128 return ret ?: nbytes;
1129 }
1130
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
1134 {
1135 char *endp;
1136 int ret;
1137 u64 v;
1138
1139 buf = strim(buf);
1140
1141 /* "WEIGHT" or "default WEIGHT" sets the default weight */
1142 v = simple_strtoull(buf, &endp, 0);
1143 if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
1144 ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
1145 return ret ?: nbytes;
1146 }
1147
1148 return bfq_io_set_device_weight(of, buf, nbytes, off);
1149 }
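
/*
 * A hypothetical usage sketch of the write handlers above, assuming a
 * cgroup v2 directory whose device is handled by BFQ (the device numbers
 * 8:16 are only an example):
 *
 *	# set the group's default weight
 *	echo 300 > io.bfq.weight
 *	echo "default 300" > io.bfq.weight
 *
 *	# set a per-device weight; "default" clears the per-device setting
 *	echo "8:16 100" > io.bfq.weight
 *	echo "8:16 default" > io.bfq.weight
 */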
1150
static int bfqg_print_rwstat(struct seq_file *sf, void *v)
1152 {
1153 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1154 &blkcg_policy_bfq, seq_cft(sf)->private, true);
1155 return 0;
1156 }
1157
static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
1160 {
1161 struct blkg_rwstat_sample sum;
1162
1163 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
1164 return __blkg_prfill_rwstat(sf, pd, &sum);
1165 }
1166
static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1168 {
1169 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1170 bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
1171 seq_cft(sf)->private, true);
1172 return 0;
1173 }
1174
1175 #ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
1177 {
1178 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1179 &blkcg_policy_bfq, seq_cft(sf)->private, false);
1180 return 0;
1181 }
1182
static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
1185 {
1186 struct blkcg_gq *blkg = pd_to_blkg(pd);
1187 struct blkcg_gq *pos_blkg;
1188 struct cgroup_subsys_state *pos_css;
1189 u64 sum = 0;
1190
1191 lockdep_assert_held(&blkg->q->queue_lock);
1192
1193 rcu_read_lock();
1194 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
1195 struct bfq_stat *stat;
1196
1197 if (!pos_blkg->online)
1198 continue;
1199
1200 stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
1201 sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
1202 }
1203 rcu_read_unlock();
1204
1205 return __blkg_prfill_u64(sf, pd, sum);
1206 }
1207
static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
1209 {
1210 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1211 bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
1212 seq_cft(sf)->private, false);
1213 return 0;
1214 }
1215
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
1218 {
1219 struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
1220 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);
1221
1222 return __blkg_prfill_u64(sf, pd, sum >> 9);
1223 }
1224
static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
1226 {
1227 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1228 bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
1229 return 0;
1230 }
1231
static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
1234 {
1235 struct blkg_rwstat_sample tmp;
1236
1237 blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
1238 offsetof(struct bfq_group, stats.bytes), &tmp);
1239
1240 return __blkg_prfill_u64(sf, pd,
1241 (tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
1242 }
1243
static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1245 {
1246 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1247 bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
1248 false);
1249 return 0;
1250 }
1251
static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
1254 {
1255 struct bfq_group *bfqg = pd_to_bfqg(pd);
1256 u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
1257 u64 v = 0;
1258
1259 if (samples) {
1260 v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
1261 v = div64_u64(v, samples);
1262 }
1263 __blkg_prfill_u64(sf, pd, v);
1264 return 0;
1265 }
1266
1267 /* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1269 {
1270 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1271 bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
1272 0, false);
1273 return 0;
1274 }
1275 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1276
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1278 {
1279 int ret;
1280
1281 ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
1282 if (ret)
1283 return NULL;
1284
1285 return blkg_to_bfqg(bfqd->queue->root_blkg);
1286 }
1287
1288 struct blkcg_policy blkcg_policy_bfq = {
1289 .dfl_cftypes = bfq_blkg_files,
1290 .legacy_cftypes = bfq_blkcg_legacy_files,
1291
1292 .cpd_alloc_fn = bfq_cpd_alloc,
1293 .cpd_init_fn = bfq_cpd_init,
1294 .cpd_bind_fn = bfq_cpd_init,
1295 .cpd_free_fn = bfq_cpd_free,
1296
1297 .pd_alloc_fn = bfq_pd_alloc,
1298 .pd_init_fn = bfq_pd_init,
1299 .pd_offline_fn = bfq_pd_offline,
1300 .pd_free_fn = bfq_pd_free,
1301 .pd_reset_stats_fn = bfq_pd_reset_stats,
1302 };
1303
1304 struct cftype bfq_blkcg_legacy_files[] = {
1305 {
1306 .name = "bfq.weight",
1307 .flags = CFTYPE_NOT_ON_ROOT,
1308 .seq_show = bfq_io_show_weight_legacy,
1309 .write_u64 = bfq_io_set_weight_legacy,
1310 },
1311 {
1312 .name = "bfq.weight_device",
1313 .flags = CFTYPE_NOT_ON_ROOT,
1314 .seq_show = bfq_io_show_weight,
1315 .write = bfq_io_set_weight,
1316 },
1317
1318 /* statistics, covers only the tasks in the bfqg */
1319 {
1320 .name = "bfq.io_service_bytes",
1321 .private = offsetof(struct bfq_group, stats.bytes),
1322 .seq_show = bfqg_print_rwstat,
1323 },
1324 {
1325 .name = "bfq.io_serviced",
1326 .private = offsetof(struct bfq_group, stats.ios),
1327 .seq_show = bfqg_print_rwstat,
1328 },
1329 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1330 {
1331 .name = "bfq.time",
1332 .private = offsetof(struct bfq_group, stats.time),
1333 .seq_show = bfqg_print_stat,
1334 },
1335 {
1336 .name = "bfq.sectors",
1337 .seq_show = bfqg_print_stat_sectors,
1338 },
1339 {
1340 .name = "bfq.io_service_time",
1341 .private = offsetof(struct bfq_group, stats.service_time),
1342 .seq_show = bfqg_print_rwstat,
1343 },
1344 {
1345 .name = "bfq.io_wait_time",
1346 .private = offsetof(struct bfq_group, stats.wait_time),
1347 .seq_show = bfqg_print_rwstat,
1348 },
1349 {
1350 .name = "bfq.io_merged",
1351 .private = offsetof(struct bfq_group, stats.merged),
1352 .seq_show = bfqg_print_rwstat,
1353 },
1354 {
1355 .name = "bfq.io_queued",
1356 .private = offsetof(struct bfq_group, stats.queued),
1357 .seq_show = bfqg_print_rwstat,
1358 },
1359 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1360
1361 /* the same statistics which cover the bfqg and its descendants */
1362 {
1363 .name = "bfq.io_service_bytes_recursive",
1364 .private = offsetof(struct bfq_group, stats.bytes),
1365 .seq_show = bfqg_print_rwstat_recursive,
1366 },
1367 {
1368 .name = "bfq.io_serviced_recursive",
1369 .private = offsetof(struct bfq_group, stats.ios),
1370 .seq_show = bfqg_print_rwstat_recursive,
1371 },
1372 #ifdef CONFIG_BFQ_CGROUP_DEBUG
1373 {
1374 .name = "bfq.time_recursive",
1375 .private = offsetof(struct bfq_group, stats.time),
1376 .seq_show = bfqg_print_stat_recursive,
1377 },
1378 {
1379 .name = "bfq.sectors_recursive",
1380 .seq_show = bfqg_print_stat_sectors_recursive,
1381 },
1382 {
1383 .name = "bfq.io_service_time_recursive",
1384 .private = offsetof(struct bfq_group, stats.service_time),
1385 .seq_show = bfqg_print_rwstat_recursive,
1386 },
1387 {
1388 .name = "bfq.io_wait_time_recursive",
1389 .private = offsetof(struct bfq_group, stats.wait_time),
1390 .seq_show = bfqg_print_rwstat_recursive,
1391 },
1392 {
1393 .name = "bfq.io_merged_recursive",
1394 .private = offsetof(struct bfq_group, stats.merged),
1395 .seq_show = bfqg_print_rwstat_recursive,
1396 },
1397 {
1398 .name = "bfq.io_queued_recursive",
1399 .private = offsetof(struct bfq_group, stats.queued),
1400 .seq_show = bfqg_print_rwstat_recursive,
1401 },
1402 {
1403 .name = "bfq.avg_queue_size",
1404 .seq_show = bfqg_print_avg_queue_size,
1405 },
1406 {
1407 .name = "bfq.group_wait_time",
1408 .private = offsetof(struct bfq_group, stats.group_wait_time),
1409 .seq_show = bfqg_print_stat,
1410 },
1411 {
1412 .name = "bfq.idle_time",
1413 .private = offsetof(struct bfq_group, stats.idle_time),
1414 .seq_show = bfqg_print_stat,
1415 },
1416 {
1417 .name = "bfq.empty_time",
1418 .private = offsetof(struct bfq_group, stats.empty_time),
1419 .seq_show = bfqg_print_stat,
1420 },
1421 {
1422 .name = "bfq.dequeue",
1423 .private = offsetof(struct bfq_group, stats.dequeue),
1424 .seq_show = bfqg_print_stat,
1425 },
1426 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
1427 { } /* terminate */
1428 };
1429
1430 struct cftype bfq_blkg_files[] = {
1431 {
1432 .name = "bfq.weight",
1433 .flags = CFTYPE_NOT_ON_ROOT,
1434 .seq_show = bfq_io_show_weight,
1435 .write = bfq_io_set_weight,
1436 },
1437 {} /* terminate */
1438 };
1439
1440 #else /* CONFIG_BFQ_GROUP_IOSCHED */
1441
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}
1444
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
1446 {
1447 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
1448
1449 entity->weight = entity->new_weight;
1450 entity->orig_weight = entity->new_weight;
1451 if (bfqq) {
1452 bfqq->ioprio = bfqq->new_ioprio;
1453 bfqq->ioprio_class = bfqq->new_ioprio_class;
1454 }
1455 entity->sched_data = &bfqg->sched_data;
1456 }
1457
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}
1459
void bfq_end_wr_async(struct bfq_data *bfqd)
1461 {
1462 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1463 }
1464
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1466 {
1467 return bfqd->root_group;
1468 }
1469
struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
1471 {
1472 return bfqq->bfqd->root_group;
1473 }
1474
void bfqg_and_blkg_put(struct bfq_group *bfqg) {}
1476
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1478 {
1479 struct bfq_group *bfqg;
1480 int i;
1481
1482 bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
1483 if (!bfqg)
1484 return NULL;
1485
1486 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
1487 bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
1488
1489 return bfqg;
1490 }
1491 #endif /* CONFIG_BFQ_GROUP_IOSCHED */
1492