/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

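/*
 * Illustrative sketch (the counter below is hypothetical, not defined in
 * this header): the batch is meant for percpu_counter_add_batch(), so that
 * per-cpu deltas fold into the global count only once they exceed the batch:
 *
 *	percpu_counter_add_batch(&example_cnt, bytes, BLKG_STAT_CPU_BATCH);
 *
 * With a batch of INT_MAX / 2 the fold virtually never happens, which is
 * fine because readers tolerate per-cpu drift.
 */
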
#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq			*blkg;
	struct llist_node		lnode;
	int				lqueued;	/* queued in llist */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

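/*
 * Illustrative sketch (assumed writer path; @bis and @bytes are made up):
 * updates to ->cur sit inside a u64_stats writer section so that 32-bit
 * readers see consistent 64-bit values:
 *
 *	unsigned long flags;
 *
 *	flags = u64_stats_update_begin_irqsave(&bis->sync);
 *	bis->cur.bytes[BLKG_IOSTAT_READ] += bytes;
 *	bis->cur.ios[BLKG_IOSTAT_READ]++;
 *	u64_stats_update_end_irqrestore(&bis->sync, flags);
 */
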
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
#endif
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq __rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu	*lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
	bool				online;
};

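/*
 * Illustrative sketch (hypothetical policy): embedding blkg_policy_data at
 * the beginning of a larger structure, with container_of() mapping a pd
 * pointer back to the private data:
 *
 *	struct example_blkg_data {
 *		struct blkg_policy_data	pd;	// must come first
 *		u64			nr_dispatched;
 *	};
 *
 *	static struct example_blkg_data *pd_to_example(struct blkg_policy_data *pd)
 *	{
 *		return container_of(pd, struct example_blkg_data, pd);
 *	}
 */
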
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

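/*
 * Illustrative sketch (hypothetical policy): ->plid is assigned by
 * blkcg_policy_register(), so a policy only fills in its callbacks and
 * cftypes before registering, typically from an init function:
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	ret = blkcg_policy_register(&example_policy);
 */
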
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	char				*input;
	char				*body;
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
};

void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);

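/*
 * Illustrative sketch (assumed usage from a policy's cftype write handler;
 * example_policy and example_apply() are hypothetical): the ctx must be
 * initialized first and exited regardless of whether prep succeeded:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &example_policy, &ctx);
 *	if (!ret)
 *		ret = example_apply(ctx.blkg, ctx.body);
 *	blkg_conf_exit(&ctx);
 *	return ret;
 */
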
/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it returns true run with the root blkg for that queue, and then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference_check(blkcg->blkg_hint,
			lockdep_is_held(&q->queue_lock));
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}

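/*
 * Illustrative sketch (example_account() is hypothetical): the lookup and
 * any use of the returned blkg must stay inside one RCU read-side section
 * unless a reference is taken:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		example_account(blkg);
 *	rcu_read_unlock();
 */
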
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

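/*
 * Illustrative sketch (example_use() is hypothetical): pairing blkg_tryget()
 * with blkg_put() to keep using a blkg beyond the RCU section that found it:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		example_use(blkg);
 *		blkg_put(blkg);
 *	}
 */
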
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

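/*
 * Illustrative sketch (example_read_stat() is hypothetical): summing a
 * per-blkg value over a subtree with the pre-order iterator under RCU:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *	u64 total = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		total += example_read_stat(d_blkg);
 *	rcu_read_unlock();
 */
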
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

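/*
 * Illustrative sketch (hypothetical throttler; example_note_uncongested()
 * is made up): every blkcg_use_delay() must eventually be matched by a
 * blkcg_unuse_delay() so the cgroup congestion count tracks the number of
 * outstanding users:
 *
 *	blkcg_use_delay(blkg);		// start charging delay
 *	...
 *	if (blkcg_unuse_delay(blkg))	// returns 1 if a use was dropped
 *		example_note_uncongested(blkg);
 */
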
/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

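/*
 * Illustrative sketch (hypothetical controller using the non-decaying mode;
 * example_over_limit() is made up): a fixed delay is set while the group is
 * over its limit and cleared once it recovers:
 *
 *	if (example_over_limit(blkg))
 *		blkcg_set_delay(blkg, 10 * NSEC_PER_MSEC);
 *	else
 *		blkcg_clear_delay(blkg);
 */
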
/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
	const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
	const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
	struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */