/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,	/* writeback throttling (blk-wbt) */
	RQ_QOS_LATENCY,	/* cgroup I/O latency controller (blk-iolatency) */
	RQ_QOS_COST,	/* cgroup I/O cost controller (blk-iocost) */
};

/* A wait queue plus the inflight count that throttled submitters sleep on. */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/* One instance per registered policy, linked off q->rq_qos. */
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

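/*
 * A policy normally embeds struct rq_qos in its own state and uses
 * container_of() to get back to that state from a hook. Illustrative
 * sketch only; "struct example_qos" and its fields are hypothetical
 * names, not part of this header:
 *
 *	struct example_qos {
 *		struct rq_qos rqos;
 *		struct rq_wait rqw;
 *		unsigned int limit;
 *	};
 *
 *	static inline struct example_qos *EXQ(struct rq_qos *rqos)
 *	{
 *		return container_of(rqos, struct example_qos, rqos);
 *	}
 */
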
/*
 * Per-policy hooks, invoked via the rq_qos_*() wrappers below. A NULL
 * hook is skipped for that policy.
 */
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);	/* bio entering the queue; may sleep */
	void (*track)(struct rq_qos *, struct request *, struct bio *);	/* bio attached to a new request */
	void (*merge)(struct rq_qos *, struct request *, struct bio *);	/* bio merged into an existing request */
	void (*issue)(struct rq_qos *, struct request *);	/* request issued to the device */
	void (*requeue)(struct rq_qos *, struct request *);	/* request requeued */
	void (*done)(struct rq_qos *, struct request *);	/* request completed */
	void (*done_bio)(struct rq_qos *, struct bio *);	/* bio completed */
	void (*cleanup)(struct rq_qos *, struct bio *);	/* throttled bio abandoned before tracking */
	void (*queue_depth_changed)(struct rq_qos *);	/* device queue depth changed */
	void (*exit)(struct rq_qos *);	/* policy teardown */
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

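/*
 * A minimal ops table for the hypothetical policy sketched above. The
 * hook bodies and names are assumptions for illustration, not kernel
 * code; a real policy fills in only the hooks it needs:
 *
 *	static void example_qos_done(struct rq_qos *rqos, struct request *rq)
 *	{
 *		struct example_qos *exq = EXQ(rqos);
 *
 *		if (atomic_dec_return(&exq->rqw.inflight) < exq->limit)
 *			wake_up(&exq->rqw.wait);
 *	}
 *
 *	static void example_qos_exit(struct rq_qos *rqos)
 *	{
 *		kfree(EXQ(rqos));
 *	}
 *
 *	static struct rq_qos_ops example_qos_ops = {
 *		.done	= example_qos_done,
 *		.exit	= example_qos_exit,
 *	};
 */
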
struct rq_depth {
	unsigned int max_depth;		/* current depth limit from scaling */

	int scale_step;			/* scaling step; > 0 means scaled down */
	bool scaled_max;		/* scaling up has hit the ceiling */

	unsigned int queue_depth;	/* depth reported by the device */
	unsigned int default_depth;	/* base depth to scale against */
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze the queue.
	 * That is fine because rq_qos is only supported for blk-mq queues.
	 *
	 * Reuse ->queue_lock to protect against concurrent rq_qos
	 * additions and deletions.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	if (rq_qos_id(q, rqos->id))
		goto ebusy;
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_rqos(rqos);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;
ebusy:
	spin_unlock_irq(&q->queue_lock);
	blk_mq_unfreeze_queue(q);
	return -EBUSY;
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing the queue and using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_rqos(rqos);
	mutex_unlock(&q->debugfs_mutex);
}

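/*
 * Registration sketch for the hypothetical policy above; the names and
 * error handling are illustrative assumptions, and a real policy picks
 * its own fixed rq_qos_id:
 *
 *	struct example_qos *exq = kzalloc(sizeof(*exq), GFP_KERNEL);
 *
 *	if (!exq)
 *		return -ENOMEM;
 *	rq_wait_init(&exq->rqw);
 *	exq->rqos.q = q;
 *	exq->rqos.id = RQ_QOS_WBT;
 *	exq->rqos.ops = &example_qos_ops;
 *	if (rq_qos_add(q, &exq->rqos)) {
 *		kfree(exq);
 *		return -EBUSY;
 *	}
 *
 * Teardown goes through rq_qos_del() (or rq_qos_exit(), which invokes
 * each registered policy's ->exit()).
 */
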
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

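/*
 * rq_qos_wait() puts the caller to sleep on rqw->wait until
 * acquire_inflight_cb succeeds; the callback typically just tries to
 * take an inflight slot under the current limit. A hedged sketch using
 * the hypothetical policy above:
 *
 *	static bool example_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		struct example_qos *exq = private_data;
 *
 *		return rq_wait_inc_below(rqw, exq->limit);
 *	}
 *
 * A ->throttle() hook would then call, with example_cleanup_cb being a
 * matching (hypothetical) undo callback:
 *
 *	rq_qos_wait(&exq->rqw, exq, example_inflight_cb, example_cleanup_cb);
 */
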
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct bio *bio)
{
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

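/*
 * Rough order of the hooks over a bio's life (illustrative summary;
 * the blk-mq submission and completion paths are authoritative):
 * throttle as the bio enters the queue, then merge into an existing
 * request or track on a newly allocated one; issue at dispatch and
 * requeue if the request bounces back; done when the request
 * completes and done_bio when the bio itself does; cleanup if a
 * throttled bio never gets tracked.
 */
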
void rq_qos_exit(struct request_queue *);

#endif