/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

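/*
 * Identifies which policy a struct rq_qos instance implements: writeback
 * throttling (wbt), blk-cgroup I/O latency, blk-cgroup I/O cost, and I/O
 * priority. At most one instance of each id may be attached to a queue;
 * rq_qos_add() returns -EBUSY otherwise.
 */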
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
	RQ_QOS_IOPRIO,
};

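/*
 * Pairs an inflight counter with a waitqueue so that submitters can block
 * in rq_qos_wait() until the count drops below a policy-defined limit.
 */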
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

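/*
 * One attached policy instance. Policies embed this struct and chain off
 * q->rq_qos via ->next; the hooks below walk that singly linked list.
 */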
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

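/*
 * Per-policy hooks, all optional:
 *
 * @throttle:	may sleep to delay the bio before it is submitted
 * @track:	associate a bio's state with the request it is attached to
 * @merge:	a bio was merged into an existing request
 * @issue:	the request was dispatched to the driver
 * @requeue:	the request was put back on the queue after being issued
 * @done:	the request completed
 * @done_bio:	a throttled or merged bio completed
 * @cleanup:	the bio is being ended without having been issued
 * @queue_depth_changed: the device queue depth was updated
 * @exit:	the policy is being detached from the queue
 */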
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

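/*
 * Book-keeping for policies that adaptively scale an allowed queue depth:
 * scale_step records how far the depth has been scaled down (or, when
 * negative, up), and rq_depth_calc_max_depth() recomputes max_depth from
 * it together with the device queue_depth and default_depth.
 */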
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze the queue
	 * first. This is fine since rq_qos is only supported for blk-mq
	 * queues.
	 *
	 * Reuse ->queue_lock to protect against other concurrent
	 * rq_qos additions/deletions.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	if (rq_qos_id(q, rqos->id))
		goto ebusy;
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_rqos(rqos);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;
ebusy:
	spin_unlock_irq(&q->queue_lock);
	blk_mq_unfreeze_queue(q);
	return -EBUSY;
}

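/*
 * Example (hypothetical policy, for illustration only; every my_qos_*
 * name below is made up): a policy embeds a struct rq_qos, fills in its
 * ops table and id, and attaches itself with rq_qos_add(), tearing down
 * again if another instance with the same id already won the race:
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *	};
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.throttle	= my_qos_throttle,
 *		.done		= my_qos_done,
 *		.exit		= my_qos_exit,
 *	};
 *
 *	int my_qos_init(struct request_queue *q)
 *	{
 *		struct my_qos *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
 *		int ret;
 *
 *		if (!mq)
 *			return -ENOMEM;
 *		mq->rqos.q = q;
 *		mq->rqos.id = RQ_QOS_WBT;	// id reused for illustration
 *		mq->rqos.ops = &my_qos_ops;
 *		ret = rq_qos_add(q, &mq->rqos);
 *		if (ret)
 *			kfree(mq);
 *		return ret;
 *	}
 */
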
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_rqos(rqos);
	mutex_unlock(&q->debugfs_mutex);
}

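/*
 * Callbacks used by rq_qos_wait(): acquire_inflight_cb attempts to take an
 * inflight slot below the policy's limit (typically via
 * rq_wait_inc_below()) and returns true on success; cleanup_cb releases a
 * slot that a racing wakeup handed to the waiter after it had already
 * acquired one itself, so the count is not leaked.
 */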
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

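/*
 * The wrappers below are the hooks called from the fast path. Each checks
 * q->rq_qos before calling out of line, so queues with no policy attached
 * pay only a pointer test per hook.
 */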
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct bio *bio)
{
	/*
	 * Only bios that were throttled or merged by an rq_qos policy need
	 * the done hook; checking the bio flags first avoids touching the
	 * request_queue for everything else.
	 */
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif /* RQ_QOS_H */