/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

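/*
 * Roughly, blk-flush.c drives these fields as a double buffer: flush_queue[2]
 * holds the "pending" and "running" lists of flush requests, with
 * flush_pending_idx and flush_running_idx selecting which of the two plays
 * each role at the moment, while flush_rq is the preallocated request used to
 * actually issue the flush to the driver.
 */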
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
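/*
 * Usage sketch (illustrative only): callers pair a successful enter with
 * blk_queue_exit(), e.g.
 *
 *	if (bio_queue_enter(bio))
 *		return;
 *	...submit the bio...
 *	blk_queue_exit(q);
 *
 * blk_try_enter_queue() is the lockless fast path; __bio_queue_enter()
 * handles the slow cases (frozen queue, pm-only mode) and may sleep.
 */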

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
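/*
 * Worked example (illustrative values): with queue_segment_boundary() ==
 * 0xfff (4 KiB windows), vec1 at phys 0x10000 / len 0x800 and vec2 at phys
 * 0x10800 / len 0x800 are physically contiguous, and both the first and the
 * last byte OR to 0x10fff, so they merge.  If vec2 were 0x801 bytes long its
 * last byte would be 0x11000, which ORs to 0x11fff != 0x10fff, i.e. the
 * combined segment would cross the boundary and the bvecs stay separate.
 */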

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
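/*
 * Worked example (illustrative values): with queue_virt_boundary() == 0xfff,
 * a previous bvec of bv_offset 0 / bv_len 0x1000 ends exactly on the 4 KiB
 * boundary and a following offset of 0 keeps the SG list gapless, so this
 * returns false.  If the previous bvec were only 0x800 bytes long,
 * (0x800 & 0xfff) != 0 and the merge would leave a hole, so it returns true.
 */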

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 * 1) If max_discard_segments > 1, the driver treats every bio as a range and
 *    sends the bios to the controller together. The ranges don't need to be
 *    contiguous.
 * 2) Otherwise, the request is merged like a normal read/write request, so
 *    the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
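/*
 * For example, NVMe typically advertises max_discard_segments > 1
 * (multi-range DSM deallocate), so non-contiguous discard bios can be
 * collected into a single request; on a queue that leaves the limit at 1,
 * discards only merge when the sector ranges are contiguous, just like
 * regular reads and writes.
 */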

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
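/*
 * Roughly speaking, the plugging code flushes a plug once it has gathered
 * BLK_MAX_REQUEST_COUNT requests, or earlier when the last queued request
 * already covers at least BLK_PLUG_FLUSH_SIZE bytes.
 */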

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
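/*
 * Illustration: a plain read/write bio with a single bvec that fits inside
 * one page (bv_len + bv_offset <= PAGE_SIZE) on a queue without
 * chunk_sectors returns false here, so submission skips __blk_queue_split()
 * entirely; anything larger, multi-segment, or chunked goes through the
 * split path (which may still decide no split is needed).
 */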

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		       unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
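/*
 * Illustrative arithmetic: with 512-byte logical blocks this is
 * round_down(0xffffffff, 512) >> 9 == 8388607 sectors (just under 4 GiB);
 * with 4096-byte logical blocks it is 8388600 sectors, i.e. the limit is
 * always a whole number of logical blocks.
 */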

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
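/*
 * The unsigned trick above is true exactly when the counter reads as a value
 * in [-127, 0]: e.g. a ref of 0 gives 0 + 127 == 127 (flagged), a ref of -1
 * wraps around to 126 (flagged), while a healthy ref of 1 gives 128 and
 * passes.  It cheaply catches a counter that already hit zero or underflowed
 * before a put.
 */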

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */