1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8 * - July 2000
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
10 */
11
12 /*
13 * This handles all read/write requests to block devices
14 */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-pm.h>
20 #include <linux/blk-integrity.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/t10-pi.h>
38 #include <linux/debugfs.h>
39 #include <linux/bpf.h>
40 #include <linux/part_stat.h>
41 #include <linux/sched/sysctl.h>
42 #include <linux/blk-crypto.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/block.h>
46
47 #include "blk.h"
48 #include "blk-mq-sched.h"
49 #include "blk-pm.h"
50 #include "blk-cgroup.h"
51 #include "blk-throttle.h"
52
53 struct dentry *blk_debugfs_root;
54
55 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
61
62 DEFINE_IDA(blk_queue_ida);
63
64 /*
65 * For queue allocation
66 */
67 struct kmem_cache *blk_requestq_cachep;
68 struct kmem_cache *blk_requestq_srcu_cachep;
69
70 /*
71 * Controlling structure to kblockd
72 */
73 static struct workqueue_struct *kblockd_workqueue;
74
75 /**
76 * blk_queue_flag_set - atomically set a queue flag
77 * @flag: flag to be set
78 * @q: request queue
79 */
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
81 {
82 set_bit(flag, &q->queue_flags);
83 }
84 EXPORT_SYMBOL(blk_queue_flag_set);
85
86 /**
87 * blk_queue_flag_clear - atomically clear a queue flag
88 * @flag: flag to be cleared
89 * @q: request queue
90 */
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
92 {
93 clear_bit(flag, &q->queue_flags);
94 }
95 EXPORT_SYMBOL(blk_queue_flag_clear);
96
97 /**
98 * blk_queue_flag_test_and_set - atomically test and set a queue flag
99 * @flag: flag to be set
100 * @q: request queue
101 *
102 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
103 * the flag was already set.
104 */
105 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
106 {
107 return test_and_set_bit(flag, &q->queue_flags);
108 }
109 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
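/*
 * Illustrative sketch, not part of the upstream file: how a driver might
 * typically use the flag helpers above while setting up its queue. The
 * flags chosen here (QUEUE_FLAG_NONROT, QUEUE_FLAG_STABLE_WRITES,
 * QUEUE_FLAG_ADD_RANDOM) are just examples, not a recommendation for any
 * particular driver.
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	if (blk_queue_flag_test_and_set(QUEUE_FLAG_STABLE_WRITES, q))
 *		pr_debug("stable writes were already requested\n");
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 */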
110
111 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
112 static const char *const blk_op_name[] = {
113 REQ_OP_NAME(READ),
114 REQ_OP_NAME(WRITE),
115 REQ_OP_NAME(FLUSH),
116 REQ_OP_NAME(DISCARD),
117 REQ_OP_NAME(SECURE_ERASE),
118 REQ_OP_NAME(ZONE_RESET),
119 REQ_OP_NAME(ZONE_RESET_ALL),
120 REQ_OP_NAME(ZONE_OPEN),
121 REQ_OP_NAME(ZONE_CLOSE),
122 REQ_OP_NAME(ZONE_FINISH),
123 REQ_OP_NAME(ZONE_APPEND),
124 REQ_OP_NAME(WRITE_ZEROES),
125 REQ_OP_NAME(DRV_IN),
126 REQ_OP_NAME(DRV_OUT),
127 };
128 #undef REQ_OP_NAME
129
130 /**
131 * blk_op_str - Return the string XXX for a REQ_OP_XXX value.
132 * @op: REQ_OP_XXX.
133 *
134 * Description: Centralized block layer helper to convert REQ_OP_XXX into
135 * string format. Useful for debugging and tracing a bio or request. For an
136 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
137 */
138 inline const char *blk_op_str(enum req_op op)
139 {
140 const char *op_str = "UNKNOWN";
141
142 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
143 op_str = blk_op_name[op];
144
145 return op_str;
146 }
147 EXPORT_SYMBOL_GPL(blk_op_str);
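/*
 * Example (added for illustration, not in the original source): blk_op_str()
 * is handy in ad-hoc debug output, e.g. from a driver's completion path:
 *
 *	pr_debug("%pg: completing %s at sector %llu\n",
 *		 bio->bi_bdev, blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */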
148
149 static const struct {
150 int errno;
151 const char *name;
152 } blk_errors[] = {
153 [BLK_STS_OK] = { 0, "" },
154 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
155 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
156 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
157 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
158 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
159 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
160 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
161 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
162 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
163 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
164 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
165 [BLK_STS_OFFLINE] = { -ENODEV, "device offline" },
166
167 /* device mapper special case, should not leak out: */
168 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
169
170 /* zone device specific errors */
171 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
172 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
173
174 /* everything else not covered above: */
175 [BLK_STS_IOERR] = { -EIO, "I/O" },
176 };
177
178 blk_status_t errno_to_blk_status(int errno)
179 {
180 int i;
181
182 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
183 if (blk_errors[i].errno == errno)
184 return (__force blk_status_t)i;
185 }
186
187 return BLK_STS_IOERR;
188 }
189 EXPORT_SYMBOL_GPL(errno_to_blk_status);
190
191 int blk_status_to_errno(blk_status_t status)
192 {
193 int idx = (__force int)status;
194
195 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
196 return -EIO;
197 return blk_errors[idx].errno;
198 }
199 EXPORT_SYMBOL_GPL(blk_status_to_errno);
200
201 const char *blk_status_to_str(blk_status_t status)
202 {
203 int idx = (__force int)status;
204
205 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
206 return "<null>";
207 return blk_errors[idx].name;
208 }
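/*
 * Illustrative sketch (not part of the original file): the two conversion
 * helpers above are inverses for the errnos listed in blk_errors[], e.g.:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);  // BLK_STS_NOSPC
 *	int err = blk_status_to_errno(sts);               // -ENOSPC again
 *
 * Any errno without a dedicated entry collapses to BLK_STS_IOERR / -EIO.
 */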
209
210 /**
211 * blk_sync_queue - cancel any pending callbacks on a queue
212 * @q: the queue
213 *
214 * Description:
215 * The block layer may perform asynchronous callback activity
216 * on a queue, such as calling the unplug function after a timeout.
217 * A block device may call blk_sync_queue to ensure that any
218 * such activity is cancelled, thus allowing it to release resources
219 * that the callbacks might use. The caller must already have made sure
220 * that its ->submit_bio will not re-add plugging prior to calling
221 * this function.
222 *
223 * This function does not cancel any asynchronous activity arising
224 * out of elevator or throttling code. That would require elevator_exit()
225 * and blkcg_exit_queue() to be called with queue lock initialized.
226 *
227 */
228 void blk_sync_queue(struct request_queue *q)
229 {
230 del_timer_sync(&q->timeout);
231 cancel_work_sync(&q->timeout_work);
232 }
233 EXPORT_SYMBOL(blk_sync_queue);
234
235 /**
236 * blk_set_pm_only - increment pm_only counter
237 * @q: request queue pointer
238 */
239 void blk_set_pm_only(struct request_queue *q)
240 {
241 atomic_inc(&q->pm_only);
242 }
243 EXPORT_SYMBOL_GPL(blk_set_pm_only);
244
245 void blk_clear_pm_only(struct request_queue *q)
246 {
247 int pm_only;
248
249 pm_only = atomic_dec_return(&q->pm_only);
250 WARN_ON_ONCE(pm_only < 0);
251 if (pm_only == 0)
252 wake_up_all(&q->mq_freeze_wq);
253 }
254 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
255
256 /**
257 * blk_put_queue - decrement the request_queue refcount
258 * @q: the request_queue structure to decrement the refcount for
259 *
260 * Decrements the refcount of the request_queue kobject. When this reaches 0
261 * we'll have blk_release_queue() called.
262 *
263 * Context: Any context, but the last reference must not be dropped from
264 * atomic context.
265 */
266 void blk_put_queue(struct request_queue *q)
267 {
268 kobject_put(&q->kobj);
269 }
270 EXPORT_SYMBOL(blk_put_queue);
271
272 void blk_queue_start_drain(struct request_queue *q)
273 {
274 /*
275 * When the queue DYING flag is set, we need to block new requests
276 * from entering the queue, so we call blk_freeze_queue_start() to
277 * prevent I/O from crossing blk_queue_enter().
278 */
279 blk_freeze_queue_start(q);
280 if (queue_is_mq(q))
281 blk_mq_wake_waiters(q);
282 /* Make blk_queue_enter() reexamine the DYING flag. */
283 wake_up_all(&q->mq_freeze_wq);
284 }
285
286 /**
287 * blk_queue_enter() - try to increase q->q_usage_counter
288 * @q: request queue pointer
289 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
290 */
291 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
292 {
293 const bool pm = flags & BLK_MQ_REQ_PM;
294
295 while (!blk_try_enter_queue(q, pm)) {
296 if (flags & BLK_MQ_REQ_NOWAIT)
297 return -EAGAIN;
298
299 /*
300 * This smp_rmb() pairs with the barrier in blk_freeze_queue_start():
301 * we need to order reading the __PERCPU_REF_DEAD flag of
302 * .q_usage_counter against reading .mq_freeze_depth or the queue
303 * dying flag, otherwise the following wait may never return if the
304 * two reads are reordered.
305 */
306 smp_rmb();
307 wait_event(q->mq_freeze_wq,
308 (!q->mq_freeze_depth &&
309 blk_pm_resume_queue(pm, q)) ||
310 blk_queue_dying(q));
311 if (blk_queue_dying(q))
312 return -ENODEV;
313 }
314
315 return 0;
316 }
317
318 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
319 {
320 while (!blk_try_enter_queue(q, false)) {
321 struct gendisk *disk = bio->bi_bdev->bd_disk;
322
323 if (bio->bi_opf & REQ_NOWAIT) {
324 if (test_bit(GD_DEAD, &disk->state))
325 goto dead;
326 bio_wouldblock_error(bio);
327 return -EAGAIN;
328 }
329
330 /*
331 * This smp_rmb() pairs with the barrier in blk_freeze_queue_start():
332 * we need to order reading the __PERCPU_REF_DEAD flag of
333 * .q_usage_counter against reading .mq_freeze_depth or the queue
334 * dying flag, otherwise the following wait may never return if the
335 * two reads are reordered.
336 */
337 smp_rmb();
338 wait_event(q->mq_freeze_wq,
339 (!q->mq_freeze_depth &&
340 blk_pm_resume_queue(false, q)) ||
341 test_bit(GD_DEAD, &disk->state));
342 if (test_bit(GD_DEAD, &disk->state))
343 goto dead;
344 }
345
346 return 0;
347 dead:
348 bio_io_error(bio);
349 return -ENODEV;
350 }
351
352 void blk_queue_exit(struct request_queue *q)
353 {
354 percpu_ref_put(&q->q_usage_counter);
355 }
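/*
 * Illustrative sketch, not from the upstream file: callers that need to poke
 * at a queue outside of normal bio submission typically bracket the access
 * with blk_queue_enter()/blk_queue_exit() so a concurrent freeze cannot
 * complete underneath them. The error handling shown is only an example.
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EAGAIN;		// frozen, dying or PM suspended
 *	...touch the queue...
 *	blk_queue_exit(q);
 */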
356
357 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
358 {
359 struct request_queue *q =
360 container_of(ref, struct request_queue, q_usage_counter);
361
362 wake_up_all(&q->mq_freeze_wq);
363 }
364
365 static void blk_rq_timed_out_timer(struct timer_list *t)
366 {
367 struct request_queue *q = from_timer(q, t, timeout);
368
369 kblockd_schedule_work(&q->timeout_work);
370 }
371
372 static void blk_timeout_work(struct work_struct *work)
373 {
374 }
375
376 struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
377 {
378 struct request_queue *q;
379
380 q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
381 GFP_KERNEL | __GFP_ZERO, node_id);
382 if (!q)
383 return NULL;
384
385 if (alloc_srcu) {
386 blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
387 if (init_srcu_struct(q->srcu) != 0)
388 goto fail_q;
389 }
390
391 q->last_merge = NULL;
392
393 q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
394 if (q->id < 0)
395 goto fail_srcu;
396
397 q->stats = blk_alloc_queue_stats();
398 if (!q->stats)
399 goto fail_id;
400
401 q->node = node_id;
402
403 atomic_set(&q->nr_active_requests_shared_tags, 0);
404
405 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
406 INIT_WORK(&q->timeout_work, blk_timeout_work);
407 INIT_LIST_HEAD(&q->icq_list);
408
409 kobject_init(&q->kobj, &blk_queue_ktype);
410
411 mutex_init(&q->debugfs_mutex);
412 mutex_init(&q->sysfs_lock);
413 mutex_init(&q->sysfs_dir_lock);
414 spin_lock_init(&q->queue_lock);
415
416 init_waitqueue_head(&q->mq_freeze_wq);
417 mutex_init(&q->mq_freeze_lock);
418
419 /*
420 * Init percpu_ref in atomic mode so that it's faster to shutdown.
421 * See blk_register_queue() for details.
422 */
423 if (percpu_ref_init(&q->q_usage_counter,
424 blk_queue_usage_counter_release,
425 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
426 goto fail_stats;
427
428 blk_set_default_limits(&q->limits);
429 q->nr_requests = BLKDEV_DEFAULT_RQ;
430
431 return q;
432
433 fail_stats:
434 blk_free_queue_stats(q->stats);
435 fail_id:
436 ida_free(&blk_queue_ida, q->id);
437 fail_srcu:
438 if (alloc_srcu)
439 cleanup_srcu_struct(q->srcu);
440 fail_q:
441 kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
442 return NULL;
443 }
444
445 /**
446 * blk_get_queue - increment the request_queue refcount
447 * @q: the request_queue structure to increment the refcount for
448 *
449 * Increment the refcount of the request_queue kobject.
450 *
451 * Context: Any context.
452 */
453 bool blk_get_queue(struct request_queue *q)
454 {
455 if (unlikely(blk_queue_dying(q)))
456 return false;
457 kobject_get(&q->kobj);
458 return true;
459 }
460 EXPORT_SYMBOL(blk_get_queue);
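/*
 * Example usage (illustrative, not part of the original source): code that
 * stashes a request_queue pointer beyond the current call chain should pin
 * it with blk_get_queue() and drop it with blk_put_queue() when done:
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;		// queue is already dying
 *	...use q asynchronously...
 *	blk_put_queue(q);
 */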
461
462 #ifdef CONFIG_FAIL_MAKE_REQUEST
463
464 static DECLARE_FAULT_ATTR(fail_make_request);
465
466 static int __init setup_fail_make_request(char *str)
467 {
468 return setup_fault_attr(&fail_make_request, str);
469 }
470 __setup("fail_make_request=", setup_fail_make_request);
471
472 bool should_fail_request(struct block_device *part, unsigned int bytes)
473 {
474 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
475 }
476
477 static int __init fail_make_request_debugfs(void)
478 {
479 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
480 NULL, &fail_make_request);
481
482 return PTR_ERR_OR_ZERO(dir);
483 }
484
485 late_initcall(fail_make_request_debugfs);
486 #endif /* CONFIG_FAIL_MAKE_REQUEST */
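/*
 * Usage note (added for illustration): with CONFIG_FAIL_MAKE_REQUEST enabled,
 * this fault injection point is typically driven either from the boot command
 * line, e.g. "fail_make_request=<interval>,<probability>,<space>,<times>", or
 * at runtime through the debugfs knobs under fail_make_request/ created
 * above, combined with the per-device make-it-fail sysfs attribute that feeds
 * part->bd_make_it_fail. See Documentation/fault-injection for details; the
 * exact paths depend on the running kernel.
 */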
487
488 static inline void bio_check_ro(struct bio *bio)
489 {
490 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
491 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
492 return;
493 pr_warn("Trying to write to read-only block-device %pg\n",
494 bio->bi_bdev);
495 /* Older lvm-tools actually trigger this */
496 }
497 }
498
499 static noinline int should_fail_bio(struct bio *bio)
500 {
501 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
502 return -EIO;
503 return 0;
504 }
505 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
506
507 /*
508 * Check whether this bio extends beyond the end of the device or partition.
509 * This may well happen - the kernel calls bread() without checking the size of
510 * the device, e.g., when mounting a file system.
511 */
512 static inline int bio_check_eod(struct bio *bio)
513 {
514 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
515 unsigned int nr_sectors = bio_sectors(bio);
516
517 if (nr_sectors && maxsector &&
518 (nr_sectors > maxsector ||
519 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
520 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
521 "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
522 current->comm, bio->bi_bdev, bio->bi_opf,
523 bio->bi_iter.bi_sector, nr_sectors, maxsector);
524 return -EIO;
525 }
526 return 0;
527 }
528
529 /*
530 * Remap block n of partition p to block n+start(p) of the disk.
531 */
532 static int blk_partition_remap(struct bio *bio)
533 {
534 struct block_device *p = bio->bi_bdev;
535
536 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
537 return -EIO;
538 if (bio_sectors(bio)) {
539 bio->bi_iter.bi_sector += p->bd_start_sect;
540 trace_block_bio_remap(bio, p->bd_dev,
541 bio->bi_iter.bi_sector -
542 p->bd_start_sect);
543 }
544 bio_set_flag(bio, BIO_REMAPPED);
545 return 0;
546 }
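/*
 * Worked example (illustrative numbers, not from the original source): for a
 * partition starting at bd_start_sect == 2048, a bio targeting relative
 * sector 16 is remapped to absolute sector 2064 on the whole disk, while the
 * block_bio_remap tracepoint is passed the original relative sector (16) so
 * traces can still be correlated with the partition-level view.
 */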
547
548 /*
549 * Check write append to a zoned block device.
550 */
551 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
552 struct bio *bio)
553 {
554 int nr_sectors = bio_sectors(bio);
555
556 /* Only applicable to zoned block devices */
557 if (!bdev_is_zoned(bio->bi_bdev))
558 return BLK_STS_NOTSUPP;
559
560 /* The bio sector must point to the start of a sequential zone */
561 if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
562 !bio_zone_is_seq(bio))
563 return BLK_STS_IOERR;
564
565 /*
566 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
567 * split and could result in non-contiguous sectors being written in
568 * different zones.
569 */
570 if (nr_sectors > q->limits.chunk_sectors)
571 return BLK_STS_IOERR;
572
573 /* Make sure the BIO is small enough and will not get split */
574 if (nr_sectors > q->limits.max_zone_append_sectors)
575 return BLK_STS_IOERR;
576
577 bio->bi_opf |= REQ_NOMERGE;
578
579 return BLK_STS_OK;
580 }
581
582 static void __submit_bio(struct bio *bio)
583 {
584 struct gendisk *disk = bio->bi_bdev->bd_disk;
585
586 if (unlikely(!blk_crypto_bio_prep(&bio)))
587 return;
588
589 if (!disk->fops->submit_bio) {
590 blk_mq_submit_bio(bio);
591 } else if (likely(bio_queue_enter(bio) == 0)) {
592 disk->fops->submit_bio(bio);
593 blk_queue_exit(disk->queue);
594 }
595 }
596
597 /*
598 * The loop in this function may be a bit non-obvious, and so deserves some
599 * explanation:
600 *
601 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
602 * that), so we have a list with a single bio.
603 * - We pretend that we have just taken it off a longer list, so we assign
604 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
605 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
606 * bios through a recursive call to submit_bio_noacct. If it did, we find a
607 * non-NULL value in bio_list and re-enter the loop from the top.
608 * - In this case we really did just take the bio off the top of the list (no
609 * pretending) and so remove it from bio_list, and call into ->submit_bio()
610 * again.
611 *
612 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
613 * bio_list_on_stack[1] contains bios that were submitted before the current
614 * ->submit_bio, but that haven't been processed yet.
615 */
616 static void __submit_bio_noacct(struct bio *bio)
617 {
618 struct bio_list bio_list_on_stack[2];
619
620 BUG_ON(bio->bi_next);
621
622 bio_list_init(&bio_list_on_stack[0]);
623 current->bio_list = bio_list_on_stack;
624
625 do {
626 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
627 struct bio_list lower, same;
628
629 /*
630 * Create a fresh bio_list for all subordinate requests.
631 */
632 bio_list_on_stack[1] = bio_list_on_stack[0];
633 bio_list_init(&bio_list_on_stack[0]);
634
635 __submit_bio(bio);
636
637 /*
638 * Sort new bios into those for a lower level and those for the
639 * same level.
640 */
641 bio_list_init(&lower);
642 bio_list_init(&same);
643 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
644 if (q == bdev_get_queue(bio->bi_bdev))
645 bio_list_add(&same, bio);
646 else
647 bio_list_add(&lower, bio);
648
649 /*
650 * Now assemble so we handle the lowest level first.
651 */
652 bio_list_merge(&bio_list_on_stack[0], &lower);
653 bio_list_merge(&bio_list_on_stack[0], &same);
654 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
655 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
656
657 current->bio_list = NULL;
658 }
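/*
 * Illustrative scenario (added for clarity, not in the original file): with a
 * stacked setup such as a device-mapper volume on top of two SCSI disks, the
 * dm ->submit_bio() typically clones the incoming bio and resubmits the
 * clones via submit_bio_noacct(). Those clones land on bio_list_on_stack[0],
 * get sorted into the "lower" list (their queues differ from the dm queue),
 * and are then handled iteratively by the loop above instead of recursing one
 * stacking level deeper on the kernel stack.
 */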
659
660 static void __submit_bio_noacct_mq(struct bio *bio)
661 {
662 struct bio_list bio_list[2] = { };
663
664 current->bio_list = bio_list;
665
666 do {
667 __submit_bio(bio);
668 } while ((bio = bio_list_pop(&bio_list[0])));
669
670 current->bio_list = NULL;
671 }
672
673 void submit_bio_noacct_nocheck(struct bio *bio)
674 {
675 /*
676 * We only want one ->submit_bio to be active at a time, else stack
677 * usage with stacked devices could be a problem. Use current->bio_list
678 * to collect a list of requests submitted by a ->submit_bio method while
679 * it is active, and then process them after it has returned.
680 */
681 if (current->bio_list)
682 bio_list_add(&current->bio_list[0], bio);
683 else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
684 __submit_bio_noacct_mq(bio);
685 else
686 __submit_bio_noacct(bio);
687 }
688
689 /**
690 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
691 * @bio: The bio describing the location in memory and on the device.
692 *
693 * This is a version of submit_bio() that shall only be used for I/O that is
694 * resubmitted to lower level drivers by stacking block drivers. All file
695 * systems and other upper level users of the block layer should use
696 * submit_bio() instead.
697 */
698 void submit_bio_noacct(struct bio *bio)
699 {
700 struct block_device *bdev = bio->bi_bdev;
701 struct request_queue *q = bdev_get_queue(bdev);
702 blk_status_t status = BLK_STS_IOERR;
703 struct blk_plug *plug;
704
705 might_sleep();
706
707 plug = blk_mq_plug(bio);
708 if (plug && plug->nowait)
709 bio->bi_opf |= REQ_NOWAIT;
710
711 /*
712 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
713 * if the queue does not support NOWAIT.
714 */
715 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
716 goto not_supported;
717
718 if (should_fail_bio(bio))
719 goto end_io;
720 bio_check_ro(bio);
721 if (!bio_flagged(bio, BIO_REMAPPED)) {
722 if (unlikely(bio_check_eod(bio)))
723 goto end_io;
724 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
725 goto end_io;
726 }
727
728 /*
729 * Filter flush bio's early so that bio based drivers without flush
730 * support don't have to worry about them.
731 */
732 if (op_is_flush(bio->bi_opf) &&
733 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
734 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
735 if (!bio_sectors(bio)) {
736 status = BLK_STS_OK;
737 goto end_io;
738 }
739 }
740
741 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
742 bio_clear_polled(bio);
743
744 switch (bio_op(bio)) {
745 case REQ_OP_DISCARD:
746 if (!bdev_max_discard_sectors(bdev))
747 goto not_supported;
748 break;
749 case REQ_OP_SECURE_ERASE:
750 if (!bdev_max_secure_erase_sectors(bdev))
751 goto not_supported;
752 break;
753 case REQ_OP_ZONE_APPEND:
754 status = blk_check_zone_append(q, bio);
755 if (status != BLK_STS_OK)
756 goto end_io;
757 break;
758 case REQ_OP_ZONE_RESET:
759 case REQ_OP_ZONE_OPEN:
760 case REQ_OP_ZONE_CLOSE:
761 case REQ_OP_ZONE_FINISH:
762 if (!bdev_is_zoned(bio->bi_bdev))
763 goto not_supported;
764 break;
765 case REQ_OP_ZONE_RESET_ALL:
766 if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
767 goto not_supported;
768 break;
769 case REQ_OP_WRITE_ZEROES:
770 if (!q->limits.max_write_zeroes_sectors)
771 goto not_supported;
772 break;
773 default:
774 break;
775 }
776
777 if (blk_throtl_bio(bio))
778 return;
779
780 blk_cgroup_bio_start(bio);
781 blkcg_bio_issue_init(bio);
782
783 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
784 trace_block_bio_queue(bio);
785 /* Now that enqueuing has been traced, we need to trace
786 * completion as well.
787 */
788 bio_set_flag(bio, BIO_TRACE_COMPLETION);
789 }
790 submit_bio_noacct_nocheck(bio);
791 return;
792
793 not_supported:
794 status = BLK_STS_NOTSUPP;
795 end_io:
796 bio->bi_status = status;
797 bio_endio(bio);
798 }
799 EXPORT_SYMBOL(submit_bio_noacct);
800
801 /**
802 * submit_bio - submit a bio to the block device layer for I/O
803 * @bio: The &struct bio which describes the I/O
804 *
805 * submit_bio() is used to submit I/O requests to block devices. It is passed a
806 * fully set up &struct bio that describes the I/O that needs to be done. The
807 * bio will be sent to the device described by the bi_bdev field.
808 *
809 * The success/failure status of the request, along with notification of
810 * completion, is delivered asynchronously through the ->bi_end_io() callback
811 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
812 * been called.
813 */
814 void submit_bio(struct bio *bio)
815 {
816 if (blkcg_punt_bio_submit(bio))
817 return;
818
819 if (bio_op(bio) == REQ_OP_READ) {
820 task_io_account_read(bio->bi_iter.bi_size);
821 count_vm_events(PGPGIN, bio_sectors(bio));
822 } else if (bio_op(bio) == REQ_OP_WRITE) {
823 count_vm_events(PGPGOUT, bio_sectors(bio));
824 }
825
826 submit_bio_noacct(bio);
827 }
828 EXPORT_SYMBOL(submit_bio);
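/*
 * Minimal usage sketch (illustrative only, not part of the upstream file):
 * building a single-page read and submitting it through submit_bio(). The
 * completion handler name, private context and GFP flags are assumptions
 * made for the example.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_read_endio;	// runs asynchronously on completion
 *	bio->bi_private = ctx;
 *	submit_bio(bio);
 */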
829
830 /**
831 * bio_poll - poll for BIO completions
832 * @bio: bio to poll for
833 * @iob: batches of IO
834 * @flags: BLK_POLL_* flags that control the behavior
835 *
836 * Poll for completions on queue associated with the bio. Returns number of
837 * completed entries found.
838 *
839 * Note: the caller must either be the context that submitted @bio, or
840 * be in an RCU critical section to prevent freeing of @bio.
841 */
842 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
843 {
844 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
845 blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
846 int ret = 0;
847
848 if (cookie == BLK_QC_T_NONE ||
849 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
850 return 0;
851
852 /*
853 * As the requests that require a zone lock are not plugged in the
854 * first place, directly accessing the plug instead of using
855 * blk_mq_plug() should not have any consequences during flushing for
856 * zoned devices.
857 */
858 blk_flush_plug(current->plug, false);
859
860 if (bio_queue_enter(bio))
861 return 0;
862 if (queue_is_mq(q)) {
863 ret = blk_mq_poll(q, cookie, iob, flags);
864 } else {
865 struct gendisk *disk = q->disk;
866
867 if (disk && disk->fops->poll_bio)
868 ret = disk->fops->poll_bio(bio, iob, flags);
869 }
870 blk_queue_exit(q);
871 return ret;
872 }
873 EXPORT_SYMBOL_GPL(bio_poll);
874
875 /*
876 * Helper to implement file_operations.iopoll. Requires the bio to be stored
877 * in iocb->private, and cleared before freeing the bio.
878 */
879 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
880 unsigned int flags)
881 {
882 struct bio *bio;
883 int ret = 0;
884
885 /*
886 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
887 * point to a freshly allocated bio at this point. If that happens
888 * we have a few cases to consider:
889 *
890 * 1) the bio is being initialized and bi_bdev is NULL. We can simply do
891 * nothing in this case
892 * 2) the bio points to a device that is not poll enabled. bio_poll will catch
893 * this and return 0
894 * 3) the bio points to a poll capable device, including but not
895 * limited to the one that the original bio pointed to. In this
896 * case we will call into the actual poll method and poll for I/O,
897 * even if we don't need to, but it won't cause harm either.
898 *
899 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
900 * is still allocated. Because partitions hold a reference to the whole
901 * device bdev and thus disk, the disk is also still valid. Grabbing
902 * a reference to the queue in bio_poll() ensures the hctxs and requests
903 * are still valid as well.
904 */
905 rcu_read_lock();
906 bio = READ_ONCE(kiocb->private);
907 if (bio && bio->bi_bdev)
908 ret = bio_poll(bio, iob, flags);
909 rcu_read_unlock();
910
911 return ret;
912 }
913 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
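/*
 * Illustrative sketch (not from the original source): a synchronous polled
 * read in the style of some direct I/O paths. REQ_POLLED marks the bio for
 * completion by polling; the "done" flag set from the endio handler is a
 * hypothetical name used only for this example.
 *
 *	bio->bi_opf |= REQ_POLLED;
 *	submit_bio(bio);
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */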
914
915 void update_io_ticks(struct block_device *part, unsigned long now, bool end)
916 {
917 unsigned long stamp;
918 again:
919 stamp = READ_ONCE(part->bd_stamp);
920 if (unlikely(time_after(now, stamp))) {
921 if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
922 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
923 }
924 if (part->bd_partno) {
925 part = bdev_whole(part);
926 goto again;
927 }
928 }
929
930 unsigned long bdev_start_io_acct(struct block_device *bdev,
931 unsigned int sectors, enum req_op op,
932 unsigned long start_time)
933 {
934 const int sgrp = op_stat_group(op);
935
936 part_stat_lock();
937 update_io_ticks(bdev, start_time, false);
938 part_stat_inc(bdev, ios[sgrp]);
939 part_stat_add(bdev, sectors[sgrp], sectors);
940 part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
941 part_stat_unlock();
942
943 return start_time;
944 }
945 EXPORT_SYMBOL(bdev_start_io_acct);
946
947 /**
948 * bio_start_io_acct_time - start I/O accounting for bio based drivers
949 * @bio: bio to start account for
950 * @start_time: start time that should be passed back to bio_end_io_acct().
951 */
952 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
953 {
954 bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
955 bio_op(bio), start_time);
956 }
957 EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
958
959 /**
960 * bio_start_io_acct - start I/O accounting for bio based drivers
961 * @bio: bio to start account for
962 *
963 * Returns the start time that should be passed back to bio_end_io_acct().
964 */
965 unsigned long bio_start_io_acct(struct bio *bio)
966 {
967 return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
968 bio_op(bio), jiffies);
969 }
970 EXPORT_SYMBOL_GPL(bio_start_io_acct);
971
972 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
973 unsigned long start_time)
974 {
975 const int sgrp = op_stat_group(op);
976 unsigned long now = READ_ONCE(jiffies);
977 unsigned long duration = now - start_time;
978
979 part_stat_lock();
980 update_io_ticks(bdev, now, true);
981 part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
982 part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
983 part_stat_unlock();
984 }
985 EXPORT_SYMBOL(bdev_end_io_acct);
986
987 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
988 struct block_device *orig_bdev)
989 {
990 bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
991 }
992 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
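/*
 * Typical pattern for bio based drivers (illustrative, not in the original
 * file): account the bio when it is accepted and finish the accounting from
 * the endio path. bio_end_io_acct() is the blkdev.h wrapper around
 * bio_end_io_acct_remapped() for the common case where the bio was not
 * remapped to another device.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	...drive the I/O...
 *	bio_end_io_acct(bio, start);
 */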
993
994 /**
995 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
996 * @q : the queue of the device being checked
997 *
998 * Description:
999 * Check if underlying low-level drivers of a device are busy.
1000 * If the drivers want to export their busy state, they must set their own
1001 * exporting function using blk_queue_lld_busy() first.
1002 *
1003 * Basically, this function is used only by request stacking drivers
1004 * to stop dispatching requests to underlying devices when underlying
1005 * devices are busy. This behavior helps more I/O merging on the queue
1006 * of the request stacking driver and prevents I/O throughput regression
1007 * on burst I/O load.
1008 *
1009 * Return:
1010 * 0 - Not busy (The request stacking driver should dispatch request)
1011 * 1 - Busy (The request stacking driver should stop dispatching request)
1012 */
1013 int blk_lld_busy(struct request_queue *q)
1014 {
1015 if (queue_is_mq(q) && q->mq_ops->busy)
1016 return q->mq_ops->busy(q);
1017
1018 return 0;
1019 }
1020 EXPORT_SYMBOL_GPL(blk_lld_busy);
1021
1022 int kblockd_schedule_work(struct work_struct *work)
1023 {
1024 return queue_work(kblockd_workqueue, work);
1025 }
1026 EXPORT_SYMBOL(kblockd_schedule_work);
1027
1028 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1029 unsigned long delay)
1030 {
1031 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1032 }
1033 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1034
1035 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1036 {
1037 struct task_struct *tsk = current;
1038
1039 /*
1040 * If this is a nested plug, don't actually assign it.
1041 */
1042 if (tsk->plug)
1043 return;
1044
1045 plug->mq_list = NULL;
1046 plug->cached_rq = NULL;
1047 plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1048 plug->rq_count = 0;
1049 plug->multiple_queues = false;
1050 plug->has_elevator = false;
1051 plug->nowait = false;
1052 INIT_LIST_HEAD(&plug->cb_list);
1053
1054 /*
1055 * Store ordering should not be needed here, since a potential
1056 * preempt will imply a full memory barrier
1057 */
1058 tsk->plug = plug;
1059 }
1060
1061 /**
1062 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1063 * @plug: The &struct blk_plug that needs to be initialized
1064 *
1065 * Description:
1066 * blk_start_plug() indicates to the block layer an intent by the caller
1067 * to submit multiple I/O requests in a batch. The block layer may use
1068 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1069 * is called. However, the block layer may choose to submit requests
1070 * before a call to blk_finish_plug() if the number of queued I/Os
1071 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1072 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1073 * the task schedules (see below).
1074 *
1075 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1076 * pending I/O should the task end up blocking between blk_start_plug() and
1077 * blk_finish_plug(). This is important from a performance perspective, but
1078 * also ensures that we don't deadlock. For instance, if the task is blocking
1079 * for a memory allocation, memory reclaim could end up wanting to free a
1080 * page belonging to that request that is currently residing in our private
1081 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1082 * this kind of deadlock.
1083 */
1084 void blk_start_plug(struct blk_plug *plug)
1085 {
1086 blk_start_plug_nr_ios(plug, 1);
1087 }
1088 EXPORT_SYMBOL(blk_start_plug);
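/*
 * Usage sketch (illustrative, not part of the upstream file): batching a set
 * of submissions under one plug so the block layer can merge and dispatch
 * them together. The bio source helper named here is hypothetical.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio_to_submit()))
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */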
1089
1090 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1091 {
1092 LIST_HEAD(callbacks);
1093
1094 while (!list_empty(&plug->cb_list)) {
1095 list_splice_init(&plug->cb_list, &callbacks);
1096
1097 while (!list_empty(&callbacks)) {
1098 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1099 struct blk_plug_cb,
1100 list);
1101 list_del(&cb->list);
1102 cb->callback(cb, from_schedule);
1103 }
1104 }
1105 }
1106
1107 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1108 int size)
1109 {
1110 struct blk_plug *plug = current->plug;
1111 struct blk_plug_cb *cb;
1112
1113 if (!plug)
1114 return NULL;
1115
1116 list_for_each_entry(cb, &plug->cb_list, list)
1117 if (cb->callback == unplug && cb->data == data)
1118 return cb;
1119
1120 /* Not currently on the callback list */
1121 BUG_ON(size < sizeof(*cb));
1122 cb = kzalloc(size, GFP_ATOMIC);
1123 if (cb) {
1124 cb->data = data;
1125 cb->callback = unplug;
1126 list_add(&cb->list, &plug->cb_list);
1127 }
1128 return cb;
1129 }
1130 EXPORT_SYMBOL(blk_check_plugged);
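/*
 * Illustrative sketch (not from the original source): stacking drivers such
 * as md embed their own state behind a struct blk_plug_cb and use
 * blk_check_plugged() to get a per-plug callback that runs at unplug time.
 * The structure and function names below are hypothetical; the callback is
 * typically responsible for freeing the allocation.
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb cb;	// common part used by the block layer
 *		struct bio_list pending;
 *	};
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
 *
 *		// flush mcb->pending here
 *		kfree(mcb);
 *	}
 *
 *	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
 *	if (cb)
 *		bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending, bio);
 */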
1131
1132 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1133 {
1134 if (!list_empty(&plug->cb_list))
1135 flush_plug_callbacks(plug, from_schedule);
1136 if (!rq_list_empty(plug->mq_list))
1137 blk_mq_flush_plug_list(plug, from_schedule);
1138 /*
1139 * Unconditionally flush out cached requests, even if the unplug
1140 * event came from schedule. Since we now hold references to the
1141 * queue for cached requests, we don't want a blocked task holding
1142 * up a queue freeze/quiesce event.
1143 */
1144 if (unlikely(!rq_list_empty(plug->cached_rq)))
1145 blk_mq_free_plug_rqs(plug);
1146 }
1147
1148 /**
1149 * blk_finish_plug - mark the end of a batch of submitted I/O
1150 * @plug: The &struct blk_plug passed to blk_start_plug()
1151 *
1152 * Description:
1153 * Indicate that a batch of I/O submissions is complete. This function
1154 * must be paired with an initial call to blk_start_plug(). The intent
1155 * is to allow the block layer to optimize I/O submission. See the
1156 * documentation for blk_start_plug() for more information.
1157 */
1158 void blk_finish_plug(struct blk_plug *plug)
1159 {
1160 if (plug == current->plug) {
1161 __blk_flush_plug(plug, false);
1162 current->plug = NULL;
1163 }
1164 }
1165 EXPORT_SYMBOL(blk_finish_plug);
1166
1167 void blk_io_schedule(void)
1168 {
1169 /* Prevent hang_check timer from firing at us during very long I/O */
1170 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1171
1172 if (timeout)
1173 io_schedule_timeout(timeout);
1174 else
1175 io_schedule();
1176 }
1177 EXPORT_SYMBOL_GPL(blk_io_schedule);
1178
1179 int __init blk_dev_init(void)
1180 {
1181 BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
1182 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1183 sizeof_field(struct request, cmd_flags));
1184 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1185 sizeof_field(struct bio, bi_opf));
1186 BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
1187 __alignof__(struct request_queue)) !=
1188 sizeof(struct request_queue));
1189
1190 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1191 kblockd_workqueue = alloc_workqueue("kblockd",
1192 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1193 if (!kblockd_workqueue)
1194 panic("Failed to create kblockd\n");
1195
1196 blk_requestq_cachep = kmem_cache_create("request_queue",
1197 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1198
1199 blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
1200 sizeof(struct request_queue) +
1201 sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
1202
1203 blk_debugfs_root = debugfs_create_dir("block", NULL);
1204
1205 return 0;
1206 }
1207