// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return xa_load(&q->hctx_table,
			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}
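
/*
 * Worked example (illustrative): with BLK_QC_T_SHIFT == 16, a request on
 * hardware queue 2 holding driver tag 5 encodes to (2 << 16) | 5 ==
 * 0x20005, while the same request holding only a scheduler (internal)
 * tag 5 encodes to (2 << 16) | 5 | BLK_QC_T_INTERNAL == 0x80020005.
 */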

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct request *rq, void *priv)
{
	struct mq_inflight *mi = priv;

	if (rq->part && blk_do_io_stat(rq) &&
	    (!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
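
/*
 * Usage sketch (illustrative, not part of this file): a caller that must
 * change queue data which in-flight requests could observe brackets the
 * update with a freeze/unfreeze pair:
 *
 *	blk_mq_freeze_queue(q);
 *	(no request is in flight here; safe to update queue state)
 *	blk_mq_unfreeze_queue(q);
 */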

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
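
/*
 * Usage sketch (illustrative): quiescing only guarantees that no new
 * dispatch happens; already-started requests may still complete. A driver
 * pausing ->queue_rq() activity would typically do:
 *
 *	blk_mq_quiesce_queue(q);
 *	(no ->queue_rq() call is in progress or will start here)
 *	blk_mq_unquiesce_queue(q);
 */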

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	if (data->flags & BLK_MQ_REQ_RESERVED)
		data->rq_flags |= RQF_RESV;

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
					    struct blk_plug *plug,
					    blk_opf_t opf,
					    blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= plug->nr_ios,
		.cached_rq	= &plug->cached_rq,
	};
	struct request *rq;

	if (blk_queue_enter(q, flags))
		return NULL;

	plug->nr_ios = 1;

	rq = __blk_mq_alloc_requests(&data);
	if (unlikely(!rq))
		blk_queue_exit(q);
	return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;
	if (rq_list_empty(plug->cached_rq)) {
		if (plug->nr_ios == 1)
			return NULL;
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (rq)
			goto got_it;
		return NULL;
	}
	rq = rq_list_peek(&plug->cached_rq);
	if (!rq || rq->q != q)
		return NULL;

	if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
		return NULL;
	if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
		return NULL;

	plug->cached_rq = rq_list_next(rq);
got_it:
	rq->cmd_flags = opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_cached_request(q, opf, flags);
	if (!rq) {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.flags		= flags,
			.cmd_flags	= opf,
			.nr_tags	= 1,
		};
		int ret;

		ret = blk_queue_enter(q, flags);
		if (ret)
			return ERR_PTR(ret);

		rq = __blk_mq_alloc_requests(&data);
		if (!rq)
			goto out_queue_exit;
	}
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);
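
/*
 * Usage sketch (illustrative): passthrough submitters allocate a request,
 * attach their payload, and execute it. Allocation failures are reported
 * as ERR_PTR() values, so callers check with IS_ERR():
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	(set up rq, execute it, e.g. via blk_execute_rq(), then ...)
 *	blk_mq_free_request(rq);
 */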

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		goto out_queue_exit;
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	if (flags & BLK_MQ_REQ_RESERVED)
		data.rq_flags |= RQF_RESV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(__force unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), (__force u32)req_op(req),
		blk_op_str(req_op(req)),
		(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	if (!req->end_io) {
		req->bio = NULL;
		req->__data_len = 0;
	}
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *      except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
		     !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (rq->bio)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);
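
/*
 * Usage sketch (illustrative): a driver's completion path translates its
 * hardware status into a blk_status_t and ends the request in one call:
 *
 *	blk_mq_end_request(rq, hw_err ? BLK_STS_IOERR : BLK_STS_OK);
 *
 * (hw_err is a hypothetical driver-side status, not something defined
 * here.)
 */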

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		/*
		 * If end_io handler returns NONE, then it still has
		 * ownership of the request.
		 */
		if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
			continue;

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
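
/*
 * Usage sketch (illustrative): poll-based drivers gather completions into
 * an io_comp_batch instead of ending requests one at a time. A poll
 * handler handed an iob would typically try to batch first:
 *
 *	if (!rq->end_io && blk_mq_add_to_batch(rq, iob, error,
 *					       blk_mq_end_request_batch))
 *		return;
 *	blk_mq_end_request(rq, error);
 *
 * (error is a driver-derived status here; falling straight back to a
 * single completion is a simplification of what real drivers do.)
 */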

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For requests whose hctx has only one ctx mapping, or for polled
	 * requests, always complete locally; redirecting the completion
	 * is pointless.
	 */
	if (rq->mq_hctx->nr_ctx == 1 ||
		rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);
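
/*
 * Usage sketch (illustrative): drivers call blk_mq_complete_request() from
 * their interrupt handler once the hardware is done with rq. A driver that
 * wants to avoid the indirect ->complete() call for local completions can
 * use the _remote variant directly:
 *
 *	if (!blk_mq_complete_request_remote(rq))
 *		hypothetical_driver_complete(rq);
 *
 * (hypothetical_driver_complete() stands in for the driver's own
 * completion handler.)
 */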

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so the block layer can do proper
 * initialization, such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
		WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);
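
/*
 * Usage sketch (illustrative): a ->queue_rq() implementation calls
 * blk_mq_start_request() immediately before handing the request to the
 * hardware, so the timeout timer covers the whole hardware-owned window:
 *
 *	blk_mq_start_request(rq);
 *	if (hypothetical_hw_submit(rq))
 *		return BLK_STS_RESOURCE;
 *	return BLK_STS_OK;
 *
 * (hypothetical_hw_submit() stands in for a driver-specific helper.)
 */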

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		last = NULL;
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	blk_account_io_start(rq);

	/*
	 * As plugging can be enabled for passthrough requests on a zoned
	 * device, directly accessing the plug instead of using blk_mq_plug()
	 * should not have any consequences.
	 */
	if (current->plug)
		blk_add_rq_to_plug(current->plug, rq);
	else
		blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

struct blk_rq_wait {
	struct completion done;
	blk_status_t ret;
};

static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{
	struct blk_rq_wait *wait = rq->end_io_data;

	wait->ret = ret;
	complete(&wait->done);
	return RQ_END_IO_NONE;
}

bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	struct blk_rq_wait wait = {
		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
	};

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;

	blk_account_io_start(rq);
	blk_mq_sched_insert_request(rq, at_head, true, false);

	if (blk_rq_is_poll(rq)) {
		blk_rq_poll_completion(rq, &wait.done);
	} else {
		/*
		 * Prevent hang_check timer from firing at us during very long
		 * I/O
		 */
		unsigned long hang_check = sysctl_hung_task_timeout_secs;

		if (hang_check)
			while (!wait_for_completion_io_timeout(&wait.done,
					hang_check * (HZ/2)))
				;
		else
			wait_for_completion_io(&wait.done);
	}

	return wait.ret;
}
EXPORT_SYMBOL(blk_execute_rq);
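
/*
 * Usage sketch (illustrative): synchronous passthrough combines the
 * allocation and execution helpers above:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	blk_status_t status;
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	(attach command and data to rq here)
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *	return blk_status_to_errno(status);
 */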

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted into the I/O scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
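
/*
 * Usage sketch (illustrative): a driver that temporarily cannot service a
 * started request hands it back and schedules a delayed retry:
 *
 *	blk_mq_requeue_request(rq, false);
 *	blk_mq_delay_kick_requeue_list(q, HYPOTHETICAL_RETRY_MS);
 *
 * (HYPOTHETICAL_RETRY_MS is a stand-in for a driver-chosen delay in
 * milliseconds.)
 */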

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, rq already contains driver-specific
		 * data, so insert it into the hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
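
/*
 * Usage sketch (illustrative): runtime-PM style callers use this helper to
 * decide whether the queue is idle enough to suspend:
 *
 *	if (blk_mq_queue_inflight(q))
 *		return -EBUSY;	(still busy, defer the suspend)
 */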

static void blk_mq_rq_timed_out(struct request *req)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

struct blk_expired_data {
	bool has_timedout_rq;
	unsigned long next;
	unsigned long timeout_start;
};

static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(expired->timeout_start, deadline))
		return true;

	if (expired->next == 0)
		expired->next = deadline;
	else if (time_after(expired->next, deadline))
		expired->next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq)) {
		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
			blk_mq_free_request(rq);
	} else if (req_ref_put_and_test(rq)) {
		__blk_mq_free_request(rq);
	}
}

static bool blk_mq_check_expired(struct request *rq, void *priv)
{
	struct blk_expired_data *expired = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler, which makes the
	 * expiry check reliable. If the request is not expired, then it was
	 * completed and reallocated as a new request after returning from
	 * blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, expired)) {
		expired->has_timedout_rq = true;
		return false;
	}
	return true;
}

static bool blk_mq_handle_expired(struct request *rq, void *priv)
{
	struct blk_expired_data *expired = priv;

	if (blk_mq_req_expired(rq, expired))
		blk_mq_rq_timed_out(rq);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_expired_data expired = {
		.timeout_start = jiffies,
	};
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/*
	 * A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	/* check if there is any timed-out request */
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
	if (expired.has_timedout_rq) {
		/*
		 * Before walking tags, we must ensure any submit started
		 * before the current time has finished. Since the submit
		 * uses srcu or rcu, wait for a synchronization point to
		 * ensure all running submits have finished.
		 */
		blk_mq_wait_quiesce_done(q);

		expired.next = 0;
		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
	}

	if (expired.next != 0) {
		mod_timer(&q->timeout, expired.next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * onto the passed-in dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1687 
1688 struct dispatch_rq_data {
1689 	struct blk_mq_hw_ctx *hctx;
1690 	struct request *rq;
1691 };
1692 
dispatch_rq_from_ctx(struct sbitmap * sb,unsigned int bitnr,void * data)1693 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1694 		void *data)
1695 {
1696 	struct dispatch_rq_data *dispatch_data = data;
1697 	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1698 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1699 	enum hctx_type type = hctx->type;
1700 
1701 	spin_lock(&ctx->lock);
1702 	if (!list_empty(&ctx->rq_lists[type])) {
1703 		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1704 		list_del_init(&dispatch_data->rq->queuelist);
1705 		if (list_empty(&ctx->rq_lists[type]))
1706 			sbitmap_clear_bit(sb, bitnr);
1707 	}
1708 	spin_unlock(&ctx->lock);
1709 
1710 	return !dispatch_data->rq;
1711 }
1712 
blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx * hctx,struct blk_mq_ctx * start)1713 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1714 					struct blk_mq_ctx *start)
1715 {
1716 	unsigned off = start ? start->index_hw[hctx->type] : 0;
1717 	struct dispatch_rq_data data = {
1718 		.hctx = hctx,
1719 		.rq   = NULL,
1720 	};
1721 
1722 	__sbitmap_for_each_set(&hctx->ctx_map, off,
1723 			       dispatch_rq_from_ctx, &data);
1724 
1725 	return data.rq;
1726 }
1727 
__blk_mq_alloc_driver_tag(struct request * rq)1728 static bool __blk_mq_alloc_driver_tag(struct request *rq)
1729 {
1730 	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1731 	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1732 	int tag;
1733 
1734 	blk_mq_tag_busy(rq->mq_hctx);
1735 
1736 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1737 		bt = &rq->mq_hctx->tags->breserved_tags;
1738 		tag_offset = 0;
1739 	} else {
1740 		if (!hctx_may_queue(rq->mq_hctx, bt))
1741 			return false;
1742 	}
1743 
1744 	tag = __sbitmap_queue_get(bt);
1745 	if (tag == BLK_MQ_NO_TAG)
1746 		return false;
1747 
1748 	rq->tag = tag + tag_offset;
1749 	return true;
1750 }
1751 
1752 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1753 {
1754 	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1755 		return false;
1756 
1757 	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1758 			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1759 		rq->rq_flags |= RQF_MQ_INFLIGHT;
1760 		__blk_mq_inc_active_requests(hctx);
1761 	}
1762 	hctx->tags->rqs[rq->tag] = rq;
1763 	return true;
1764 }
1765 
1766 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1767 				int flags, void *key)
1768 {
1769 	struct blk_mq_hw_ctx *hctx;
1770 
1771 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1772 
1773 	spin_lock(&hctx->dispatch_wait_lock);
1774 	if (!list_empty(&wait->entry)) {
1775 		struct sbitmap_queue *sbq;
1776 
1777 		list_del_init(&wait->entry);
1778 		sbq = &hctx->tags->bitmap_tags;
1779 		atomic_dec(&sbq->ws_active);
1780 	}
1781 	spin_unlock(&hctx->dispatch_wait_lock);
1782 
1783 	blk_mq_run_hw_queue(hctx, true);
1784 	return 1;
1785 }
1786 
1787 /*
1788  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1789  * the tag wakeups. For non-shared tags, we can simply mark us as needing a
1790  * restart. For both cases, take care to check the condition again after
1791  * marking us as waiting.
1792  */
1793 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1794 				 struct request *rq)
1795 {
1796 	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1797 	struct wait_queue_head *wq;
1798 	wait_queue_entry_t *wait;
1799 	bool ret;
1800 
1801 	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1802 		blk_mq_sched_mark_restart_hctx(hctx);
1803 
1804 		/*
1805 		 * It's possible that a tag was freed in the window between the
1806 		 * allocation failure and adding the hardware queue to the wait
1807 		 * queue.
1808 		 *
1809 		 * Don't clear RESTART here, someone else could have set it.
1810 		 * At most this will cost an extra queue run.
1811 		 */
1812 		return blk_mq_get_driver_tag(rq);
1813 	}
1814 
1815 	wait = &hctx->dispatch_wait;
1816 	if (!list_empty_careful(&wait->entry))
1817 		return false;
1818 
1819 	wq = &bt_wait_ptr(sbq, hctx)->wait;
1820 
1821 	spin_lock_irq(&wq->lock);
1822 	spin_lock(&hctx->dispatch_wait_lock);
1823 	if (!list_empty(&wait->entry)) {
1824 		spin_unlock(&hctx->dispatch_wait_lock);
1825 		spin_unlock_irq(&wq->lock);
1826 		return false;
1827 	}
1828 
1829 	atomic_inc(&sbq->ws_active);
1830 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1831 	__add_wait_queue(wq, wait);
1832 
1833 	/*
1834 	 * It's possible that a tag was freed in the window between the
1835 	 * allocation failure and adding the hardware queue to the wait
1836 	 * queue.
1837 	 */
1838 	ret = blk_mq_get_driver_tag(rq);
1839 	if (!ret) {
1840 		spin_unlock(&hctx->dispatch_wait_lock);
1841 		spin_unlock_irq(&wq->lock);
1842 		return false;
1843 	}
1844 
1845 	/*
1846 	 * We got a tag, remove ourselves from the wait queue to ensure
1847 	 * someone else gets the wakeup.
1848 	 */
1849 	list_del_init(&wait->entry);
1850 	atomic_dec(&sbq->ws_active);
1851 	spin_unlock(&hctx->dispatch_wait_lock);
1852 	spin_unlock_irq(&wq->lock);
1853 
1854 	return true;
1855 }
1856 
1857 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1858 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1859 /*
1860  * Update dispatch_busy using an Exponentially Weighted Moving Average (EWMA):
1861  * - EWMA is a simple way to compute a running average
1862  * - weights of 7/8 and 1/8 are applied so that old samples decay exponentially
1863  * - a factor of 4 is used to keep the result from rounding down to 0; the
1864  *   exact factor doesn't matter much because the EWMA decays exponentially
1865  */
1866 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1867 {
1868 	unsigned int ewma;
1869 
1870 	ewma = hctx->dispatch_busy;
1871 
1872 	if (!ewma && !busy)
1873 		return;
1874 
1875 	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1876 	if (busy)
1877 		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1878 	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1879 
1880 	hctx->dispatch_busy = ewma;
1881 }
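
/*
 * Purely illustrative (not part of the original source): with a weight of 8
 * and a factor of 4 as defined above, a run of busy updates walks
 * dispatch_busy up as
 *
 *	0 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9	(steady state, since
 *	(9 * 7 + 16) / 8 == 9 with integer division)
 *
 * while a run of non-busy updates decays it 9 -> 7 -> 6 -> ... -> 1 -> 0.
 * The value therefore reacts quickly in both directions and never grows
 * unbounded.
 */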
1882 
1883 #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
1884 
1885 static void blk_mq_handle_dev_resource(struct request *rq,
1886 				       struct list_head *list)
1887 {
1888 	struct request *next =
1889 		list_first_entry_or_null(list, struct request, queuelist);
1890 
1891 	/*
1892 	 * If an I/O scheduler has been configured and we got a driver tag for
1893 	 * the next request already, free it.
1894 	 */
1895 	if (next)
1896 		blk_mq_put_driver_tag(next);
1897 
1898 	list_add(&rq->queuelist, list);
1899 	__blk_mq_requeue_request(rq);
1900 }
1901 
1902 static void blk_mq_handle_zone_resource(struct request *rq,
1903 					struct list_head *zone_list)
1904 {
1905 	/*
1906 	 * If we end up here it is because we cannot dispatch a request to a
1907 	 * specific zone due to LLD level zone-write locking or other zone
1908 	 * related resource not being available. In this case, set the request
1909 	 * aside in zone_list for retrying it later.
1910 	 */
1911 	list_add(&rq->queuelist, zone_list);
1912 	__blk_mq_requeue_request(rq);
1913 }
1914 
1915 enum prep_dispatch {
1916 	PREP_DISPATCH_OK,
1917 	PREP_DISPATCH_NO_TAG,
1918 	PREP_DISPATCH_NO_BUDGET,
1919 };
1920 
1921 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1922 						  bool need_budget)
1923 {
1924 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1925 	int budget_token = -1;
1926 
1927 	if (need_budget) {
1928 		budget_token = blk_mq_get_dispatch_budget(rq->q);
1929 		if (budget_token < 0) {
1930 			blk_mq_put_driver_tag(rq);
1931 			return PREP_DISPATCH_NO_BUDGET;
1932 		}
1933 		blk_mq_set_rq_budget_token(rq, budget_token);
1934 	}
1935 
1936 	if (!blk_mq_get_driver_tag(rq)) {
1937 		/*
1938 		 * The initial allocation attempt failed, so we need to
1939 		 * rerun the hardware queue when a tag is freed. The
1940 		 * waitqueue takes care of that. If the queue is run
1941 		 * before we add this entry back on the dispatch list,
1942 		 * we'll re-run it below.
1943 		 */
1944 		if (!blk_mq_mark_tag_wait(hctx, rq)) {
1945 			/*
1946 			 * Budgets that were not obtained by this function are
1947 			 * released together when the partial dispatch is handled.
1948 			 */
1949 			if (need_budget)
1950 				blk_mq_put_dispatch_budget(rq->q, budget_token);
1951 			return PREP_DISPATCH_NO_TAG;
1952 		}
1953 	}
1954 
1955 	return PREP_DISPATCH_OK;
1956 }
1957 
1958 /* release all allocated budgets before calling blk_mq_dispatch_rq_list */
1959 static void blk_mq_release_budgets(struct request_queue *q,
1960 		struct list_head *list)
1961 {
1962 	struct request *rq;
1963 
1964 	list_for_each_entry(rq, list, queuelist) {
1965 		int budget_token = blk_mq_get_rq_budget_token(rq);
1966 
1967 		if (budget_token >= 0)
1968 			blk_mq_put_dispatch_budget(q, budget_token);
1969 	}
1970 }
1971 
1972 /*
1973  * Returns true if we did some work AND can potentially do more.
1974  */
1975 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1976 			     unsigned int nr_budgets)
1977 {
1978 	enum prep_dispatch prep;
1979 	struct request_queue *q = hctx->queue;
1980 	struct request *rq, *nxt;
1981 	int errors, queued;
1982 	blk_status_t ret = BLK_STS_OK;
1983 	LIST_HEAD(zone_list);
1984 	bool needs_resource = false;
1985 
1986 	if (list_empty(list))
1987 		return false;
1988 
1989 	/*
1990 	 * Now process all the entries, sending them to the driver.
1991 	 */
1992 	errors = queued = 0;
1993 	do {
1994 		struct blk_mq_queue_data bd;
1995 
1996 		rq = list_first_entry(list, struct request, queuelist);
1997 
1998 		WARN_ON_ONCE(hctx != rq->mq_hctx);
1999 		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2000 		if (prep != PREP_DISPATCH_OK)
2001 			break;
2002 
2003 		list_del_init(&rq->queuelist);
2004 
2005 		bd.rq = rq;
2006 
2007 		/*
2008 		 * Flag last if we have no more requests, or if we have more
2009 		 * but can't assign a driver tag to it.
2010 		 */
2011 		if (list_empty(list))
2012 			bd.last = true;
2013 		else {
2014 			nxt = list_first_entry(list, struct request, queuelist);
2015 			bd.last = !blk_mq_get_driver_tag(nxt);
2016 		}
2017 
2018 		/*
2019 		 * Once the request has been queued to the LLD, there is no
2020 		 * need to cover its budget any more.
2021 		 */
2022 		if (nr_budgets)
2023 			nr_budgets--;
2024 		ret = q->mq_ops->queue_rq(hctx, &bd);
2025 		switch (ret) {
2026 		case BLK_STS_OK:
2027 			queued++;
2028 			break;
2029 		case BLK_STS_RESOURCE:
2030 			needs_resource = true;
2031 			fallthrough;
2032 		case BLK_STS_DEV_RESOURCE:
2033 			blk_mq_handle_dev_resource(rq, list);
2034 			goto out;
2035 		case BLK_STS_ZONE_RESOURCE:
2036 			/*
2037 			 * Move the request to zone_list and keep going through
2038 			 * the dispatch list to find more requests the drive can
2039 			 * accept.
2040 			 */
2041 			blk_mq_handle_zone_resource(rq, &zone_list);
2042 			needs_resource = true;
2043 			break;
2044 		default:
2045 			errors++;
2046 			blk_mq_end_request(rq, ret);
2047 		}
2048 	} while (!list_empty(list));
2049 out:
2050 	if (!list_empty(&zone_list))
2051 		list_splice_tail_init(&zone_list, list);
2052 
2053 	/* If we didn't flush the entire list, we could have told the driver
2054 	 * there was more coming, but that turned out to be a lie.
2055 	 */
2056 	if ((!list_empty(list) || errors || needs_resource ||
2057 	     ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
2058 		q->mq_ops->commit_rqs(hctx);
2059 	/*
2060 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
2061 	 * that is where we will continue on next queue run.
2062 	 */
2063 	if (!list_empty(list)) {
2064 		bool needs_restart;
2065 		/* For non-shared tags, the RESTART check will suffice */
2066 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2067 			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
2068 
2069 		if (nr_budgets)
2070 			blk_mq_release_budgets(q, list);
2071 
2072 		spin_lock(&hctx->lock);
2073 		list_splice_tail_init(list, &hctx->dispatch);
2074 		spin_unlock(&hctx->lock);
2075 
2076 		/*
2077 		 * Order adding requests to hctx->dispatch against checking the
2078 		 * SCHED_RESTART flag. This smp_mb() pairs with the one in
2079 		 * blk_mq_sched_restart(), and prevents the restart code path
2080 		 * from missing the requests newly added to hctx->dispatch
2081 		 * while SCHED_RESTART is observed here.
2082 		 */
2083 		smp_mb();
2084 
2085 		/*
2086 		 * If SCHED_RESTART was set by the caller of this function and
2087 		 * it is no longer set that means that it was cleared by another
2088 		 * thread and hence that a queue rerun is needed.
2089 		 *
2090 		 * If 'no_tag' is set, that means that we failed getting
2091 		 * a driver tag with an I/O scheduler attached. If our dispatch
2092 		 * waitqueue is no longer active, ensure that we run the queue
2093 		 * AFTER adding our entries back to the list.
2094 		 *
2095 		 * If no I/O scheduler has been configured it is possible that
2096 		 * the hardware queue got stopped and restarted before requests
2097 		 * were pushed back onto the dispatch list. Rerun the queue to
2098 		 * avoid starvation. Notes:
2099 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
2100 		 *   been stopped before rerunning a queue.
2101 		 * - Some but not all block drivers stop a queue before
2102 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2103 		 *   and dm-rq.
2104 		 *
2105 		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2106 		 * bit is set, run queue after a delay to avoid IO stalls
2107 		 * that could otherwise occur if the queue is idle.  We'll do
2108 		 * similar if we couldn't get budget or couldn't lock a zone
2109 		 * and SCHED_RESTART is set.
2110 		 */
2111 		needs_restart = blk_mq_sched_needs_restart(hctx);
2112 		if (prep == PREP_DISPATCH_NO_BUDGET)
2113 			needs_resource = true;
2114 		if (!needs_restart ||
2115 		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2116 			blk_mq_run_hw_queue(hctx, true);
2117 		else if (needs_resource)
2118 			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2119 
2120 		blk_mq_update_dispatch_busy(hctx, true);
2121 		return false;
2122 	} else
2123 		blk_mq_update_dispatch_busy(hctx, false);
2124 
2125 	return (queued + errors) != 0;
2126 }
2127 
2128 /**
2129  * __blk_mq_run_hw_queue - Run a hardware queue.
2130  * @hctx: Pointer to the hardware queue to run.
2131  *
2132  * Send pending requests to the hardware.
2133  */
2134 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
2135 {
2136 	/*
2137 	 * We can't run the queue inline with ints disabled. Ensure that
2138 	 * we catch bad users of this early.
2139 	 */
2140 	WARN_ON_ONCE(in_interrupt());
2141 
2142 	blk_mq_run_dispatch_ops(hctx->queue,
2143 			blk_mq_sched_dispatch_requests(hctx));
2144 }
2145 
2146 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2147 {
2148 	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2149 
2150 	if (cpu >= nr_cpu_ids)
2151 		cpu = cpumask_first(hctx->cpumask);
2152 	return cpu;
2153 }
2154 
2155 /*
2156  * It'd be great if the workqueue API had a way to pass
2157  * in a mask and had some smarts for more clever placement.
2158  * For now we just round-robin here, switching for every
2159  * BLK_MQ_CPU_WORK_BATCH queued items.
2160  */
2161 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2162 {
2163 	bool tried = false;
2164 	int next_cpu = hctx->next_cpu;
2165 
2166 	if (hctx->queue->nr_hw_queues == 1)
2167 		return WORK_CPU_UNBOUND;
2168 
2169 	if (--hctx->next_cpu_batch <= 0) {
2170 select_cpu:
2171 		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2172 				cpu_online_mask);
2173 		if (next_cpu >= nr_cpu_ids)
2174 			next_cpu = blk_mq_first_mapped_cpu(hctx);
2175 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2176 	}
2177 
2178 	/*
2179 	 * Do an unbound schedule if we can't find an online CPU for this hctx,
2180 	 * which should only happen while handling the CPU DEAD hotplug event.
2181 	 */
2182 	if (!cpu_online(next_cpu)) {
2183 		if (!tried) {
2184 			tried = true;
2185 			goto select_cpu;
2186 		}
2187 
2188 		/*
2189 		 * Make sure to re-select the CPU next time once CPUs in
2190 		 * hctx->cpumask come online again.
2191 		 */
2192 		hctx->next_cpu = next_cpu;
2193 		hctx->next_cpu_batch = 1;
2194 		return WORK_CPU_UNBOUND;
2195 	}
2196 
2197 	hctx->next_cpu = next_cpu;
2198 	return next_cpu;
2199 }
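
/*
 * Worked example (illustrative only, not from the original source): with
 * hctx->cpumask = {2, 5, 8} and all of them online, successive calls return
 * CPU 2 for roughly BLK_MQ_CPU_WORK_BATCH invocations, then CPU 5, then
 * CPU 8, wrapping back to CPU 2 via blk_mq_first_mapped_cpu(). If none of
 * the CPUs in the mask is online, WORK_CPU_UNBOUND is returned instead.
 */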
2200 
2201 /**
2202  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
2203  * @hctx: Pointer to the hardware queue to run.
2204  * @async: If we want to run the queue asynchronously.
2205  * @msecs: Milliseconds of delay to wait before running the queue.
2206  *
2207  * If !@async, try to run the queue now. Else, run the queue asynchronously and
2208  * with a delay of @msecs.
2209  */
2210 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
2211 					unsigned long msecs)
2212 {
2213 	if (unlikely(blk_mq_hctx_stopped(hctx)))
2214 		return;
2215 
2216 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2217 		if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2218 			__blk_mq_run_hw_queue(hctx);
2219 			return;
2220 		}
2221 	}
2222 
2223 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2224 				    msecs_to_jiffies(msecs));
2225 }
2226 
2227 /**
2228  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2229  * @hctx: Pointer to the hardware queue to run.
2230  * @msecs: Milliseconds of delay to wait before running the queue.
2231  *
2232  * Run a hardware queue asynchronously with a delay of @msecs.
2233  */
2234 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2235 {
2236 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
2237 }
2238 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2239 
2240 /**
2241  * blk_mq_run_hw_queue - Start to run a hardware queue.
2242  * @hctx: Pointer to the hardware queue to run.
2243  * @async: If we want to run the queue asynchronously.
2244  *
2245  * Check if the request queue is not in a quiesced state and if there are
2246  * pending requests to be sent. If this is true, run the queue to send requests
2247  * to hardware.
2248  */
2249 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2250 {
2251 	bool need_run;
2252 
2253 	/*
2254 	 * When the queue is quiesced, we may be switching the I/O scheduler,
2255 	 * updating nr_hw_queues, or doing other work, so the queue cannot be
2256 	 * run any more; even __blk_mq_hctx_has_pending() can't be called safely.
2257 	 *
2258 	 * The queue will be rerun by blk_mq_unquiesce_queue() if it is
2259 	 * quiesced.
2260 	 */
2261 	__blk_mq_run_dispatch_ops(hctx->queue, false,
2262 		need_run = !blk_queue_quiesced(hctx->queue) &&
2263 		blk_mq_hctx_has_pending(hctx));
2264 
2265 	if (need_run)
2266 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
2267 }
2268 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2269 
2270 /*
2271  * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
2272  * scheduler.
2273  */
2274 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2275 {
2276 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2277 	/*
2278 	 * If the IO scheduler does not respect hardware queues when
2279 	 * dispatching, we just don't bother with multiple HW queues and
2280 	 * dispatch from hctx for the current CPU since running multiple queues
2281 	 * just causes lock contention inside the scheduler and pointless cache
2282 	 * bouncing.
2283 	 */
2284 	struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2285 
2286 	if (!blk_mq_hctx_stopped(hctx))
2287 		return hctx;
2288 	return NULL;
2289 }
2290 
2291 /**
2292  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2293  * @q: Pointer to the request queue to run.
2294  * @async: If we want to run the queue asynchronously.
2295  */
2296 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2297 {
2298 	struct blk_mq_hw_ctx *hctx, *sq_hctx;
2299 	unsigned long i;
2300 
2301 	sq_hctx = NULL;
2302 	if (blk_queue_sq_sched(q))
2303 		sq_hctx = blk_mq_get_sq_hctx(q);
2304 	queue_for_each_hw_ctx(q, hctx, i) {
2305 		if (blk_mq_hctx_stopped(hctx))
2306 			continue;
2307 		/*
2308 		 * Dispatch from this hctx either if there's no hctx preferred
2309 		 * by IO scheduler or if it has requests that bypass the
2310 		 * scheduler.
2311 		 */
2312 		if (!sq_hctx || sq_hctx == hctx ||
2313 		    !list_empty_careful(&hctx->dispatch))
2314 			blk_mq_run_hw_queue(hctx, async);
2315 	}
2316 }
2317 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2318 
2319 /**
2320  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2321  * @q: Pointer to the request queue to run.
2322  * @msecs: Milliseconds of delay to wait before running the queues.
2323  */
2324 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2325 {
2326 	struct blk_mq_hw_ctx *hctx, *sq_hctx;
2327 	unsigned long i;
2328 
2329 	sq_hctx = NULL;
2330 	if (blk_queue_sq_sched(q))
2331 		sq_hctx = blk_mq_get_sq_hctx(q);
2332 	queue_for_each_hw_ctx(q, hctx, i) {
2333 		if (blk_mq_hctx_stopped(hctx))
2334 			continue;
2335 		/*
2336 		 * If there is already a run_work pending, leave the
2337 		 * pending delay untouched. Otherwise, a hctx can stall
2338 		 * if another hctx is re-delaying the other's work
2339 		 * before the work executes.
2340 		 */
2341 		if (delayed_work_pending(&hctx->run_work))
2342 			continue;
2343 		/*
2344 		 * Dispatch from this hctx either if there's no hctx preferred
2345 		 * by IO scheduler or if it has requests that bypass the
2346 		 * scheduler.
2347 		 */
2348 		if (!sq_hctx || sq_hctx == hctx ||
2349 		    !list_empty_careful(&hctx->dispatch))
2350 			blk_mq_delay_run_hw_queue(hctx, msecs);
2351 	}
2352 }
2353 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2354 
2355 /*
2356  * This function is often used by drivers to pause .queue_rq() when there
2357  * aren't enough resources or some other condition isn't satisfied, in which
2358  * case BLK_STS_RESOURCE is usually returned.
2359  *
2360  * We do not guarantee that dispatch can be drained or blocked
2361  * after blk_mq_stop_hw_queue() returns. Please use
2362  * blk_mq_quiesce_queue() for that requirement.
2363  */
2364 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2365 {
2366 	cancel_delayed_work(&hctx->run_work);
2367 
2368 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2369 }
2370 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
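
/*
 * Hypothetical driver-side sketch (the foo_* names are made up, not part of
 * this file): a ->queue_rq() handler that runs out of device resources
 * typically pauses the hardware queue and restarts it from its completion
 * path once resources have been freed:
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct foo_dev *dev = hctx->queue->queuedata;
 *
 *		if (!foo_dev_has_resources(dev)) {
 *			blk_mq_stop_hw_queue(hctx);
 *			return BLK_STS_DEV_RESOURCE;
 *		}
 *		return foo_issue(dev, bd->rq);
 *	}
 *
 *	static void foo_complete_irq(struct foo_dev *dev)
 *	{
 *		foo_reap_completions(dev);
 *		blk_mq_start_stopped_hw_queues(dev->queue, true);
 *	}
 *
 * As noted above, blk_mq_quiesce_queue() is still required when dispatch
 * must be fully drained rather than merely paused.
 */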
2371 
2372 /*
2373  * This function is often used by drivers to pause .queue_rq() when there
2374  * aren't enough resources or some other condition isn't satisfied, in which
2375  * case BLK_STS_RESOURCE is usually returned.
2376  *
2377  * We do not guarantee that dispatch can be drained or blocked
2378  * after blk_mq_stop_hw_queues() returns. Please use
2379  * blk_mq_quiesce_queue() for that requirement.
2380  */
2381 void blk_mq_stop_hw_queues(struct request_queue *q)
2382 {
2383 	struct blk_mq_hw_ctx *hctx;
2384 	unsigned long i;
2385 
2386 	queue_for_each_hw_ctx(q, hctx, i)
2387 		blk_mq_stop_hw_queue(hctx);
2388 }
2389 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2390 
2391 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2392 {
2393 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2394 
2395 	blk_mq_run_hw_queue(hctx, false);
2396 }
2397 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2398 
2399 void blk_mq_start_hw_queues(struct request_queue *q)
2400 {
2401 	struct blk_mq_hw_ctx *hctx;
2402 	unsigned long i;
2403 
2404 	queue_for_each_hw_ctx(q, hctx, i)
2405 		blk_mq_start_hw_queue(hctx);
2406 }
2407 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2408 
2409 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2410 {
2411 	if (!blk_mq_hctx_stopped(hctx))
2412 		return;
2413 
2414 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2415 	blk_mq_run_hw_queue(hctx, async);
2416 }
2417 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2418 
2419 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2420 {
2421 	struct blk_mq_hw_ctx *hctx;
2422 	unsigned long i;
2423 
2424 	queue_for_each_hw_ctx(q, hctx, i)
2425 		blk_mq_start_stopped_hw_queue(hctx, async);
2426 }
2427 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
2428 
2429 static void blk_mq_run_work_fn(struct work_struct *work)
2430 {
2431 	struct blk_mq_hw_ctx *hctx;
2432 
2433 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
2434 
2435 	/*
2436 	 * If we are stopped, don't run the queue.
2437 	 */
2438 	if (blk_mq_hctx_stopped(hctx))
2439 		return;
2440 
2441 	__blk_mq_run_hw_queue(hctx);
2442 }
2443 
2444 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
2445 					    struct request *rq,
2446 					    bool at_head)
2447 {
2448 	struct blk_mq_ctx *ctx = rq->mq_ctx;
2449 	enum hctx_type type = hctx->type;
2450 
2451 	lockdep_assert_held(&ctx->lock);
2452 
2453 	trace_block_rq_insert(rq);
2454 
2455 	if (at_head)
2456 		list_add(&rq->queuelist, &ctx->rq_lists[type]);
2457 	else
2458 		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
2459 }
2460 
2461 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
2462 			     bool at_head)
2463 {
2464 	struct blk_mq_ctx *ctx = rq->mq_ctx;
2465 
2466 	lockdep_assert_held(&ctx->lock);
2467 
2468 	__blk_mq_insert_req_list(hctx, rq, at_head);
2469 	blk_mq_hctx_mark_pending(hctx, ctx);
2470 }
2471 
2472 /**
2473  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2474  * @rq: Pointer to request to be inserted.
2475  * @at_head: true if the request should be inserted at the head of the list.
2476  * @run_queue: If we should run the hardware queue after inserting the request.
2477  *
2478  * Should only be used carefully, when the caller knows we want to
2479  * bypass a potential IO scheduler on the target device.
2480  */
2481 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
2482 				  bool run_queue)
2483 {
2484 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2485 
2486 	spin_lock(&hctx->lock);
2487 	if (at_head)
2488 		list_add(&rq->queuelist, &hctx->dispatch);
2489 	else
2490 		list_add_tail(&rq->queuelist, &hctx->dispatch);
2491 	spin_unlock(&hctx->lock);
2492 
2493 	if (run_queue)
2494 		blk_mq_run_hw_queue(hctx, false);
2495 }
2496 
2497 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
2498 			    struct list_head *list)
2499 
2500 {
2501 	struct request *rq;
2502 	enum hctx_type type = hctx->type;
2503 
2504 	/*
2505 	 * Preemption doesn't flush the plug list, so it's possible that
2506 	 * ctx->cpu is offline by now.
2507 	 */
2508 	list_for_each_entry(rq, list, queuelist) {
2509 		BUG_ON(rq->mq_ctx != ctx);
2510 		trace_block_rq_insert(rq);
2511 	}
2512 
2513 	spin_lock(&ctx->lock);
2514 	list_splice_tail_init(list, &ctx->rq_lists[type]);
2515 	blk_mq_hctx_mark_pending(hctx, ctx);
2516 	spin_unlock(&ctx->lock);
2517 }
2518 
2519 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
2520 			      bool from_schedule)
2521 {
2522 	if (hctx->queue->mq_ops->commit_rqs) {
2523 		trace_block_unplug(hctx->queue, *queued, !from_schedule);
2524 		hctx->queue->mq_ops->commit_rqs(hctx);
2525 	}
2526 	*queued = 0;
2527 }
2528 
2529 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2530 		unsigned int nr_segs)
2531 {
2532 	int err;
2533 
2534 	if (bio->bi_opf & REQ_RAHEAD)
2535 		rq->cmd_flags |= REQ_FAILFAST_MASK;
2536 
2537 	rq->__sector = bio->bi_iter.bi_sector;
2538 	blk_rq_bio_prep(rq, bio, nr_segs);
2539 
2540 	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2541 	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2542 	WARN_ON_ONCE(err);
2543 
2544 	blk_account_io_start(rq);
2545 }
2546 
2547 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2548 					    struct request *rq, bool last)
2549 {
2550 	struct request_queue *q = rq->q;
2551 	struct blk_mq_queue_data bd = {
2552 		.rq = rq,
2553 		.last = last,
2554 	};
2555 	blk_status_t ret;
2556 
2557 	/*
2558 	 * If queueing succeeded, we are done. On a hard error, the caller may
2559 	 * kill the request. For any other (busy) result, just add it back to
2560 	 * our list as we would have done previously.
2561 	 */
2562 	ret = q->mq_ops->queue_rq(hctx, &bd);
2563 	switch (ret) {
2564 	case BLK_STS_OK:
2565 		blk_mq_update_dispatch_busy(hctx, false);
2566 		break;
2567 	case BLK_STS_RESOURCE:
2568 	case BLK_STS_DEV_RESOURCE:
2569 		blk_mq_update_dispatch_busy(hctx, true);
2570 		__blk_mq_requeue_request(rq);
2571 		break;
2572 	default:
2573 		blk_mq_update_dispatch_busy(hctx, false);
2574 		break;
2575 	}
2576 
2577 	return ret;
2578 }
2579 
2580 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2581 						struct request *rq,
2582 						bool bypass_insert, bool last)
2583 {
2584 	struct request_queue *q = rq->q;
2585 	bool run_queue = true;
2586 	int budget_token;
2587 
2588 	/*
2589 	 * An RCU or SRCU read lock is needed before checking the quiesced flag.
2590 	 *
2591 	 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2592 	 * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
2593 	 * so the driver does not try to dispatch again.
2594 	 */
2595 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2596 		run_queue = false;
2597 		bypass_insert = false;
2598 		goto insert;
2599 	}
2600 
2601 	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
2602 		goto insert;
2603 
2604 	budget_token = blk_mq_get_dispatch_budget(q);
2605 	if (budget_token < 0)
2606 		goto insert;
2607 
2608 	blk_mq_set_rq_budget_token(rq, budget_token);
2609 
2610 	if (!blk_mq_get_driver_tag(rq)) {
2611 		blk_mq_put_dispatch_budget(q, budget_token);
2612 		goto insert;
2613 	}
2614 
2615 	return __blk_mq_issue_directly(hctx, rq, last);
2616 insert:
2617 	if (bypass_insert)
2618 		return BLK_STS_RESOURCE;
2619 
2620 	blk_mq_sched_insert_request(rq, false, run_queue, false);
2621 
2622 	return BLK_STS_OK;
2623 }
2624 
2625 /**
2626  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2627  * @hctx: Pointer of the associated hardware queue.
2628  * @rq: Pointer to request to be sent.
2629  *
2630  * If the device has enough resources to accept a new request now, send the
2631  * request directly to the device driver. Else, insert it into the hctx->dispatch
2632  * queue, so we can try to send it again in the future. Requests inserted at this
2633  * queue have higher priority.
2634  */
2635 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2636 		struct request *rq)
2637 {
2638 	blk_status_t ret =
2639 		__blk_mq_try_issue_directly(hctx, rq, false, true);
2640 
2641 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2642 		blk_mq_request_bypass_insert(rq, false, true);
2643 	else if (ret != BLK_STS_OK)
2644 		blk_mq_end_request(rq, ret);
2645 }
2646 
2647 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2648 {
2649 	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
2650 }
2651 
2652 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
2653 {
2654 	struct blk_mq_hw_ctx *hctx = NULL;
2655 	struct request *rq;
2656 	int queued = 0;
2657 	int errors = 0;
2658 
2659 	while ((rq = rq_list_pop(&plug->mq_list))) {
2660 		bool last = rq_list_empty(plug->mq_list);
2661 		blk_status_t ret;
2662 
2663 		if (hctx != rq->mq_hctx) {
2664 			if (hctx)
2665 				blk_mq_commit_rqs(hctx, &queued, from_schedule);
2666 			hctx = rq->mq_hctx;
2667 		}
2668 
2669 		ret = blk_mq_request_issue_directly(rq, last);
2670 		switch (ret) {
2671 		case BLK_STS_OK:
2672 			queued++;
2673 			break;
2674 		case BLK_STS_RESOURCE:
2675 		case BLK_STS_DEV_RESOURCE:
2676 			blk_mq_request_bypass_insert(rq, false, true);
2677 			blk_mq_commit_rqs(hctx, &queued, from_schedule);
2678 			return;
2679 		default:
2680 			blk_mq_end_request(rq, ret);
2681 			errors++;
2682 			break;
2683 		}
2684 	}
2685 
2686 	/*
2687 	 * If we didn't flush the entire list, we could have told the driver
2688 	 * there was more coming, but that turned out to be a lie.
2689 	 */
2690 	if (errors)
2691 		blk_mq_commit_rqs(hctx, &queued, from_schedule);
2692 }
2693 
2694 static void __blk_mq_flush_plug_list(struct request_queue *q,
2695 				     struct blk_plug *plug)
2696 {
2697 	if (blk_queue_quiesced(q))
2698 		return;
2699 	q->mq_ops->queue_rqs(&plug->mq_list);
2700 }
2701 
2702 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2703 {
2704 	struct blk_mq_hw_ctx *this_hctx = NULL;
2705 	struct blk_mq_ctx *this_ctx = NULL;
2706 	struct request *requeue_list = NULL;
2707 	unsigned int depth = 0;
2708 	LIST_HEAD(list);
2709 
2710 	do {
2711 		struct request *rq = rq_list_pop(&plug->mq_list);
2712 
2713 		if (!this_hctx) {
2714 			this_hctx = rq->mq_hctx;
2715 			this_ctx = rq->mq_ctx;
2716 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
2717 			rq_list_add(&requeue_list, rq);
2718 			continue;
2719 		}
2720 		list_add_tail(&rq->queuelist, &list);
2721 		depth++;
2722 	} while (!rq_list_empty(plug->mq_list));
2723 
2724 	plug->mq_list = requeue_list;
2725 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
2726 	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
2727 }
2728 
2729 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2730 {
2731 	struct request *rq;
2732 
2733 	if (rq_list_empty(plug->mq_list))
2734 		return;
2735 	plug->rq_count = 0;
2736 
2737 	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2738 		struct request_queue *q;
2739 
2740 		rq = rq_list_peek(&plug->mq_list);
2741 		q = rq->q;
2742 
2743 		/*
2744 		 * Peek first request and see if we have a ->queue_rqs() hook.
2745 		 * If we do, we can dispatch the whole plug list in one go. We
2746 		 * already know at this point that all requests belong to the
2747 		 * same queue, caller must ensure that's the case.
2748 		 *
2749 		 * Since we pass off the full list to the driver at this point,
2750 		 * we do not increment the active request count for the queue.
2751 		 * Bypass shared tags for now because of that.
2752 		 */
2753 		if (q->mq_ops->queue_rqs &&
2754 		    !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2755 			blk_mq_run_dispatch_ops(q,
2756 				__blk_mq_flush_plug_list(q, plug));
2757 			if (rq_list_empty(plug->mq_list))
2758 				return;
2759 		}
2760 
2761 		blk_mq_run_dispatch_ops(q,
2762 				blk_mq_plug_issue_direct(plug, false));
2763 		if (rq_list_empty(plug->mq_list))
2764 			return;
2765 	}
2766 
2767 	do {
2768 		blk_mq_dispatch_plug_list(plug, from_schedule);
2769 	} while (!rq_list_empty(plug->mq_list));
2770 }
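
/*
 * For reference, a hedged sketch of the submitter side (have_more_bios() and
 * next_bio() are hypothetical helpers, not part of this file): wrapping
 * submissions in a plug is what batches requests onto plug->mq_list so that
 * they can be flushed here in one go:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (have_more_bios())
 *		submit_bio(next_bio());
 *	blk_finish_plug(&plug);
 *
 * blk_finish_plug() (or a schedule while plugged) eventually reaches
 * blk_mq_flush_plug_list() above.
 */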
2771 
2772 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2773 		struct list_head *list)
2774 {
2775 	int queued = 0;
2776 	int errors = 0;
2777 
2778 	while (!list_empty(list)) {
2779 		blk_status_t ret;
2780 		struct request *rq = list_first_entry(list, struct request,
2781 				queuelist);
2782 
2783 		list_del_init(&rq->queuelist);
2784 		ret = blk_mq_request_issue_directly(rq, list_empty(list));
2785 		if (ret != BLK_STS_OK) {
2786 			errors++;
2787 			if (ret == BLK_STS_RESOURCE ||
2788 					ret == BLK_STS_DEV_RESOURCE) {
2789 				blk_mq_request_bypass_insert(rq, false,
2790 							list_empty(list));
2791 				break;
2792 			}
2793 			blk_mq_end_request(rq, ret);
2794 		} else
2795 			queued++;
2796 	}
2797 
2798 	/*
2799 	 * If we didn't flush the entire list, we could have told
2800 	 * the driver there was more coming, but that turned out to
2801 	 * be a lie.
2802 	 */
2803 	if ((!list_empty(list) || errors) &&
2804 	     hctx->queue->mq_ops->commit_rqs && queued)
2805 		hctx->queue->mq_ops->commit_rqs(hctx);
2806 }
2807 
2808 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2809 				     struct bio *bio, unsigned int nr_segs)
2810 {
2811 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2812 		if (blk_attempt_plug_merge(q, bio, nr_segs))
2813 			return true;
2814 		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2815 			return true;
2816 	}
2817 	return false;
2818 }
2819 
2820 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2821 					       struct blk_plug *plug,
2822 					       struct bio *bio,
2823 					       unsigned int nsegs)
2824 {
2825 	struct blk_mq_alloc_data data = {
2826 		.q		= q,
2827 		.nr_tags	= 1,
2828 		.cmd_flags	= bio->bi_opf,
2829 	};
2830 	struct request *rq;
2831 
2832 	if (unlikely(bio_queue_enter(bio)))
2833 		return NULL;
2834 
2835 	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2836 		goto queue_exit;
2837 
2838 	rq_qos_throttle(q, bio);
2839 
2840 	if (plug) {
2841 		data.nr_tags = plug->nr_ios;
2842 		plug->nr_ios = 1;
2843 		data.cached_rq = &plug->cached_rq;
2844 	}
2845 
2846 	rq = __blk_mq_alloc_requests(&data);
2847 	if (rq)
2848 		return rq;
2849 	rq_qos_cleanup(q, bio);
2850 	if (bio->bi_opf & REQ_NOWAIT)
2851 		bio_wouldblock_error(bio);
2852 queue_exit:
2853 	blk_queue_exit(q);
2854 	return NULL;
2855 }
2856 
2857 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
2858 		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
2859 {
2860 	struct request *rq;
2861 
2862 	if (!plug)
2863 		return NULL;
2864 	rq = rq_list_peek(&plug->cached_rq);
2865 	if (!rq || rq->q != q)
2866 		return NULL;
2867 
2868 	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
2869 		*bio = NULL;
2870 		return NULL;
2871 	}
2872 
2873 	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
2874 		return NULL;
2875 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
2876 		return NULL;
2877 
2878 	/*
2879 	 * If any qos ->throttle() ends up blocking, we will have flushed the
2880 	 * plug and hence killed the cached_rq list as well. Pop this entry
2881 	 * before we throttle.
2882 	 */
2883 	plug->cached_rq = rq_list_next(rq);
2884 	rq_qos_throttle(q, *bio);
2885 
2886 	rq->cmd_flags = (*bio)->bi_opf;
2887 	INIT_LIST_HEAD(&rq->queuelist);
2888 	return rq;
2889 }
2890 
2891 static void bio_set_ioprio(struct bio *bio)
2892 {
2893 	/* Nobody set ioprio so far? Initialize it based on task's nice value */
2894 	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
2895 		bio->bi_ioprio = get_current_ioprio();
2896 	blkcg_set_ioprio(bio);
2897 }
2898 
2899 /**
2900  * blk_mq_submit_bio - Create and send a request to block device.
2901  * @bio: Bio pointer.
2902  *
2903  * Builds up a request structure from @q and @bio and sends it to the device. The
2904  * request may not be queued directly to hardware if:
2905  * * This request can be merged with another one
2906  * * We want to place request at plug queue for possible future merging
2907  * * There is an IO scheduler active at this queue
2908  *
2909  * It will not queue the request if there is an error with the bio, or at the
2910  * request creation.
2911  */
2912 void blk_mq_submit_bio(struct bio *bio)
2913 {
2914 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2915 	struct blk_plug *plug = blk_mq_plug(bio);
2916 	const int is_sync = op_is_sync(bio->bi_opf);
2917 	struct request *rq;
2918 	unsigned int nr_segs = 1;
2919 	blk_status_t ret;
2920 
2921 	bio = blk_queue_bounce(bio, q);
2922 	if (bio_may_exceed_limits(bio, &q->limits)) {
2923 		bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2924 		if (!bio)
2925 			return;
2926 	}
2927 
2928 	if (!bio_integrity_prep(bio))
2929 		return;
2930 
2931 	bio_set_ioprio(bio);
2932 
2933 	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
2934 	if (!rq) {
2935 		if (!bio)
2936 			return;
2937 		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
2938 		if (unlikely(!rq))
2939 			return;
2940 	}
2941 
2942 	trace_block_getrq(bio);
2943 
2944 	rq_qos_track(q, rq, bio);
2945 
2946 	blk_mq_bio_to_request(rq, bio, nr_segs);
2947 
2948 	ret = blk_crypto_init_request(rq);
2949 	if (ret != BLK_STS_OK) {
2950 		bio->bi_status = ret;
2951 		bio_endio(bio);
2952 		blk_mq_free_request(rq);
2953 		return;
2954 	}
2955 
2956 	if (op_is_flush(bio->bi_opf)) {
2957 		blk_insert_flush(rq);
2958 		return;
2959 	}
2960 
2961 	if (plug)
2962 		blk_add_rq_to_plug(plug, rq);
2963 	else if ((rq->rq_flags & RQF_ELV) ||
2964 		 (rq->mq_hctx->dispatch_busy &&
2965 		  (q->nr_hw_queues == 1 || !is_sync)))
2966 		blk_mq_sched_insert_request(rq, false, true, true);
2967 	else
2968 		blk_mq_run_dispatch_ops(rq->q,
2969 				blk_mq_try_issue_directly(rq->mq_hctx, rq));
2970 }
2971 
2972 #ifdef CONFIG_BLK_MQ_STACKING
2973 /**
2974  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2975  * @rq: the request being queued
2976  */
2977 blk_status_t blk_insert_cloned_request(struct request *rq)
2978 {
2979 	struct request_queue *q = rq->q;
2980 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
2981 	blk_status_t ret;
2982 
2983 	if (blk_rq_sectors(rq) > max_sectors) {
2984 		/*
2985 		 * A SCSI device has no good way of reporting whether
2986 		 * Write Same/Zero is actually supported. If a device rejects
2987 		 * a non-read/write command (discard, write same, etc.) the
2988 		 * low-level device driver will set the relevant queue limit to
2989 		 * 0 to prevent blk-lib from issuing more of the offending
2990 		 * operations. Commands queued prior to the queue limit being
2991 		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
2992 		 * errors being propagated to upper layers.
2993 		 */
2994 		if (max_sectors == 0)
2995 			return BLK_STS_NOTSUPP;
2996 
2997 		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
2998 			__func__, blk_rq_sectors(rq), max_sectors);
2999 		return BLK_STS_IOERR;
3000 	}
3001 
3002 	/*
3003 	 * The queue settings related to segment counting may differ from the
3004 	 * original queue.
3005 	 */
3006 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3007 	if (rq->nr_phys_segments > queue_max_segments(q)) {
3008 		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
3009 			__func__, rq->nr_phys_segments, queue_max_segments(q));
3010 		return BLK_STS_IOERR;
3011 	}
3012 
3013 	if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3014 		return BLK_STS_IOERR;
3015 
3016 	if (blk_crypto_insert_cloned_request(rq))
3017 		return BLK_STS_IOERR;
3018 
3019 	blk_account_io_start(rq);
3020 
3021 	/*
3022 	 * Since we have a scheduler attached on the top device,
3023 	 * bypass a potential scheduler on the bottom device for
3024 	 * insert.
3025 	 */
3026 	blk_mq_run_dispatch_ops(q,
3027 			ret = blk_mq_request_issue_directly(rq, true));
3028 	if (ret)
3029 		blk_account_io_done(rq, ktime_get_ns());
3030 	return ret;
3031 }
3032 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
3033 
3034 /**
3035  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3036  * @rq: the clone request to be cleaned up
3037  *
3038  * Description:
3039  *     Free all bios in @rq for a cloned request.
3040  */
3041 void blk_rq_unprep_clone(struct request *rq)
3042 {
3043 	struct bio *bio;
3044 
3045 	while ((bio = rq->bio) != NULL) {
3046 		rq->bio = bio->bi_next;
3047 
3048 		bio_put(bio);
3049 	}
3050 }
3051 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3052 
3053 /**
3054  * blk_rq_prep_clone - Helper function to setup clone request
3055  * @rq: the request to be setup
3056  * @rq_src: original request to be cloned
3057  * @bs: bio_set that bios for clone are allocated from
3058  * @gfp_mask: memory allocation mask for bio
3059  * @bio_ctr: setup function to be called for each clone bio.
3060  *           Returns %0 for success, non %0 for failure.
3061  * @data: private data to be passed to @bio_ctr
3062  *
3063  * Description:
3064  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3065  *     Also, the pages which the original bios are pointing to are not copied;
3066  *     the cloned bios just point to the same pages.
3067  *     So the cloned bios must be completed before the original bios, which means
3068  *     the caller must complete @rq before @rq_src.
3069  */
3070 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3071 		      struct bio_set *bs, gfp_t gfp_mask,
3072 		      int (*bio_ctr)(struct bio *, struct bio *, void *),
3073 		      void *data)
3074 {
3075 	struct bio *bio, *bio_src;
3076 
3077 	if (!bs)
3078 		bs = &fs_bio_set;
3079 
3080 	__rq_for_each_bio(bio_src, rq_src) {
3081 		bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3082 				      bs);
3083 		if (!bio)
3084 			goto free_and_out;
3085 
3086 		if (bio_ctr && bio_ctr(bio, bio_src, data))
3087 			goto free_and_out;
3088 
3089 		if (rq->bio) {
3090 			rq->biotail->bi_next = bio;
3091 			rq->biotail = bio;
3092 		} else {
3093 			rq->bio = rq->biotail = bio;
3094 		}
3095 		bio = NULL;
3096 	}
3097 
3098 	/* Copy attributes of the original request to the clone request. */
3099 	rq->__sector = blk_rq_pos(rq_src);
3100 	rq->__data_len = blk_rq_bytes(rq_src);
3101 	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3102 		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3103 		rq->special_vec = rq_src->special_vec;
3104 	}
3105 	rq->nr_phys_segments = rq_src->nr_phys_segments;
3106 	rq->ioprio = rq_src->ioprio;
3107 
3108 	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3109 		goto free_and_out;
3110 
3111 	return 0;
3112 
3113 free_and_out:
3114 	if (bio)
3115 		bio_put(bio);
3116 	blk_rq_unprep_clone(rq);
3117 
3118 	return -ENOMEM;
3119 }
3120 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
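
/*
 * Rough usage sketch for a request-based stacking driver (foo_map_and_issue
 * is a hypothetical helper, not part of this file): clone the incoming
 * request onto the lower device and issue it directly, bypassing the lower
 * queue's scheduler:
 *
 *	static blk_status_t foo_map_and_issue(struct request *rq_src,
 *					      struct request *clone)
 *	{
 *		if (blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC,
 *				      NULL, NULL))
 *			return BLK_STS_RESOURCE;
 *		return blk_insert_cloned_request(clone);
 *	}
 *
 * On completion, the clone is torn down with blk_rq_unprep_clone() before
 * the original request is completed, matching the ordering documented for
 * blk_rq_prep_clone() above.
 */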
3121 #endif /* CONFIG_BLK_MQ_STACKING */
3122 
3123 /*
3124  * Steal bios from a request and add them to a bio list.
3125  * The request must not have been partially completed before.
3126  */
3127 void blk_steal_bios(struct bio_list *list, struct request *rq)
3128 {
3129 	if (rq->bio) {
3130 		if (list->tail)
3131 			list->tail->bi_next = rq->bio;
3132 		else
3133 			list->head = rq->bio;
3134 		list->tail = rq->biotail;
3135 
3136 		rq->bio = NULL;
3137 		rq->biotail = NULL;
3138 	}
3139 
3140 	rq->__data_len = 0;
3141 }
3142 EXPORT_SYMBOL_GPL(blk_steal_bios);
3143 
3144 static size_t order_to_size(unsigned int order)
3145 {
3146 	return (size_t)PAGE_SIZE << order;
3147 }
3148 
3149 /* called before freeing request pool in @tags */
3150 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3151 				    struct blk_mq_tags *tags)
3152 {
3153 	struct page *page;
3154 	unsigned long flags;
3155 
3156 	 * There is no need to clear the mapping if the driver tags are not
3157 	 * initialized or if the mapping belongs to the driver tags.
3158 	 * or the mapping belongs to the driver tags.
3159 	 */
3160 	if (!drv_tags || drv_tags == tags)
3161 		return;
3162 
3163 	list_for_each_entry(page, &tags->page_list, lru) {
3164 		unsigned long start = (unsigned long)page_address(page);
3165 		unsigned long end = start + order_to_size(page->private);
3166 		int i;
3167 
3168 		for (i = 0; i < drv_tags->nr_tags; i++) {
3169 			struct request *rq = drv_tags->rqs[i];
3170 			unsigned long rq_addr = (unsigned long)rq;
3171 
3172 			if (rq_addr >= start && rq_addr < end) {
3173 				WARN_ON_ONCE(req_ref_read(rq) != 0);
3174 				cmpxchg(&drv_tags->rqs[i], rq, NULL);
3175 			}
3176 		}
3177 	}
3178 
3179 	/*
3180 	 * Wait until all pending iteration is done.
3181 	 *
3182 	 * The request references have been cleared, and this is guaranteed to
3183 	 * be observed after ->lock is released.
3184 	 */
3185 	spin_lock_irqsave(&drv_tags->lock, flags);
3186 	spin_unlock_irqrestore(&drv_tags->lock, flags);
3187 }
3188 
3189 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3190 		     unsigned int hctx_idx)
3191 {
3192 	struct blk_mq_tags *drv_tags;
3193 	struct page *page;
3194 
3195 	if (list_empty(&tags->page_list))
3196 		return;
3197 
3198 	if (blk_mq_is_shared_tags(set->flags))
3199 		drv_tags = set->shared_tags;
3200 	else
3201 		drv_tags = set->tags[hctx_idx];
3202 
3203 	if (tags->static_rqs && set->ops->exit_request) {
3204 		int i;
3205 
3206 		for (i = 0; i < tags->nr_tags; i++) {
3207 			struct request *rq = tags->static_rqs[i];
3208 
3209 			if (!rq)
3210 				continue;
3211 			set->ops->exit_request(set, rq, hctx_idx);
3212 			tags->static_rqs[i] = NULL;
3213 		}
3214 	}
3215 
3216 	blk_mq_clear_rq_mapping(drv_tags, tags);
3217 
3218 	while (!list_empty(&tags->page_list)) {
3219 		page = list_first_entry(&tags->page_list, struct page, lru);
3220 		list_del_init(&page->lru);
3221 		/*
3222 		 * Remove kmemleak object previously allocated in
3223 		 * blk_mq_alloc_rqs().
3224 		 */
3225 		kmemleak_free(page_address(page));
3226 		__free_pages(page, page->private);
3227 	}
3228 }
3229 
3230 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3231 {
3232 	kfree(tags->rqs);
3233 	tags->rqs = NULL;
3234 	kfree(tags->static_rqs);
3235 	tags->static_rqs = NULL;
3236 
3237 	blk_mq_free_tags(tags);
3238 }
3239 
3240 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3241 		unsigned int hctx_idx)
3242 {
3243 	int i;
3244 
3245 	for (i = 0; i < set->nr_maps; i++) {
3246 		unsigned int start = set->map[i].queue_offset;
3247 		unsigned int end = start + set->map[i].nr_queues;
3248 
3249 		if (hctx_idx >= start && hctx_idx < end)
3250 			break;
3251 	}
3252 
3253 	if (i >= set->nr_maps)
3254 		i = HCTX_TYPE_DEFAULT;
3255 
3256 	return i;
3257 }
3258 
3259 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3260 		unsigned int hctx_idx)
3261 {
3262 	enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3263 
3264 	return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3265 }
3266 
3267 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3268 					       unsigned int hctx_idx,
3269 					       unsigned int nr_tags,
3270 					       unsigned int reserved_tags)
3271 {
3272 	int node = blk_mq_get_hctx_node(set, hctx_idx);
3273 	struct blk_mq_tags *tags;
3274 
3275 	if (node == NUMA_NO_NODE)
3276 		node = set->numa_node;
3277 
3278 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3279 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3280 	if (!tags)
3281 		return NULL;
3282 
3283 	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3284 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3285 				 node);
3286 	if (!tags->rqs) {
3287 		blk_mq_free_tags(tags);
3288 		return NULL;
3289 	}
3290 
3291 	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3292 					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3293 					node);
3294 	if (!tags->static_rqs) {
3295 		kfree(tags->rqs);
3296 		blk_mq_free_tags(tags);
3297 		return NULL;
3298 	}
3299 
3300 	return tags;
3301 }
3302 
3303 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3304 			       unsigned int hctx_idx, int node)
3305 {
3306 	int ret;
3307 
3308 	if (set->ops->init_request) {
3309 		ret = set->ops->init_request(set, rq, hctx_idx, node);
3310 		if (ret)
3311 			return ret;
3312 	}
3313 
3314 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3315 	return 0;
3316 }
3317 
3318 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3319 			    struct blk_mq_tags *tags,
3320 			    unsigned int hctx_idx, unsigned int depth)
3321 {
3322 	unsigned int i, j, entries_per_page, max_order = 4;
3323 	int node = blk_mq_get_hctx_node(set, hctx_idx);
3324 	size_t rq_size, left;
3325 
3326 	if (node == NUMA_NO_NODE)
3327 		node = set->numa_node;
3328 
3329 	INIT_LIST_HEAD(&tags->page_list);
3330 
3331 	/*
3332 	 * rq_size is the size of the request plus driver payload, rounded
3333 	 * to the cacheline size
3334 	 */
3335 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
3336 				cache_line_size());
3337 	left = rq_size * depth;
3338 
3339 	for (i = 0; i < depth; ) {
3340 		int this_order = max_order;
3341 		struct page *page;
3342 		int to_do;
3343 		void *p;
3344 
3345 		while (this_order && left < order_to_size(this_order - 1))
3346 			this_order--;
3347 
3348 		do {
3349 			page = alloc_pages_node(node,
3350 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3351 				this_order);
3352 			if (page)
3353 				break;
3354 			if (!this_order--)
3355 				break;
3356 			if (order_to_size(this_order) < rq_size)
3357 				break;
3358 		} while (1);
3359 
3360 		if (!page)
3361 			goto fail;
3362 
3363 		page->private = this_order;
3364 		list_add_tail(&page->lru, &tags->page_list);
3365 
3366 		p = page_address(page);
3367 		/*
3368 		 * to additional allocations, e.g. those made via ops->init_request().
3369 		 * to additional allocations like via ops->init_request().
3370 		 */
3371 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3372 		entries_per_page = order_to_size(this_order) / rq_size;
3373 		to_do = min(entries_per_page, depth - i);
3374 		left -= to_do * rq_size;
3375 		for (j = 0; j < to_do; j++) {
3376 			struct request *rq = p;
3377 
3378 			tags->static_rqs[i] = rq;
3379 			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3380 				tags->static_rqs[i] = NULL;
3381 				goto fail;
3382 			}
3383 
3384 			p += rq_size;
3385 			i++;
3386 		}
3387 	}
3388 	return 0;
3389 
3390 fail:
3391 	blk_mq_free_rqs(set, tags, hctx_idx);
3392 	return -ENOMEM;
3393 }
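
/*
 * Worked example with made-up numbers (illustrative only): if struct request
 * plus cmd_size rounds up to 384 bytes and the depth is 256, left starts at
 * 96KiB, so the first allocation tries order 4 (64KiB) and carves
 * 65536 / 384 = 170 requests out of it; a second allocation supplies the
 * remaining 86, and every carved request is passed through
 * blk_mq_init_request() before it is handed out.
 */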
3394 
3395 struct rq_iter_data {
3396 	struct blk_mq_hw_ctx *hctx;
3397 	bool has_rq;
3398 };
3399 
3400 static bool blk_mq_has_request(struct request *rq, void *data)
3401 {
3402 	struct rq_iter_data *iter_data = data;
3403 
3404 	if (rq->mq_hctx != iter_data->hctx)
3405 		return true;
3406 	iter_data->has_rq = true;
3407 	return false;
3408 }
3409 
3410 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3411 {
3412 	struct blk_mq_tags *tags = hctx->sched_tags ?
3413 			hctx->sched_tags : hctx->tags;
3414 	struct rq_iter_data data = {
3415 		.hctx	= hctx,
3416 	};
3417 
3418 	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3419 	return data.has_rq;
3420 }
3421 
3422 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3423 		struct blk_mq_hw_ctx *hctx)
3424 {
3425 	if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3426 		return false;
3427 	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3428 		return false;
3429 	return true;
3430 }
3431 
3432 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3433 {
3434 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3435 			struct blk_mq_hw_ctx, cpuhp_online);
3436 
3437 	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3438 	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
3439 		return 0;
3440 
3441 	/*
3442 	 * Prevent new requests from being allocated on the current hctx.
3443 	 *
3444 	 * The smp_mb__after_atomic() pairs with the implied barrier in
3445 	 * test_and_set_bit_lock() in sbitmap_get(), ensuring the inactive flag
3446 	 * is seen once we return from the tag allocator.
3447 	 */
3448 	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3449 	smp_mb__after_atomic();
3450 
3451 	/*
3452 	 * Try to grab a reference to the queue and wait for any outstanding
3453 	 * requests.  If we cannot grab a reference, the queue has been
3454 	 * frozen and there are no requests.
3455 	 */
3456 	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3457 		while (blk_mq_hctx_has_requests(hctx))
3458 			msleep(5);
3459 		percpu_ref_put(&hctx->queue->q_usage_counter);
3460 	}
3461 
3462 	return 0;
3463 }
3464 
blk_mq_hctx_notify_online(unsigned int cpu,struct hlist_node * node)3465 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3466 {
3467 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3468 			struct blk_mq_hw_ctx, cpuhp_online);
3469 
3470 	if (cpumask_test_cpu(cpu, hctx->cpumask))
3471 		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3472 	return 0;
3473 }
3474 
3475 /*
3476  * 'cpu' is going away. Splice any existing rq_list entries from this
3477  * software queue to the hw queue dispatch list, and ensure that the
3478  * hardware queue gets run.
3479  */
blk_mq_hctx_notify_dead(unsigned int cpu,struct hlist_node * node)3480 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3481 {
3482 	struct blk_mq_hw_ctx *hctx;
3483 	struct blk_mq_ctx *ctx;
3484 	LIST_HEAD(tmp);
3485 	enum hctx_type type;
3486 
3487 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3488 	if (!cpumask_test_cpu(cpu, hctx->cpumask))
3489 		return 0;
3490 
3491 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3492 	type = hctx->type;
3493 
3494 	spin_lock(&ctx->lock);
3495 	if (!list_empty(&ctx->rq_lists[type])) {
3496 		list_splice_init(&ctx->rq_lists[type], &tmp);
3497 		blk_mq_hctx_clear_pending(hctx, ctx);
3498 	}
3499 	spin_unlock(&ctx->lock);
3500 
3501 	if (list_empty(&tmp))
3502 		return 0;
3503 
3504 	spin_lock(&hctx->lock);
3505 	list_splice_tail_init(&tmp, &hctx->dispatch);
3506 	spin_unlock(&hctx->lock);
3507 
3508 	blk_mq_run_hw_queue(hctx, true);
3509 	return 0;
3510 }
3511 
blk_mq_remove_cpuhp(struct blk_mq_hw_ctx * hctx)3512 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3513 {
3514 	if (!(hctx->flags & BLK_MQ_F_STACKING))
3515 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3516 						    &hctx->cpuhp_online);
3517 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3518 					    &hctx->cpuhp_dead);
3519 }
3520 
3521 /*
3522  * Before freeing the hw queue, clear the flush request reference in
3523  * tags->rqs[] to avoid a potential use-after-free.
3524  */
blk_mq_clear_flush_rq_mapping(struct blk_mq_tags * tags,unsigned int queue_depth,struct request * flush_rq)3525 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3526 		unsigned int queue_depth, struct request *flush_rq)
3527 {
3528 	int i;
3529 	unsigned long flags;
3530 
3531 	/* The hw queue may not be mapped yet */
3532 	if (!tags)
3533 		return;
3534 
3535 	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3536 
3537 	for (i = 0; i < queue_depth; i++)
3538 		cmpxchg(&tags->rqs[i], flush_rq, NULL);
3539 
3540 	/*
3541 	 * Wait until all pending tag iteration is done.  Iterators read
3542 	 * tags->rqs[] under ->lock, so taking and releasing the lock here
3543 	 * waits out any iteration that may still hold a stale flush_rq
3544 	 * pointer, and guarantees the cleared reference is observed afterwards.
3545 	 */
3546 	spin_lock_irqsave(&tags->lock, flags);
3547 	spin_unlock_irqrestore(&tags->lock, flags);
3548 }
3549 
3550 /* hctx->ctxs will be freed in queue's release handler */
blk_mq_exit_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned int hctx_idx)3551 static void blk_mq_exit_hctx(struct request_queue *q,
3552 		struct blk_mq_tag_set *set,
3553 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3554 {
3555 	struct request *flush_rq = hctx->fq->flush_rq;
3556 
3557 	if (blk_mq_hw_queue_mapped(hctx))
3558 		blk_mq_tag_idle(hctx);
3559 
3560 	if (blk_queue_init_done(q))
3561 		blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3562 				set->queue_depth, flush_rq);
3563 	if (set->ops->exit_request)
3564 		set->ops->exit_request(set, flush_rq, hctx_idx);
3565 
3566 	if (set->ops->exit_hctx)
3567 		set->ops->exit_hctx(hctx, hctx_idx);
3568 
3569 	blk_mq_remove_cpuhp(hctx);
3570 
3571 	xa_erase(&q->hctx_table, hctx_idx);
3572 
3573 	spin_lock(&q->unused_hctx_lock);
3574 	list_add(&hctx->hctx_list, &q->unused_hctx_list);
3575 	spin_unlock(&q->unused_hctx_lock);
3576 }
3577 
blk_mq_exit_hw_queues(struct request_queue * q,struct blk_mq_tag_set * set,int nr_queue)3578 static void blk_mq_exit_hw_queues(struct request_queue *q,
3579 		struct blk_mq_tag_set *set, int nr_queue)
3580 {
3581 	struct blk_mq_hw_ctx *hctx;
3582 	unsigned long i;
3583 
3584 	queue_for_each_hw_ctx(q, hctx, i) {
3585 		if (i == nr_queue)
3586 			break;
3587 		blk_mq_exit_hctx(q, set, hctx, i);
3588 	}
3589 }
3590 
blk_mq_init_hctx(struct request_queue * q,struct blk_mq_tag_set * set,struct blk_mq_hw_ctx * hctx,unsigned hctx_idx)3591 static int blk_mq_init_hctx(struct request_queue *q,
3592 		struct blk_mq_tag_set *set,
3593 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3594 {
3595 	hctx->queue_num = hctx_idx;
3596 
3597 	if (!(hctx->flags & BLK_MQ_F_STACKING))
3598 		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3599 				&hctx->cpuhp_online);
3600 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3601 
3602 	hctx->tags = set->tags[hctx_idx];
3603 
3604 	if (set->ops->init_hctx &&
3605 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3606 		goto unregister_cpu_notifier;
3607 
3608 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3609 				hctx->numa_node))
3610 		goto exit_hctx;
3611 
3612 	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3613 		goto exit_flush_rq;
3614 
3615 	return 0;
3616 
3617  exit_flush_rq:
3618 	if (set->ops->exit_request)
3619 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3620  exit_hctx:
3621 	if (set->ops->exit_hctx)
3622 		set->ops->exit_hctx(hctx, hctx_idx);
3623  unregister_cpu_notifier:
3624 	blk_mq_remove_cpuhp(hctx);
3625 	return -1;
3626 }
3627 
3628 static struct blk_mq_hw_ctx *
blk_mq_alloc_hctx(struct request_queue * q,struct blk_mq_tag_set * set,int node)3629 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3630 		int node)
3631 {
3632 	struct blk_mq_hw_ctx *hctx;
3633 	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3634 
3635 	hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3636 	if (!hctx)
3637 		goto fail_alloc_hctx;
3638 
3639 	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3640 		goto free_hctx;
3641 
3642 	atomic_set(&hctx->nr_active, 0);
3643 	if (node == NUMA_NO_NODE)
3644 		node = set->numa_node;
3645 	hctx->numa_node = node;
3646 
3647 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3648 	spin_lock_init(&hctx->lock);
3649 	INIT_LIST_HEAD(&hctx->dispatch);
3650 	hctx->queue = q;
3651 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3652 
3653 	INIT_LIST_HEAD(&hctx->hctx_list);
3654 
3655 	/*
3656 	 * Allocate space for all possible cpus to avoid allocation at
3657 	 * runtime
3658 	 */
3659 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3660 			gfp, node);
3661 	if (!hctx->ctxs)
3662 		goto free_cpumask;
3663 
3664 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3665 				gfp, node, false, false))
3666 		goto free_ctxs;
3667 	hctx->nr_ctx = 0;
3668 
3669 	spin_lock_init(&hctx->dispatch_wait_lock);
3670 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3671 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3672 
3673 	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3674 	if (!hctx->fq)
3675 		goto free_bitmap;
3676 
3677 	blk_mq_hctx_kobj_init(hctx);
3678 
3679 	return hctx;
3680 
3681  free_bitmap:
3682 	sbitmap_free(&hctx->ctx_map);
3683  free_ctxs:
3684 	kfree(hctx->ctxs);
3685  free_cpumask:
3686 	free_cpumask_var(hctx->cpumask);
3687  free_hctx:
3688 	kfree(hctx);
3689  fail_alloc_hctx:
3690 	return NULL;
3691 }
3692 
blk_mq_init_cpu_queues(struct request_queue * q,unsigned int nr_hw_queues)3693 static void blk_mq_init_cpu_queues(struct request_queue *q,
3694 				   unsigned int nr_hw_queues)
3695 {
3696 	struct blk_mq_tag_set *set = q->tag_set;
3697 	unsigned int i, j;
3698 
3699 	for_each_possible_cpu(i) {
3700 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3701 		struct blk_mq_hw_ctx *hctx;
3702 		int k;
3703 
3704 		__ctx->cpu = i;
3705 		spin_lock_init(&__ctx->lock);
3706 		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3707 			INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3708 
3709 		__ctx->queue = q;
3710 
3711 		/*
3712 		 * Set the local node, but only if we have more than one hw
3713 		 * queue.  If not, we remain on the home node of the device.
3714 		 */
3715 		for (j = 0; j < set->nr_maps; j++) {
3716 			hctx = blk_mq_map_queue_type(q, j, i);
3717 			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3718 				hctx->numa_node = cpu_to_node(i);
3719 		}
3720 	}
3721 }
3722 
blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set * set,unsigned int hctx_idx,unsigned int depth)3723 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3724 					     unsigned int hctx_idx,
3725 					     unsigned int depth)
3726 {
3727 	struct blk_mq_tags *tags;
3728 	int ret;
3729 
3730 	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3731 	if (!tags)
3732 		return NULL;
3733 
3734 	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3735 	if (ret) {
3736 		blk_mq_free_rq_map(tags);
3737 		return NULL;
3738 	}
3739 
3740 	return tags;
3741 }
3742 
__blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set * set,int hctx_idx)3743 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3744 				       int hctx_idx)
3745 {
3746 	if (blk_mq_is_shared_tags(set->flags)) {
3747 		set->tags[hctx_idx] = set->shared_tags;
3748 
3749 		return true;
3750 	}
3751 
3752 	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3753 						       set->queue_depth);
3754 
3755 	return set->tags[hctx_idx];
3756 }
3757 
blk_mq_free_map_and_rqs(struct blk_mq_tag_set * set,struct blk_mq_tags * tags,unsigned int hctx_idx)3758 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3759 			     struct blk_mq_tags *tags,
3760 			     unsigned int hctx_idx)
3761 {
3762 	if (tags) {
3763 		blk_mq_free_rqs(set, tags, hctx_idx);
3764 		blk_mq_free_rq_map(tags);
3765 	}
3766 }
3767 
__blk_mq_free_map_and_rqs(struct blk_mq_tag_set * set,unsigned int hctx_idx)3768 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3769 				      unsigned int hctx_idx)
3770 {
3771 	if (!blk_mq_is_shared_tags(set->flags))
3772 		blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3773 
3774 	set->tags[hctx_idx] = NULL;
3775 }
3776 
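/*
 * (Re)build the ctx -> hctx mapping for every possible CPU from set->map[],
 * update each hctx's cpumask and ctxs[] array, and release the tags of any
 * hardware queue that ends up with no software queue mapped (except hctx 0,
 * which is kept as a fallback).
 */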
blk_mq_map_swqueue(struct request_queue * q)3777 static void blk_mq_map_swqueue(struct request_queue *q)
3778 {
3779 	unsigned int j, hctx_idx;
3780 	unsigned long i;
3781 	struct blk_mq_hw_ctx *hctx;
3782 	struct blk_mq_ctx *ctx;
3783 	struct blk_mq_tag_set *set = q->tag_set;
3784 
3785 	queue_for_each_hw_ctx(q, hctx, i) {
3786 		cpumask_clear(hctx->cpumask);
3787 		hctx->nr_ctx = 0;
3788 		hctx->dispatch_from = NULL;
3789 	}
3790 
3791 	/*
3792 	 * Map software to hardware queues.
3793 	 *
3794 	 * If the cpu isn't present, it is mapped to the first hctx.
3795 	 */
3796 	for_each_possible_cpu(i) {
3797 
3798 		ctx = per_cpu_ptr(q->queue_ctx, i);
3799 		for (j = 0; j < set->nr_maps; j++) {
3800 			if (!set->map[j].nr_queues) {
3801 				ctx->hctxs[j] = blk_mq_map_queue_type(q,
3802 						HCTX_TYPE_DEFAULT, i);
3803 				continue;
3804 			}
3805 			hctx_idx = set->map[j].mq_map[i];
3806 			/* an unmapped hw queue can be remapped after the CPU topology changes */
3807 			if (!set->tags[hctx_idx] &&
3808 			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3809 				/*
3810 				 * If tag initialization fails for some hctx,
3811 				 * that hctx won't be brought online.  In this
3812 				 * case, remap the current ctx to hctx[0], which
3813 				 * is guaranteed to always have tags allocated.
3814 				 */
3815 				set->map[j].mq_map[i] = 0;
3816 			}
3817 
3818 			hctx = blk_mq_map_queue_type(q, j, i);
3819 			ctx->hctxs[j] = hctx;
3820 			/*
3821 			 * If the CPU is already set in the mask, then we've
3822 			 * mapped this one already. This can happen if
3823 			 * devices share queues across queue maps.
3824 			 */
3825 			if (cpumask_test_cpu(i, hctx->cpumask))
3826 				continue;
3827 
3828 			cpumask_set_cpu(i, hctx->cpumask);
3829 			hctx->type = j;
3830 			ctx->index_hw[hctx->type] = hctx->nr_ctx;
3831 			hctx->ctxs[hctx->nr_ctx++] = ctx;
3832 
3833 			/*
3834 			 * If the nr_ctx type overflows, we have exceeded the
3835 			 * number of sw queues we can support.
3836 			 */
3837 			BUG_ON(!hctx->nr_ctx);
3838 		}
3839 
3840 		for (; j < HCTX_MAX_TYPES; j++)
3841 			ctx->hctxs[j] = blk_mq_map_queue_type(q,
3842 					HCTX_TYPE_DEFAULT, i);
3843 	}
3844 
3845 	queue_for_each_hw_ctx(q, hctx, i) {
3846 		/*
3847 		 * If no software queues are mapped to this hardware queue,
3848 		 * disable it and free the request entries.
3849 		 */
3850 		if (!hctx->nr_ctx) {
3851 			/* Never unmap queue 0.  We need it as a
3852 			 * fallback in case a new remap fails to
3853 			 * allocate tags.
3854 			 */
3855 			if (i)
3856 				__blk_mq_free_map_and_rqs(set, i);
3857 
3858 			hctx->tags = NULL;
3859 			continue;
3860 		}
3861 
3862 		hctx->tags = set->tags[i];
3863 		WARN_ON(!hctx->tags);
3864 
3865 		/*
3866 		 * Set the map size to the number of mapped software queues.
3867 		 * This is more accurate and more efficient than looping
3868 		 * over all possibly mapped software queues.
3869 		 */
3870 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3871 
3872 		/*
3873 		 * Initialize batch roundrobin counts
3874 		 */
3875 		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3876 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3877 	}
3878 }
3879 
3880 /*
3881  * Caller needs to ensure that we're either frozen/quiesced, or that
3882  * the queue isn't live yet.
3883  */
queue_set_hctx_shared(struct request_queue * q,bool shared)3884 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3885 {
3886 	struct blk_mq_hw_ctx *hctx;
3887 	unsigned long i;
3888 
3889 	queue_for_each_hw_ctx(q, hctx, i) {
3890 		if (shared) {
3891 			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3892 		} else {
3893 			blk_mq_tag_idle(hctx);
3894 			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3895 		}
3896 	}
3897 }
3898 
blk_mq_update_tag_set_shared(struct blk_mq_tag_set * set,bool shared)3899 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3900 					 bool shared)
3901 {
3902 	struct request_queue *q;
3903 
3904 	lockdep_assert_held(&set->tag_list_lock);
3905 
3906 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
3907 		blk_mq_freeze_queue(q);
3908 		queue_set_hctx_shared(q, shared);
3909 		blk_mq_unfreeze_queue(q);
3910 	}
3911 }
3912 
blk_mq_del_queue_tag_set(struct request_queue * q)3913 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3914 {
3915 	struct blk_mq_tag_set *set = q->tag_set;
3916 
3917 	mutex_lock(&set->tag_list_lock);
3918 	list_del(&q->tag_set_list);
3919 	if (list_is_singular(&set->tag_list)) {
3920 		/* just transitioned to unshared */
3921 		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3922 		/* update existing queue */
3923 		blk_mq_update_tag_set_shared(set, false);
3924 	}
3925 	mutex_unlock(&set->tag_list_lock);
3926 	INIT_LIST_HEAD(&q->tag_set_list);
3927 }
3928 
blk_mq_add_queue_tag_set(struct blk_mq_tag_set * set,struct request_queue * q)3929 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3930 				     struct request_queue *q)
3931 {
3932 	mutex_lock(&set->tag_list_lock);
3933 
3934 	/*
3935 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
3936 	 */
3937 	if (!list_empty(&set->tag_list) &&
3938 	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3939 		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3940 		/* update existing queue */
3941 		blk_mq_update_tag_set_shared(set, true);
3942 	}
3943 	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
3944 		queue_set_hctx_shared(q, true);
3945 	list_add_tail(&q->tag_set_list, &set->tag_list);
3946 
3947 	mutex_unlock(&set->tag_list_lock);
3948 }
3949 
3950 /* All allocations will be freed in release handler of q->mq_kobj */
blk_mq_alloc_ctxs(struct request_queue * q)3951 static int blk_mq_alloc_ctxs(struct request_queue *q)
3952 {
3953 	struct blk_mq_ctxs *ctxs;
3954 	int cpu;
3955 
3956 	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3957 	if (!ctxs)
3958 		return -ENOMEM;
3959 
3960 	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3961 	if (!ctxs->queue_ctx)
3962 		goto fail;
3963 
3964 	for_each_possible_cpu(cpu) {
3965 		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3966 		ctx->ctxs = ctxs;
3967 	}
3968 
3969 	q->mq_kobj = &ctxs->kobj;
3970 	q->queue_ctx = ctxs->queue_ctx;
3971 
3972 	return 0;
3973  fail:
3974 	kfree(ctxs);
3975 	return -ENOMEM;
3976 }
3977 
3978 /*
3979  * This is the actual release handler for blk-mq, but we do it from the
3980  * request queue's release handler to avoid use-after-free issues and
3981  * other headaches; q->mq_kobj shouldn't have been introduced, but we
3982  * can't group the ctx/hctx kobjects without it.
3983  */
blk_mq_release(struct request_queue * q)3984 void blk_mq_release(struct request_queue *q)
3985 {
3986 	struct blk_mq_hw_ctx *hctx, *next;
3987 	unsigned long i;
3988 
3989 	queue_for_each_hw_ctx(q, hctx, i)
3990 		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3991 
3992 	/* all hctx are in .unused_hctx_list now */
3993 	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3994 		list_del_init(&hctx->hctx_list);
3995 		kobject_put(&hctx->kobj);
3996 	}
3997 
3998 	xa_destroy(&q->hctx_table);
3999 
4000 	/*
4001 	 * Release .mq_kobj and the sw queues' kobjects now, because
4002 	 * both share their lifetime with the request queue.
4003 	 */
4004 	blk_mq_sysfs_deinit(q);
4005 }
4006 
blk_mq_init_queue_data(struct blk_mq_tag_set * set,void * queuedata)4007 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
4008 		void *queuedata)
4009 {
4010 	struct request_queue *q;
4011 	int ret;
4012 
4013 	q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
4014 	if (!q)
4015 		return ERR_PTR(-ENOMEM);
4016 	q->queuedata = queuedata;
4017 	ret = blk_mq_init_allocated_queue(set, q);
4018 	if (ret) {
4019 		blk_put_queue(q);
4020 		return ERR_PTR(ret);
4021 	}
4022 	return q;
4023 }
4024 
blk_mq_init_queue(struct blk_mq_tag_set * set)4025 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
4026 {
4027 	return blk_mq_init_queue_data(set, NULL);
4028 }
4029 EXPORT_SYMBOL(blk_mq_init_queue);
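
/*
 * Typical usage, as an illustrative sketch only: "my_mq_ops" and
 * "my_tag_set" are hypothetical driver-side names (the ops must at least
 * provide ->queue_rq()), and error handling is reduced to the minimum:
 *
 *	my_tag_set.ops = &my_mq_ops;
 *	my_tag_set.nr_hw_queues = 1;
 *	my_tag_set.queue_depth = 64;
 *	my_tag_set.numa_node = NUMA_NO_NODE;
 *	err = blk_mq_alloc_tag_set(&my_tag_set);
 *	if (err)
 *		return err;
 *	q = blk_mq_init_queue(&my_tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_tag_set);
 *		return PTR_ERR(q);
 *	}
 */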
4030 
4031 /**
4032  * blk_mq_destroy_queue - shutdown a request queue
4033  * @q: request queue to shutdown
4034  *
4035  * This shuts down a request queue allocated by blk_mq_init_queue() and drops
4036  * the initial reference.  All future requests will fail with -ENODEV.
4037  *
4038  * Context: can sleep
4039  */
blk_mq_destroy_queue(struct request_queue * q)4040 void blk_mq_destroy_queue(struct request_queue *q)
4041 {
4042 	WARN_ON_ONCE(!queue_is_mq(q));
4043 	WARN_ON_ONCE(blk_queue_registered(q));
4044 
4045 	might_sleep();
4046 
4047 	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4048 	blk_queue_start_drain(q);
4049 	blk_freeze_queue(q);
4050 
4051 	blk_sync_queue(q);
4052 	blk_mq_cancel_work_sync(q);
4053 	blk_mq_exit_queue(q);
4054 
4055 	/* @q is and will stay empty, shutdown and put */
4056 	blk_put_queue(q);
4057 }
4058 EXPORT_SYMBOL(blk_mq_destroy_queue);
4059 
__blk_mq_alloc_disk(struct blk_mq_tag_set * set,void * queuedata,struct lock_class_key * lkclass)4060 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4061 		struct lock_class_key *lkclass)
4062 {
4063 	struct request_queue *q;
4064 	struct gendisk *disk;
4065 
4066 	q = blk_mq_init_queue_data(set, queuedata);
4067 	if (IS_ERR(q))
4068 		return ERR_CAST(q);
4069 
4070 	disk = __alloc_disk_node(q, set->numa_node, lkclass);
4071 	if (!disk) {
4072 		blk_mq_destroy_queue(q);
4073 		return ERR_PTR(-ENOMEM);
4074 	}
4075 	set_bit(GD_OWNS_QUEUE, &disk->state);
4076 	return disk;
4077 }
4078 EXPORT_SYMBOL(__blk_mq_alloc_disk);
4079 
blk_mq_alloc_disk_for_queue(struct request_queue * q,struct lock_class_key * lkclass)4080 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4081 		struct lock_class_key *lkclass)
4082 {
4083 	struct gendisk *disk;
4084 
4085 	if (!blk_get_queue(q))
4086 		return NULL;
4087 	disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4088 	if (!disk)
4089 		blk_put_queue(q);
4090 	return disk;
4091 }
4092 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4093 
blk_mq_alloc_and_init_hctx(struct blk_mq_tag_set * set,struct request_queue * q,int hctx_idx,int node)4094 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4095 		struct blk_mq_tag_set *set, struct request_queue *q,
4096 		int hctx_idx, int node)
4097 {
4098 	struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4099 
4100 	/* reuse dead hctx first */
4101 	spin_lock(&q->unused_hctx_lock);
4102 	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4103 		if (tmp->numa_node == node) {
4104 			hctx = tmp;
4105 			break;
4106 		}
4107 	}
4108 	if (hctx)
4109 		list_del_init(&hctx->hctx_list);
4110 	spin_unlock(&q->unused_hctx_lock);
4111 
4112 	if (!hctx)
4113 		hctx = blk_mq_alloc_hctx(q, set, node);
4114 	if (!hctx)
4115 		goto fail;
4116 
4117 	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4118 		goto free_hctx;
4119 
4120 	return hctx;
4121 
4122  free_hctx:
4123 	kobject_put(&hctx->kobj);
4124  fail:
4125 	return NULL;
4126 }
4127 
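/*
 * Allocate (or reallocate) one hctx for each index up to set->nr_hw_queues,
 * preferring to reuse an hctx parked on q->unused_hctx_list with a matching
 * NUMA node.  Any hctx left over beyond the resulting q->nr_hw_queues is
 * torn down at the end.
 */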
blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set * set,struct request_queue * q)4128 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4129 						struct request_queue *q)
4130 {
4131 	struct blk_mq_hw_ctx *hctx;
4132 	unsigned long i, j;
4133 
4134 	/* protect against switching io scheduler  */
4135 	mutex_lock(&q->sysfs_lock);
4136 	for (i = 0; i < set->nr_hw_queues; i++) {
4137 		int old_node;
4138 		int node = blk_mq_get_hctx_node(set, i);
4139 		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4140 
4141 		if (old_hctx) {
4142 			old_node = old_hctx->numa_node;
4143 			blk_mq_exit_hctx(q, set, old_hctx, i);
4144 		}
4145 
4146 		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4147 			if (!old_hctx)
4148 				break;
4149 			pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
4150 					node, old_node);
4151 			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4152 			WARN_ON_ONCE(!hctx);
4153 		}
4154 	}
4155 	/*
4156 	 * If increasing nr_hw_queues failed, free the newly allocated
4157 	 * hctxs and keep the previous q->nr_hw_queues.
4158 	 */
4159 	if (i != set->nr_hw_queues) {
4160 		j = q->nr_hw_queues;
4161 	} else {
4162 		j = i;
4163 		q->nr_hw_queues = set->nr_hw_queues;
4164 	}
4165 
4166 	xa_for_each_start(&q->hctx_table, j, hctx, j)
4167 		blk_mq_exit_hctx(q, set, hctx, j);
4168 	mutex_unlock(&q->sysfs_lock);
4169 }
4170 
blk_mq_update_poll_flag(struct request_queue * q)4171 static void blk_mq_update_poll_flag(struct request_queue *q)
4172 {
4173 	struct blk_mq_tag_set *set = q->tag_set;
4174 
4175 	if (set->nr_maps > HCTX_TYPE_POLL &&
4176 	    set->map[HCTX_TYPE_POLL].nr_queues)
4177 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4178 	else
4179 		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4180 }
4181 
blk_mq_init_allocated_queue(struct blk_mq_tag_set * set,struct request_queue * q)4182 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4183 		struct request_queue *q)
4184 {
4185 	WARN_ON_ONCE(blk_queue_has_srcu(q) !=
4186 			!!(set->flags & BLK_MQ_F_BLOCKING));
4187 
4188 	/* mark the queue as mq asap */
4189 	q->mq_ops = set->ops;
4190 
4191 	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
4192 					     blk_mq_poll_stats_bkt,
4193 					     BLK_MQ_POLL_STATS_BKTS, q);
4194 	if (!q->poll_cb)
4195 		goto err_exit;
4196 
4197 	if (blk_mq_alloc_ctxs(q))
4198 		goto err_poll;
4199 
4200 	/* init q->mq_kobj and sw queues' kobjects */
4201 	blk_mq_sysfs_init(q);
4202 
4203 	INIT_LIST_HEAD(&q->unused_hctx_list);
4204 	spin_lock_init(&q->unused_hctx_lock);
4205 
4206 	xa_init(&q->hctx_table);
4207 
4208 	blk_mq_realloc_hw_ctxs(set, q);
4209 	if (!q->nr_hw_queues)
4210 		goto err_hctxs;
4211 
4212 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4213 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4214 
4215 	q->tag_set = set;
4216 
4217 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4218 	blk_mq_update_poll_flag(q);
4219 
4220 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4221 	INIT_LIST_HEAD(&q->requeue_list);
4222 	spin_lock_init(&q->requeue_lock);
4223 
4224 	q->nr_requests = set->queue_depth;
4225 
4226 	/*
4227 	 * Default to classic polling
4228 	 */
4229 	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
4230 
4231 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4232 	blk_mq_add_queue_tag_set(set, q);
4233 	blk_mq_map_swqueue(q);
4234 	return 0;
4235 
4236 err_hctxs:
4237 	blk_mq_release(q);
4238 err_poll:
4239 	blk_stat_free_callback(q->poll_cb);
4240 	q->poll_cb = NULL;
4241 err_exit:
4242 	q->mq_ops = NULL;
4243 	return -ENOMEM;
4244 }
4245 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4246 
4247 /* tags can _not_ be used after returning from blk_mq_exit_queue */
blk_mq_exit_queue(struct request_queue * q)4248 void blk_mq_exit_queue(struct request_queue *q)
4249 {
4250 	struct blk_mq_tag_set *set = q->tag_set;
4251 
4252 	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4253 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4254 	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4255 	blk_mq_del_queue_tag_set(q);
4256 }
4257 
__blk_mq_alloc_rq_maps(struct blk_mq_tag_set * set)4258 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4259 {
4260 	int i;
4261 
4262 	if (blk_mq_is_shared_tags(set->flags)) {
4263 		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4264 						BLK_MQ_NO_HCTX_IDX,
4265 						set->queue_depth);
4266 		if (!set->shared_tags)
4267 			return -ENOMEM;
4268 	}
4269 
4270 	for (i = 0; i < set->nr_hw_queues; i++) {
4271 		if (!__blk_mq_alloc_map_and_rqs(set, i))
4272 			goto out_unwind;
4273 		cond_resched();
4274 	}
4275 
4276 	return 0;
4277 
4278 out_unwind:
4279 	while (--i >= 0)
4280 		__blk_mq_free_map_and_rqs(set, i);
4281 
4282 	if (blk_mq_is_shared_tags(set->flags)) {
4283 		blk_mq_free_map_and_rqs(set, set->shared_tags,
4284 					BLK_MQ_NO_HCTX_IDX);
4285 	}
4286 
4287 	return -ENOMEM;
4288 }
4289 
4290 /*
4291  * Allocate the request maps associated with this tag_set. Note that this
4292  * may reduce the depth asked for, if memory is tight. set->queue_depth
4293  * will be updated to reflect the allocated depth.
4294  */
blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set * set)4295 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4296 {
4297 	unsigned int depth;
4298 	int err;
4299 
4300 	depth = set->queue_depth;
4301 	do {
4302 		err = __blk_mq_alloc_rq_maps(set);
4303 		if (!err)
4304 			break;
4305 
4306 		set->queue_depth >>= 1;
4307 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4308 			err = -ENOMEM;
4309 			break;
4310 		}
4311 	} while (set->queue_depth);
4312 
4313 	if (!set->queue_depth || err) {
4314 		pr_err("blk-mq: failed to allocate request map\n");
4315 		return -ENOMEM;
4316 	}
4317 
4318 	if (depth != set->queue_depth)
4319 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4320 						depth, set->queue_depth);
4321 
4322 	return 0;
4323 }
4324 
blk_mq_update_queue_map(struct blk_mq_tag_set * set)4325 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4326 {
4327 	/*
4328 	 * blk_mq_map_queues() and multiple .map_queues() implementations
4329 	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4330 	 * number of hardware queues.
4331 	 */
4332 	if (set->nr_maps == 1)
4333 		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4334 
4335 	if (set->ops->map_queues && !is_kdump_kernel()) {
4336 		int i;
4337 
4338 		/*
4339 		 * transport .map_queues is usually done in the following
4340 		 * way:
4341 		 *
4342 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4343 		 * 	mask = get_cpu_mask(queue)
4344 		 * 	for_each_cpu(cpu, mask)
4345 		 * 		set->map[x].mq_map[cpu] = queue;
4346 		 * }
4347 		 *
4348 		 * When we need to remap, the table has to be cleared first
4349 		 * to kill stale mappings, since a CPU may otherwise not be
4350 		 * mapped to any hw queue.
4351 		 */
4352 		for (i = 0; i < set->nr_maps; i++)
4353 			blk_mq_clear_mq_map(&set->map[i]);
4354 
4355 		set->ops->map_queues(set);
4356 	} else {
4357 		BUG_ON(set->nr_maps > 1);
4358 		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4359 	}
4360 }
4361 
blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set * set,int cur_nr_hw_queues,int new_nr_hw_queues)4362 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4363 				  int cur_nr_hw_queues, int new_nr_hw_queues)
4364 {
4365 	struct blk_mq_tags **new_tags;
4366 
4367 	if (cur_nr_hw_queues >= new_nr_hw_queues)
4368 		return 0;
4369 
4370 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4371 				GFP_KERNEL, set->numa_node);
4372 	if (!new_tags)
4373 		return -ENOMEM;
4374 
4375 	if (set->tags)
4376 		memcpy(new_tags, set->tags, cur_nr_hw_queues *
4377 		       sizeof(*set->tags));
4378 	kfree(set->tags);
4379 	set->tags = new_tags;
4380 	set->nr_hw_queues = new_nr_hw_queues;
4381 
4382 	return 0;
4383 }
4384 
blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set * set,int new_nr_hw_queues)4385 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
4386 				int new_nr_hw_queues)
4387 {
4388 	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
4389 }
4390 
4391 /*
4392  * Alloc a tag set to be associated with one or more request queues.
4393  * May fail with EINVAL for various error conditions. May adjust the
4394  * requested depth down, if it's too large. In that case, the adjusted
4395  * depth actually used will be stored in set->queue_depth.
4396  */
blk_mq_alloc_tag_set(struct blk_mq_tag_set * set)4397 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4398 {
4399 	int i, ret;
4400 
4401 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4402 
4403 	if (!set->nr_hw_queues)
4404 		return -EINVAL;
4405 	if (!set->queue_depth)
4406 		return -EINVAL;
4407 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4408 		return -EINVAL;
4409 
4410 	if (!set->ops->queue_rq)
4411 		return -EINVAL;
4412 
4413 	if (!set->ops->get_budget ^ !set->ops->put_budget)
4414 		return -EINVAL;
4415 
4416 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4417 		pr_info("blk-mq: reduced tag depth to %u\n",
4418 			BLK_MQ_MAX_DEPTH);
4419 		set->queue_depth = BLK_MQ_MAX_DEPTH;
4420 	}
4421 
4422 	if (!set->nr_maps)
4423 		set->nr_maps = 1;
4424 	else if (set->nr_maps > HCTX_MAX_TYPES)
4425 		return -EINVAL;
4426 
4427 	/*
4428 	 * If a crashdump is active, then we are potentially in a very
4429 	 * memory constrained environment. Limit us to 1 queue and
4430 	 * 64 tags to prevent using too much memory.
4431 	 */
4432 	if (is_kdump_kernel()) {
4433 		set->nr_hw_queues = 1;
4434 		set->nr_maps = 1;
4435 		set->queue_depth = min(64U, set->queue_depth);
4436 	}
4437 	/*
4438 	 * There is no use for more h/w queues than cpus if we just have
4439 	 * a single map
4440 	 */
4441 	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4442 		set->nr_hw_queues = nr_cpu_ids;
4443 
4444 	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
4445 		return -ENOMEM;
4446 
4447 	ret = -ENOMEM;
4448 	for (i = 0; i < set->nr_maps; i++) {
4449 		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4450 						  sizeof(set->map[i].mq_map[0]),
4451 						  GFP_KERNEL, set->numa_node);
4452 		if (!set->map[i].mq_map)
4453 			goto out_free_mq_map;
4454 		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4455 	}
4456 
4457 	blk_mq_update_queue_map(set);
4458 
4459 	ret = blk_mq_alloc_set_map_and_rqs(set);
4460 	if (ret)
4461 		goto out_free_mq_map;
4462 
4463 	mutex_init(&set->tag_list_lock);
4464 	INIT_LIST_HEAD(&set->tag_list);
4465 
4466 	return 0;
4467 
4468 out_free_mq_map:
4469 	for (i = 0; i < set->nr_maps; i++) {
4470 		kfree(set->map[i].mq_map);
4471 		set->map[i].mq_map = NULL;
4472 	}
4473 	kfree(set->tags);
4474 	set->tags = NULL;
4475 	return ret;
4476 }
4477 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
4478 
4479 /* allocate and initialize a tagset for a simple single-queue device */
blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set * set,const struct blk_mq_ops * ops,unsigned int queue_depth,unsigned int set_flags)4480 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4481 		const struct blk_mq_ops *ops, unsigned int queue_depth,
4482 		unsigned int set_flags)
4483 {
4484 	memset(set, 0, sizeof(*set));
4485 	set->ops = ops;
4486 	set->nr_hw_queues = 1;
4487 	set->nr_maps = 1;
4488 	set->queue_depth = queue_depth;
4489 	set->numa_node = NUMA_NO_NODE;
4490 	set->flags = set_flags;
4491 	return blk_mq_alloc_tag_set(set);
4492 }
4493 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
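
/*
 * Illustrative sketch of the single-queue case ("my_ops" is a hypothetical
 * driver-side blk_mq_ops instance, and the depth of 128 is arbitrary):
 *
 *	err = blk_mq_alloc_sq_tag_set(&set, &my_ops, 128, 0);
 *	if (err)
 *		return err;
 *	...
 *	blk_mq_free_tag_set(&set);
 */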
4494 
blk_mq_free_tag_set(struct blk_mq_tag_set * set)4495 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4496 {
4497 	int i, j;
4498 
4499 	for (i = 0; i < set->nr_hw_queues; i++)
4500 		__blk_mq_free_map_and_rqs(set, i);
4501 
4502 	if (blk_mq_is_shared_tags(set->flags)) {
4503 		blk_mq_free_map_and_rqs(set, set->shared_tags,
4504 					BLK_MQ_NO_HCTX_IDX);
4505 	}
4506 
4507 	for (j = 0; j < set->nr_maps; j++) {
4508 		kfree(set->map[j].mq_map);
4509 		set->map[j].mq_map = NULL;
4510 	}
4511 
4512 	kfree(set->tags);
4513 	set->tags = NULL;
4514 }
4515 EXPORT_SYMBOL(blk_mq_free_tag_set);
4516 
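/*
 * Update the per-queue request depth, typically reached via the sysfs
 * "nr_requests" attribute.  With an I/O scheduler attached only the
 * scheduler tags are resized; without one, the driver tags themselves are.
 */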
blk_mq_update_nr_requests(struct request_queue * q,unsigned int nr)4517 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4518 {
4519 	struct blk_mq_tag_set *set = q->tag_set;
4520 	struct blk_mq_hw_ctx *hctx;
4521 	int ret;
4522 	unsigned long i;
4523 
4524 	if (!set)
4525 		return -EINVAL;
4526 
4527 	if (q->nr_requests == nr)
4528 		return 0;
4529 
4530 	blk_mq_freeze_queue(q);
4531 	blk_mq_quiesce_queue(q);
4532 
4533 	ret = 0;
4534 	queue_for_each_hw_ctx(q, hctx, i) {
4535 		if (!hctx->tags)
4536 			continue;
4537 		/*
4538 		 * If we're using an MQ scheduler, just update the scheduler
4539 		 * queue depth. This is similar to what the old code would do.
4540 		 */
4541 		if (hctx->sched_tags) {
4542 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4543 						      nr, true);
4544 		} else {
4545 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4546 						      false);
4547 		}
4548 		if (ret)
4549 			break;
4550 		if (q->elevator && q->elevator->type->ops.depth_updated)
4551 			q->elevator->type->ops.depth_updated(hctx);
4552 	}
4553 	if (!ret) {
4554 		q->nr_requests = nr;
4555 		if (blk_mq_is_shared_tags(set->flags)) {
4556 			if (q->elevator)
4557 				blk_mq_tag_update_sched_shared_tags(q);
4558 			else
4559 				blk_mq_tag_resize_shared_tags(set, nr);
4560 		}
4561 	}
4562 
4563 	blk_mq_unquiesce_queue(q);
4564 	blk_mq_unfreeze_queue(q);
4565 
4566 	return ret;
4567 }
4568 
4569 /*
4570  * request_queue and elevator_type pair.
4571  * It is just used by __blk_mq_update_nr_hw_queues to cache
4572  * the elevator_type associated with a request_queue.
4573  */
4574 struct blk_mq_qe_pair {
4575 	struct list_head node;
4576 	struct request_queue *q;
4577 	struct elevator_type *type;
4578 };
4579 
4580 /*
4581  * Cache the elevator_type in qe pair list and switch the
4582  * io scheduler to 'none'
4583  */
blk_mq_elv_switch_none(struct list_head * head,struct request_queue * q)4584 static bool blk_mq_elv_switch_none(struct list_head *head,
4585 		struct request_queue *q)
4586 {
4587 	struct blk_mq_qe_pair *qe;
4588 
4589 	if (!q->elevator)
4590 		return true;
4591 
4592 	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4593 	if (!qe)
4594 		return false;
4595 
4596 	/* q->elevator needs protection from ->sysfs_lock */
4597 	mutex_lock(&q->sysfs_lock);
4598 
4599 	INIT_LIST_HEAD(&qe->node);
4600 	qe->q = q;
4601 	qe->type = q->elevator->type;
4602 	list_add(&qe->node, head);
4603 
4604 	/*
4605 	 * After elevator_switch(), the previous elevator_queue will be
4606 	 * released by elevator_release().  The reference to the io scheduler
4607 	 * module obtained by elevator_get() will also be dropped.  So take an
4608 	 * extra reference to the io scheduler module here to prevent it from
4609 	 * being removed.
4610 	 */
4611 	__module_get(qe->type->elevator_owner);
4612 	elevator_switch(q, NULL);
4613 	mutex_unlock(&q->sysfs_lock);
4614 
4615 	return true;
4616 }
4617 
blk_lookup_qe_pair(struct list_head * head,struct request_queue * q)4618 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4619 						struct request_queue *q)
4620 {
4621 	struct blk_mq_qe_pair *qe;
4622 
4623 	list_for_each_entry(qe, head, node)
4624 		if (qe->q == q)
4625 			return qe;
4626 
4627 	return NULL;
4628 }
4629 
blk_mq_elv_switch_back(struct list_head * head,struct request_queue * q)4630 static void blk_mq_elv_switch_back(struct list_head *head,
4631 				  struct request_queue *q)
4632 {
4633 	struct blk_mq_qe_pair *qe;
4634 	struct elevator_type *t;
4635 
4636 	qe = blk_lookup_qe_pair(head, q);
4637 	if (!qe)
4638 		return;
4639 	t = qe->type;
4640 	list_del(&qe->node);
4641 	kfree(qe);
4642 
4643 	mutex_lock(&q->sysfs_lock);
4644 	elevator_switch(q, t);
4645 	mutex_unlock(&q->sysfs_lock);
4646 }
4647 
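/*
 * Core of blk_mq_update_nr_hw_queues(): freeze every queue sharing this tag
 * set, temporarily switch their elevators to 'none', reallocate and remap
 * the hardware contexts, then restore the elevators and unfreeze.
 */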
__blk_mq_update_nr_hw_queues(struct blk_mq_tag_set * set,int nr_hw_queues)4648 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4649 							int nr_hw_queues)
4650 {
4651 	struct request_queue *q;
4652 	LIST_HEAD(head);
4653 	int prev_nr_hw_queues;
4654 
4655 	lockdep_assert_held(&set->tag_list_lock);
4656 
4657 	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4658 		nr_hw_queues = nr_cpu_ids;
4659 	if (nr_hw_queues < 1)
4660 		return;
4661 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4662 		return;
4663 
4664 	list_for_each_entry(q, &set->tag_list, tag_set_list)
4665 		blk_mq_freeze_queue(q);
4666 	/*
4667 	 * Switch IO scheduler to 'none', cleaning up the data associated
4668 	 * with the previous scheduler. We will switch back once we are done
4669 	 * updating the new sw to hw queue mappings.
4670 	 */
4671 	list_for_each_entry(q, &set->tag_list, tag_set_list)
4672 		if (!blk_mq_elv_switch_none(&head, q))
4673 			goto switch_back;
4674 
4675 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4676 		blk_mq_debugfs_unregister_hctxs(q);
4677 		blk_mq_sysfs_unregister_hctxs(q);
4678 	}
4679 
4680 	prev_nr_hw_queues = set->nr_hw_queues;
4681 	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
4682 	    0)
4683 		goto reregister;
4684 
4685 	set->nr_hw_queues = nr_hw_queues;
4686 fallback:
4687 	blk_mq_update_queue_map(set);
4688 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4689 		blk_mq_realloc_hw_ctxs(set, q);
4690 		blk_mq_update_poll_flag(q);
4691 		if (q->nr_hw_queues != set->nr_hw_queues) {
4692 			int i = prev_nr_hw_queues;
4693 
4694 			pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
4695 					nr_hw_queues, prev_nr_hw_queues);
4696 			for (; i < set->nr_hw_queues; i++)
4697 				__blk_mq_free_map_and_rqs(set, i);
4698 
4699 			set->nr_hw_queues = prev_nr_hw_queues;
4700 			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4701 			goto fallback;
4702 		}
4703 		blk_mq_map_swqueue(q);
4704 	}
4705 
4706 reregister:
4707 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
4708 		blk_mq_sysfs_register_hctxs(q);
4709 		blk_mq_debugfs_register_hctxs(q);
4710 	}
4711 
4712 switch_back:
4713 	list_for_each_entry(q, &set->tag_list, tag_set_list)
4714 		blk_mq_elv_switch_back(&head, q);
4715 
4716 	list_for_each_entry(q, &set->tag_list, tag_set_list)
4717 		blk_mq_unfreeze_queue(q);
4718 }
4719 
blk_mq_update_nr_hw_queues(struct blk_mq_tag_set * set,int nr_hw_queues)4720 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4721 {
4722 	mutex_lock(&set->tag_list_lock);
4723 	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4724 	mutex_unlock(&set->tag_list_lock);
4725 }
4726 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
4727 
4728 /* Enable polling stats and return whether they were already enabled. */
blk_poll_stats_enable(struct request_queue * q)4729 static bool blk_poll_stats_enable(struct request_queue *q)
4730 {
4731 	if (q->poll_stat)
4732 		return true;
4733 
4734 	return blk_stats_alloc_enable(q);
4735 }
4736 
blk_mq_poll_stats_start(struct request_queue * q)4737 static void blk_mq_poll_stats_start(struct request_queue *q)
4738 {
4739 	/*
4740 	 * We don't arm the callback if polling stats are not enabled or the
4741 	 * callback is already active.
4742 	 */
4743 	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
4744 		return;
4745 
4746 	blk_stat_activate_msecs(q->poll_cb, 100);
4747 }
4748 
blk_mq_poll_stats_fn(struct blk_stat_callback * cb)4749 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
4750 {
4751 	struct request_queue *q = cb->data;
4752 	int bucket;
4753 
4754 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
4755 		if (cb->stat[bucket].nr_samples)
4756 			q->poll_stat[bucket] = cb->stat[bucket];
4757 	}
4758 }
4759 
blk_mq_poll_nsecs(struct request_queue * q,struct request * rq)4760 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
4761 				       struct request *rq)
4762 {
4763 	unsigned long ret = 0;
4764 	int bucket;
4765 
4766 	/*
4767 	 * If stats collection isn't on, don't sleep but turn it on for
4768 	 * future users
4769 	 */
4770 	if (!blk_poll_stats_enable(q))
4771 		return 0;
4772 
4773 	/*
4774 	 * As an optimistic guess, use half of the mean service time
4775 	 * for this type of request. We can (and should) make this smarter.
4776 	 * For instance, if the completion latencies are tight, we can
4777 	 * get closer than just half the mean. This is especially
4778 	 * important on devices where the completion latencies are longer
4779 	 * than ~10 usec. We do use the stats for the relevant IO size
4780 	 * if available which does lead to better estimates.
4781 	 */
4782 	bucket = blk_mq_poll_stats_bkt(rq);
4783 	if (bucket < 0)
4784 		return ret;
4785 
4786 	if (q->poll_stat[bucket].nr_samples)
4787 		ret = (q->poll_stat[bucket].mean + 1) / 2;
4788 
4789 	return ret;
4790 }
4791 
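/*
 * Hybrid polling: before busy-polling for completion, sleep for an estimate
 * of the expected completion time - q->poll_nsec if the user set one,
 * otherwise half of the measured mean latency for requests of this size and
 * direction (e.g. a 20 usec mean leads to a ~10 usec sleep).  Returns true
 * if we slept, so that the caller restarts its poll loop.
 */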
blk_mq_poll_hybrid(struct request_queue * q,blk_qc_t qc)4792 static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
4793 {
4794 	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
4795 	struct request *rq = blk_qc_to_rq(hctx, qc);
4796 	struct hrtimer_sleeper hs;
4797 	enum hrtimer_mode mode;
4798 	unsigned int nsecs;
4799 	ktime_t kt;
4800 
4801 	/*
4802 	 * If a request has completed on queue that uses an I/O scheduler, we
4803 	 * If a request has completed on a queue that uses an I/O scheduler, we
4804 	 * won't get back a request from blk_qc_to_rq().
4805 	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
4806 		return false;
4807 
4808 	/*
4809 	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
4810 	 *
4811 	 *  0:	use half of prev avg
4812 	 * >0:	use this specific value
4813 	 */
4814 	if (q->poll_nsec > 0)
4815 		nsecs = q->poll_nsec;
4816 	else
4817 		nsecs = blk_mq_poll_nsecs(q, rq);
4818 
4819 	if (!nsecs)
4820 		return false;
4821 
4822 	rq->rq_flags |= RQF_MQ_POLL_SLEPT;
4823 
4824 	/*
4825 	 * This will be replaced with the stats tracking code, using
4826 	 * 'avg_completion_time / 2' as the pre-sleep target.
4827 	 */
4828 	kt = nsecs;
4829 
4830 	mode = HRTIMER_MODE_REL;
4831 	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
4832 	hrtimer_set_expires(&hs.timer, kt);
4833 
4834 	do {
4835 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
4836 			break;
4837 		set_current_state(TASK_UNINTERRUPTIBLE);
4838 		hrtimer_sleeper_start_expires(&hs, mode);
4839 		if (hs.task)
4840 			io_schedule();
4841 		hrtimer_cancel(&hs.timer);
4842 		mode = HRTIMER_MODE_ABS;
4843 	} while (hs.task && !signal_pending(current));
4844 
4845 	__set_current_state(TASK_RUNNING);
4846 	destroy_hrtimer_on_stack(&hs.timer);
4847 
4848 	/*
4849 	 * If we sleep, have the caller restart the poll loop to reset the
4850 	 * state.  Like for the other success return cases, the caller is
4851 	 * responsible for checking if the IO completed.  If the IO isn't
4852 	 * complete, we'll get called again and will go straight to the busy
4853 	 * poll loop.
4854 	 */
4855 	return true;
4856 }
4857 
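/*
 * Classic polling: repeatedly call the driver's ->poll() for this hctx until
 * it reports completed requests, a signal is pending, the task needs to
 * reschedule, or BLK_POLL_ONESHOT limits us to a single pass.
 */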
blk_mq_poll_classic(struct request_queue * q,blk_qc_t cookie,struct io_comp_batch * iob,unsigned int flags)4858 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
4859 			       struct io_comp_batch *iob, unsigned int flags)
4860 {
4861 	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
4862 	long state = get_current_state();
4863 	int ret;
4864 
4865 	do {
4866 		ret = q->mq_ops->poll(hctx, iob);
4867 		if (ret > 0) {
4868 			__set_current_state(TASK_RUNNING);
4869 			return ret;
4870 		}
4871 
4872 		if (signal_pending_state(state, current))
4873 			__set_current_state(TASK_RUNNING);
4874 		if (task_is_running(current))
4875 			return 1;
4876 
4877 		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4878 			break;
4879 		cpu_relax();
4880 	} while (!need_resched());
4881 
4882 	__set_current_state(TASK_RUNNING);
4883 	return 0;
4884 }
4885 
blk_mq_poll(struct request_queue * q,blk_qc_t cookie,struct io_comp_batch * iob,unsigned int flags)4886 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
4887 		unsigned int flags)
4888 {
4889 	if (!(flags & BLK_POLL_NOSLEEP) &&
4890 	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
4891 		if (blk_mq_poll_hybrid(q, cookie))
4892 			return 1;
4893 	}
4894 	return blk_mq_poll_classic(q, cookie, iob, flags);
4895 }
4896 
blk_mq_rq_cpu(struct request * rq)4897 unsigned int blk_mq_rq_cpu(struct request *rq)
4898 {
4899 	return rq->mq_ctx->cpu;
4900 }
4901 EXPORT_SYMBOL(blk_mq_rq_cpu);
4902 
blk_mq_cancel_work_sync(struct request_queue * q)4903 void blk_mq_cancel_work_sync(struct request_queue *q)
4904 {
4905 	if (queue_is_mq(q)) {
4906 		struct blk_mq_hw_ctx *hctx;
4907 		unsigned long i;
4908 
4909 		cancel_delayed_work_sync(&q->requeue_work);
4910 
4911 		queue_for_each_hw_ctx(q, hctx, i)
4912 			cancel_delayed_work_sync(&hctx->run_work);
4913 	}
4914 }
4915 
blk_mq_init(void)4916 static int __init blk_mq_init(void)
4917 {
4918 	int i;
4919 
4920 	for_each_possible_cpu(i)
4921 		init_llist_head(&per_cpu(blk_cpu_done, i));
4922 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4923 
4924 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4925 				  "block/softirq:dead", NULL,
4926 				  blk_softirq_cpu_dead);
4927 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4928 				blk_mq_hctx_notify_dead);
4929 	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4930 				blk_mq_hctx_notify_online,
4931 				blk_mq_hctx_notify_offline);
4932 	return 0;
4933 }
4934 subsys_initcall(blk_mq_init);
4935