/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
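 *
 * As an illustration (summarizing the rules above, not adding new ones),
 * consider a write that carries both REQ_FLUSH and REQ_FUA plus data:
 *
 *	queue capability	resulting sequence
 *	-----------------	-------------------------------
 *	no writeback cache	DATA only
 *	cache + FUA		PREFLUSH -> DATA (REQ_FUA kept)
 *	cache, no FUA		PREFLUSH -> DATA -> POSTFLUSH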
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
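 *
 * A rough sketch of the double buffering (the index values are
 * illustrative; they only ever toggle between 0 and 1):
 *
 *	flush_queue[flush_pending_idx]	requests waiting for the next flush
 *	flush_queue[flush_running_idx]	requests whose flush is in flight
 *
 *	blk_kick_flush():  flush_pending_idx ^= 1  (pending becomes running)
 *	flush_end_io():    flush_running_idx ^= 1  (running list is drained)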
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
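 *
 *     For example, under a steady stream of FUA-only writes on a non-FUA
 *     device, some request is almost always executing its DATA step, so
 *     C2 alone could defer the pending POSTFLUSHes indefinitely; C3
 *     bounds that wait to FLUSH_PENDING_TIMEOUT.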
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (blk_rq_sectors(rq))
			policy |= REQ_FSEQ_DATA;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
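
/*
 * A worked example (flag values assumed for illustration): for a request
 * with data and cmd_flags containing REQ_FLUSH|REQ_FUA, on a queue whose
 * flush_flags advertise REQ_FLUSH but not REQ_FUA, the policy comes out
 * as REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.  On a
 * FUA-capable queue the POSTFLUSH bit is omitted and REQ_FUA is passed
 * down to the driver instead.
 */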

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
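
/*
 * For example, after REQ_FSEQ_PREFLUSH (bit 0) is recorded in
 * rq->flush.seq, ffz() finds bit 1 as the lowest clear bit and
 * blk_flush_cur_seq() returns REQ_FSEQ_DATA; once all three action bits
 * are set, it returns REQ_FSEQ_DONE.
 */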

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = NULL;
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		list_add(&rq->queuelist, &q->queue_head);
		queued = true;
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	return blk_kick_flush(q) | queued;
}
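
/*
 * To trace a full PREFLUSH -> DATA -> POSTFLUSH sequence through the
 * function above (an illustrative walk, not extra machinery): the initial
 * call from blk_insert_flush() parks the request on the pending flush
 * queue; the flush completion advances it to REQ_FSEQ_DATA and adds it to
 * q->queue_head; data completion parks it on the pending queue again for
 * POSTFLUSH; and the final flush completion reaches REQ_FSEQ_DONE, where
 * the request is restored and ended.
 */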

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running = &q->flush_queue[q->flush_running_idx];
	bool queued = false;
	struct request *rq, *n;

	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;
	elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Moving a request silently to empty queue_head may stall the
	 * queue.  Kick the queue in those cases.  This function is called
	 * from request completion path and calling directly into
	 * request_fn may confuse the driver.  Always use kblockd.
	 */
	if (queued)
		blk_run_queue_async(q);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	blk_rq_init(q, &q->flush_rq);
	q->flush_rq.cmd_type = REQ_TYPE_FS;
	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq.rq_disk = first_rq->rq_disk;
	q->flush_rq.end_io = flush_end_io;

	q->flush_pending_idx ^= 1;
	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	BUG_ON(rq->end_io);
	BUG_ON(!rq->bio || rq->bio != rq->biotail);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
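
/*
 * Note that the final blk_flush_complete_seq() call pre-marks the steps
 * @rq does not need.  For example, a pure flush without data has
 * policy == REQ_FSEQ_PREFLUSH, so REQ_FSEQ_DATA and REQ_FSEQ_POSTFLUSH
 * are recorded as already complete and blk_flush_cur_seq() immediately
 * selects PREFLUSH as the first real step.
 */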

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  The call blocks until the flush completes.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
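
/*
 * Example usage (a minimal sketch, not taken from this file): a caller
 * that needs previously written data durable on media before proceeding
 * might do
 *
 *	sector_t err_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(bdev, GFP_KERNEL, &err_sector);
 *	if (err)
 *		pr_err("cache flush failed: %d\n", err);
 */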