/*
 * Functions for synchronous and asynchronous execution of fully
 * prepared block layer requests submitted by drivers
 */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/bio.h>
7 #include <linux/blkdev.h>
8 
9 #include "blk.h"
10 
11 /*
12  * for max sense size
13  */
14 #include <scsi/scsi_cmnd.h>
15 
16 /**
17  * blk_end_sync_rq - executes a completion event on a request
18  * @rq: request to complete
19  * @error: end I/O status of the request
20  */
blk_end_sync_rq(struct request * rq,int error)21 static void blk_end_sync_rq(struct request *rq, int error)
22 {
23 	struct completion *waiting = rq->end_io_data;
24 
25 	rq->end_io_data = NULL;
26 	__blk_put_request(rq->q, rq);
27 
28 	/*
29 	 * complete last, if this is a stack request the process (and thus
30 	 * the rq pointer) could be invalid right after this complete()
31 	 */
32 	complete(waiting);
33 }
34 
35 /**
36  * blk_execute_rq_nowait - insert a request into queue for execution
37  * @q:		queue to insert the request in
38  * @bd_disk:	matching gendisk
39  * @rq:		request to insert
40  * @at_head:    insert request at head or tail of queue
41  * @done:	I/O completion handler
42  *
43  * Description:
44  *    Insert a fully prepared request at the back of the I/O scheduler queue
45  *    for execution.  Don't wait for completion.
46  */
blk_execute_rq_nowait(struct request_queue * q,struct gendisk * bd_disk,struct request * rq,int at_head,rq_end_io_fn * done)47 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
48 			   struct request *rq, int at_head,
49 			   rq_end_io_fn *done)
50 {
51 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
52 	bool is_pm_resume;
53 
54 	WARN_ON(irqs_disabled());
55 
56 	rq->rq_disk = bd_disk;
57 	rq->end_io = done;
58 	/*
59 	 * need to check this before __blk_run_queue(), because rq can
60 	 * be freed before that returns.
61 	 */
62 	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
63 
64 	spin_lock_irq(q->queue_lock);
65 
66 	if (unlikely(blk_queue_dead(q))) {
67 		spin_unlock_irq(q->queue_lock);
68 		rq->errors = -ENXIO;
69 		if (rq->end_io)
70 			rq->end_io(rq, rq->errors);
71 		return;
72 	}
73 
74 	rq->rq_disk = bd_disk;
75 	rq->end_io = done;
76 	__elv_add_request(q, rq, where);
77 	__blk_run_queue(q);
78 	/* the queue is stopped so it won't be run */
79 	if (is_pm_resume)
80 		q->request_fn(q);
81 	spin_unlock_irq(q->queue_lock);
82 }
83 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
84 
85 /**
86  * blk_execute_rq - insert a request into queue for execution
87  * @q:		queue to insert the request in
88  * @bd_disk:	matching gendisk
89  * @rq:		request to insert
90  * @at_head:    insert request at head or tail of queue
91  *
92  * Description:
93  *    Insert a fully prepared request at the back of the I/O scheduler queue
94  *    for execution and wait for completion.
95  */
blk_execute_rq(struct request_queue * q,struct gendisk * bd_disk,struct request * rq,int at_head)96 int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
97 		   struct request *rq, int at_head)
98 {
99 	DECLARE_COMPLETION_ONSTACK(wait);
100 	char sense[SCSI_SENSE_BUFFERSIZE];
101 	int err = 0;
102 	unsigned long hang_check;
103 
104 	/*
105 	 * we need an extra reference to the request, so we can look at
106 	 * it after io completion
107 	 */
108 	rq->ref_count++;
109 
110 	if (!rq->sense) {
111 		memset(sense, 0, sizeof(sense));
112 		rq->sense = sense;
113 		rq->sense_len = 0;
114 	}
115 
116 	rq->end_io_data = &wait;
117 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
118 
119 	/* Prevent hang_check timer from firing at us during very long I/O */
120 	hang_check = sysctl_hung_task_timeout_secs;
121 	if (hang_check)
122 		while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
123 	else
124 		wait_for_completion(&wait);
125 
126 	if (rq->errors)
127 		err = -EIO;
128 
129 	return err;
130 }
131 EXPORT_SYMBOL(blk_execute_rq);
132