/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

static int __init fail_io_timeout_debugfs(void)
{
	return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
}

late_initcall(fail_io_timeout_debugfs);
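
/*
 * Usage note (an editor's sketch, not built code): the fault attribute
 * above follows the standard fault-injection interface, so it can be
 * configured on the kernel command line:
 *
 *	fail_io_timeout=<interval>,<probability>,<space>,<times>
 *
 * or at runtime via the debugfs entries that init_fault_attr_dentries()
 * creates, e.g.:
 *
 *	echo 100 > /sys/kernel/debug/fail_io_timeout/probability
 *	echo -1 > /sys/kernel/debug/fail_io_timeout/times
 *
 * See Documentation/fault-injection/fault-injection.txt for details.
 */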

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}
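
/*
 * Usage note: these helpers back the per-disk sysfs attribute (registered
 * in genhd.c) that toggles QUEUE_FLAG_FAIL_IO, so fake timeouts can be
 * enabled per device; assuming the attribute name used there, roughly:
 *
 *	echo 1 > /sys/block/<disk>/io-timeout-fail
 */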

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/**
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:	request that we are canceling the timer for
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send a request message to abort the command and
		 * move more of the generic scsi eh code to the blk layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
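
/*
 * Example (an illustrative sketch, not part of this file): a low-level
 * driver reaches blk_rq_timed_out() by registering a handler with
 * blk_queue_rq_timed_out(); the mydrv_* helpers below are hypothetical:
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		struct mydrv_cmd *cmd = rq->special;
 *
 *		// still making progress: re-arm via BLK_EH_RESET_TIMER
 *		if (mydrv_cmd_in_flight(cmd))
 *			return BLK_EH_RESET_TIMER;
 *
 *		// dead command: abort it and complete the request
 *		mydrv_abort_cmd(cmd);
 *		return BLK_EH_HANDLED;
 *	}
 *
 *	blk_queue_rq_timed_out(q, mydrv_timed_out);
 *	blk_queue_rq_timeout(q, 30 * HZ);
 */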

void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, next = 0;
	struct request *rq, *tmp;
	int next_set = 0;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		} else if (!next_set || time_after(next, rq->deadline)) {
			next = rq->deadline;
			next_set = 1;
		}
	}

	if (next_set)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}
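
/*
 * Note: q->timeout is pointed at the handler above when the queue is
 * initialized (see blk-core.c), along the lines of:
 *
 *	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 */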

/**
 * blk_abort_request - Request recovery for the specified request
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the queue's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
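
/*
 * Example use (illustrative): a driver that knows a command is dead can
 * start recovery right away instead of waiting for the deadline to pass;
 * the queue lock must be held around the call:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_abort_request(rq);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */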

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->deadline = jiffies + req->timeout;
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to the next
	 * nearest second.
	 */
	expiry = round_jiffies_up(req->deadline);

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}
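
/*
 * Example (an illustrative sketch): a caller can override the queue-wide
 * q->rq_timeout for one request by setting rq->timeout before the request
 * is started; disk and done_fn below are assumed to exist:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->timeout = 60 * HZ;	   // used instead of q->rq_timeout above
 *	blk_execute_rq_nowait(q, disk, rq, 0, done_fn);
 */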

/**
 * blk_abort_queue - Abort all requests on a given queue
 * @q:	pointer to queue
 */
void blk_abort_queue(struct request_queue *q)
{
	unsigned long flags;
	struct request *rq, *tmp;
	LIST_HEAD(list);

	/*
	 * Not a request based block device, nothing to abort
	 */
	if (!q->request_fn)
		return;

	spin_lock_irqsave(q->queue_lock, flags);

	elv_abort_queue(q);

	/*
	 * Splice entries to a local list, to avoid deadlocking if entries
	 * get readded to the timeout list by error handling
	 */
	list_splice_init(&q->timeout_list, &list);

	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
		blk_abort_request(rq);

	/*
	 * Occasionally, blk_abort_request() will return without
	 * deleting the element from the list. Make sure we add those back
	 * instead of leaving them on the local stack list.
	 */
	list_splice(&list, &q->timeout_list);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_abort_queue);