#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests (HZ/50 jiffies, ~20ms) */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_drain_queue(struct request_queue *q, bool drain_all);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
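
/*
 * Usage sketch (illustrative, not verbatim kernel code): the timeout
 * handler and the normal completion path race to claim a request; only
 * the path that flips REQ_ATOM_COMPLETE first may complete it:
 *
 *	if (blk_mark_rq_complete(rq))
 *		return;		(someone else already owns completion)
 *	... complete rq, e.g. via __blk_complete_request() ...
 */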

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * If a flush request is in flight and flushes aren't
		 * queueable on the drive, hold the queue until the flush
		 * finishes. Even if we didn't hold it, the driver couldn't
		 * dispatch further requests and would have to requeue them,
		 * so holding can improve throughput too. For example, with
		 * requests flush1, write1, flush2: flush1 is dispatched and
		 * the queue is held, so write1 isn't inserted. Once flush1
		 * finishes, flush2 is dispatched; since the disk cache is
		 * already clean, flush2 completes almost immediately, so it
		 * looks as if flush2 was folded into flush1.
		 * Because the queue is held, a flag is set to indicate that
		 * it should be restarted later. See flush_end_io() for
		 * details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dead(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
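
/*
 * Caller sketch (illustrative only, not the verbatim dispatch code): the
 * request peeking path typically loops on this helper, treating NULL as
 * "nothing dispatchable right now" (queue empty, dead, or held for a
 * pending flush):
 *
 *	while ((rq = __elv_next_request(q)) != NULL) {
 *		... prep rq if needed, then hand it to the driver ...
 *	}
 */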

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
	        (rq->cmd_flags & REQ_DISCARD));
}


/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
				int node);

/**
 * create_io_context - try to create task->io_context
 * @task: target task
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If @task->io_context is %NULL, allocate a new io_context and install it.
 * Returns the current @task->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock(), which protects @task->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(struct task_struct *task,
						   gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!task->io_context))
		create_io_context_slowpath(task, gfp_mask, node);
	return task->io_context;
}
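
/*
 * Usage sketch (hypothetical caller, for illustration; real callers choose
 * a gfp_mask that fits their context): ensure the current task has an
 * io_context, then look up its icq for this queue:
 *
 *	if (create_io_context(current, GFP_NOIO, q->node)) {
 *		struct io_cq *icq = ioc_lookup_icq(current->io_context, q);
 *		...
 *	}
 */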

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
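
/*
 * Caller sketch (illustrative, hypothetical): the bio submission path can
 * hand each bio to the throttler first; a true return means the bio was
 * queued for later release and must not be dispatched now (the stubs above
 * make this a no-op when throttling is configured out):
 *
 *	if (blk_throtl_bio(q, bio))
 *		return;		(bio is throttled, resubmitted later)
 */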

#endif /* BLK_INTERNAL_H */