#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/major.h>
#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/tqueue.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <asm/io.h>

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_s;
typedef struct elevator_s elevator_t;

/*
 * Ok, this is an expanded form so that we can use the same
 * request for paging requests.
 */
struct request {
	struct list_head queue;
	int elevator_sequence;

	volatile int rq_status;	/* should split this into a few status bits */
#define RQ_INACTIVE		(-1)
#define RQ_ACTIVE		1
#define RQ_SCSI_BUSY		0xffff
#define RQ_SCSI_DONE		0xfffe
#define RQ_SCSI_DISCONNECTING	0xffe0

	kdev_t rq_dev;
	int cmd;		/* READ or WRITE */
	int errors;
	unsigned long start_time;
	unsigned long sector;
	unsigned long nr_sectors;
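	/*
	 * The "hard_" fields below mirror sector/nr_sectors/current_nr_sectors
	 * and are kept up to date by the block layer for merging and
	 * accounting; the plain fields are the driver's working copies.
	 */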
	unsigned long hard_sector, hard_nr_sectors;
	unsigned int nr_segments;
	unsigned int nr_hw_segments;
	unsigned long current_nr_sectors, hard_cur_sectors;
	void * special;
	char * buffer;
	struct completion * waiting;
	struct buffer_head * bh;
	struct buffer_head * bhtail;
	request_queue_t *q;
	char io_account;
};

#include <linux/elevator.h>

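/*
 * Callback types used by the block layer and block drivers: request
 * dispatch (request_fn_proc), merge decisions, request submission
 * (make_request_fn), queue plugging/unplugging, and per-device queue
 * lookup (queue_proc, see struct blk_dev_struct below).
 */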
typedef int (merge_request_fn) (request_queue_t *q,
				struct request  *req,
				struct buffer_head *bh,
				int);
typedef int (merge_requests_fn) (request_queue_t *q,
				 struct request  *req,
				 struct request  *req2,
				 int);
typedef void (request_fn_proc) (request_queue_t *q);
typedef request_queue_t * (queue_proc) (kdev_t dev);
typedef int (make_request_fn) (request_queue_t *q, int rw, struct buffer_head *bh);
typedef void (plug_device_fn) (request_queue_t *q, kdev_t device);
typedef void (unplug_device_fn) (void *q);

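/*
 * Per-queue pool of preallocated requests: 'free' holds the unused
 * requests and 'count' how many are on it; pending[] is per-direction
 * bookkeeping used by ll_rw_blk.c when handing requests out.
 */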
struct request_list {
	unsigned int count;
	unsigned int pending[2];
	struct list_head free;
};

struct request_queue
{
	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	/*
	 * The total number of requests allocated to this queue
	 */
	int nr_requests;

	/*
	 * Batching threshold, in requests, for sleep/wakeup decisions
	 */
	int batch_requests;

	/*
	 * The total number of 512-byte sectors in flight on this queue
	 */
	atomic_t nr_sectors;

	/*
	 * Batching threshold, in sectors, for sleep/wakeup decisions
	 */
	int batch_sectors;

	/*
	 * The maximum number of 512-byte sectors allowed on this queue
	 */
	int max_queue_sectors;

	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	elevator_t		elevator;

	request_fn_proc		* request_fn;
	merge_request_fn	* back_merge_fn;
	merge_request_fn	* front_merge_fn;
	merge_requests_fn	* merge_requests_fn;
	make_request_fn		* make_request_fn;
	plug_device_fn		* plug_device_fn;
	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			* queuedata;

	/*
	 * This is used to remove the plug when tq_disk runs.
	 */
	struct tq_struct	plug_tq;

	/*
	 * Boolean that indicates whether this queue is plugged or not.
	 */
	int			plugged:1;

	/*
	 * Boolean that indicates whether current_request is active or
	 * not.
	 */
	int			head_active:1;

	/*
	 * Boolean indicating that the driver uses blk_started_sectors
	 * and blk_finished_sectors in addition to blk_started_io and
	 * blk_finished_io.  It enables the throttling code to keep the
	 * number of sectors in flight at a reasonable value.
	 */
	int			can_throttle:1;

	unsigned long		bounce_pfn;

	/*
	 * Is meant to protect the queue in the future instead of
	 * io_request_lock
	 */
	spinlock_t		queue_lock;

	/*
	 * Tasks wait here for free read and write requests
	 */
	wait_queue_head_t	wait_for_requests;
};

#define blk_queue_plugged(q)	(q)->plugged
#define blk_fs_request(rq)	((rq)->cmd == READ || (rq)->cmd == WRITE)
#define blk_queue_empty(q)	list_empty(&(q)->queue_head)

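/*
 * Data direction of a request: maps cmd to READ or WRITE and BUGs on
 * anything else.
 */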
extern inline int rq_data_dir(struct request *rq)
{
	if (rq->cmd == READ)
		return READ;
	else if (rq->cmd == WRITE)
		return WRITE;
	else {
		BUG();
		return -1; /* ahem */
	}
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

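/*
 * Bounce limits, expressed as the highest physical address a queue can
 * reach directly; pass one of these (or a device-specific DMA limit) to
 * blk_queue_bounce_limit().
 */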
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)

extern void blk_queue_bounce_limit(request_queue_t *, u64);

#ifdef CONFIG_HIGHMEM
extern struct buffer_head *create_bounce(int, struct buffer_head *);
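/*
 * Bounce a buffer_head into low memory if its page sits above the queue's
 * bounce_pfn limit; otherwise return the original buffer_head untouched.
 */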
extern inline struct buffer_head *blk_queue_bounce(request_queue_t *q, int rw,
						   struct buffer_head *bh)
{
	struct page *page = bh->b_page;

#ifndef CONFIG_DISCONTIGMEM
	if (page - mem_map <= q->bounce_pfn)
#else
	if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) <= q->bounce_pfn)
#endif
		return bh;

	return create_bounce(rw, bh);
}
#else
#define blk_queue_bounce(q, rw, bh)	(bh)
#endif

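/*
 * Physical address of a buffer_head's data, plus merge helpers:
 * BH_CONTIG is true when b2 starts exactly where b1 ends, BH_PHYS_4G when
 * both buffers lie within the same 4GB physical segment.
 */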
#define bh_phys(bh)		(page_to_phys((bh)->b_page) + bh_offset((bh)))

#define BH_CONTIG(b1, b2)	(bh_phys((b1)) + (b1)->b_size == bh_phys((b2)))
#define BH_PHYS_4G(b1, b2)	((bh_phys((b1)) | 0xffffffff) == ((bh_phys((b2)) + (b2)->b_size - 1) | 0xffffffff))

struct blk_dev_struct {
	/*
	 * queue_proc has to be atomic
	 */
	request_queue_t		request_queue;
	queue_proc		*queue;
	void			*data;
};

struct sec_size {
	unsigned block_size;
	unsigned block_size_bits;
};

/*
 * Used to indicate the default queue for drivers that don't bother
 * to implement multiple queues.  We have this access macro here
 * so as to eliminate the need for each and every block device
 * driver to know about the internal structure of blk_dev[].
 */
#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue

extern struct sec_size * blk_sec[MAX_BLKDEV];
extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size);
extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
extern void generic_make_request(int rw, struct buffer_head * bh);
extern request_queue_t *blk_get_queue(kdev_t dev);
extern void blkdev_release_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern int blk_grow_request_list(request_queue_t *q, int nr_requests, int max_queue_sectors);
extern void blk_init_queue(request_queue_t *, request_fn_proc *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_headactive(request_queue_t *, int);
extern void blk_queue_throttle_sectors(request_queue_t *, int);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void generic_unplug_device(void *);
extern int blk_seg_merge_ok(struct buffer_head *, struct buffer_head *);

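/*
 * Illustrative sketch only, not itself part of this header: a simple
 * driver with a single queue would typically do something like the
 * following at init time, where MYDEV_MAJOR and mydev_request_fn are
 * placeholders supplied by the driver:
 *
 *	request_queue_t *q = BLK_DEFAULT_QUEUE(MYDEV_MAJOR);
 *
 *	blk_init_queue(q, mydev_request_fn);
 *	blk_queue_headactive(q, 0);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */

/*
 * Per-device parameters, indexed as array[MAJOR][MINOR]: device size
 * (in 1K blocks), soft block size and hardware sector size (in bytes),
 * plus readahead and per-request sector/segment limits.
 */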
extern int * blk_size[MAX_BLKDEV];

extern int * blksize_size[MAX_BLKDEV];

extern int * hardsect_size[MAX_BLKDEV];

extern int * max_readahead[MAX_BLKDEV];

extern int * max_sectors[MAX_BLKDEV];

extern int * max_segments[MAX_BLKDEV];

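/*
 * Hard limits: segments and sectors per request, total sectors that may
 * be queued at once, and the ceiling on preallocated requests per queue.
 */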
#define MAX_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_QUEUE_SECTORS (4 << (20 - 9)) /* 4 mbytes when full sized */
#define MAX_NR_REQUESTS 1024 /* 1024k when in 512 units, normally min is 1M in 1k units */

#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)

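/*
 * Conversions between a queue's list_head entries and struct request,
 * for walking the request list.
 */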
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queue)
#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
#define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next)
#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev)

extern void drive_stat_acct (kdev_t dev, int rw,
					unsigned long nr_sectors, int new_io);

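/*
 * Hardware sector size of a device, defaulting to 512 bytes when the
 * driver has not registered one in hardsect_size[].
 */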
static inline int get_hardsect_size(kdev_t dev)
{
	int retval = 512;
	int major = MAJOR(dev);

	if (hardsect_size[major]) {
		int minor = MINOR(dev);
		if (hardsect_size[major][minor])
			retval = hardsect_size[major][minor];
	}
	return retval;
}

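/*
 * Queue fullness tests used for throttling: with can_throttle set the
 * decision is based on sectors in flight, otherwise on whether the
 * request freelist has run dry.  The _reads variant allows reads one
 * extra batch of sectors beyond the normal limit.
 */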
static inline int blk_oversized_queue(request_queue_t * q)
{
	if (q->can_throttle)
		return atomic_read(&q->nr_sectors) > q->max_queue_sectors;
	return q->rq.count == 0;
}

static inline int blk_oversized_queue_reads(request_queue_t * q)
{
	if (q->can_throttle)
		return atomic_read(&q->nr_sectors) > q->max_queue_sectors + q->batch_sectors;
	return q->rq.count == 0;
}

static inline int blk_oversized_queue_batch(request_queue_t * q)
{
	return atomic_read(&q->nr_sectors) > q->max_queue_sectors - q->batch_sectors;
}

#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

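/*
 * Sector-based accounting for throttled queues (blk_started_io and
 * blk_finished_io above are no-ops here).  blk_finished_sectors also
 * wakes tasks sleeping in wait_for_requests once the queue drops back
 * below the batch threshold.
 */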
static inline void blk_started_sectors(struct request *rq, int count)
{
	request_queue_t *q = rq->q;
	if (q && q->can_throttle) {
		atomic_add(count, &q->nr_sectors);
		if (atomic_read(&q->nr_sectors) < 0) {
			printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
			BUG();
		}
	}
}

static inline void blk_finished_sectors(struct request *rq, int count)
{
	request_queue_t *q = rq->q;
	if (q && q->can_throttle) {
		atomic_sub(count, &q->nr_sectors);

		smp_mb();
		if (q->rq.count >= q->batch_requests && !blk_oversized_queue_batch(q)) {
			if (waitqueue_active(&q->wait_for_requests))
				wake_up(&q->wait_for_requests);
		}
		if (atomic_read(&q->nr_sectors) < 0) {
			printk("nr_sectors is %d\n", atomic_read(&q->nr_sectors));
			BUG();
		}
	}
}

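/*
 * log2 of a block size, e.g. blksize_bits(1024) == 10; assumes a block
 * size of at least 512 bytes.
 */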
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

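/*
 * Soft (filesystem) block size of a device, defaulting to BLOCK_SIZE
 * (1K) when the driver has not set one in blksize_size[].
 */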
static inline unsigned int block_size(kdev_t dev)
{
	int retval = BLOCK_SIZE;
	int major = MAJOR(dev);

	if (blksize_size[major]) {
		int minor = MINOR(dev);
		if (blksize_size[major][minor])
			retval = blksize_size[major][minor];
	}
	return retval;
}

#endif