1 #ifndef _LINUX_ELEVATOR_H
2 #define _LINUX_ELEVATOR_H
3 
/*
 * Callback types implemented by each elevator (I/O scheduler) variant.
 */

/* Generic elevator insertion callback.
 * NOTE(review): no matching member exists in struct elevator_s below --
 * presumably consumed elsewhere in the block layer; confirm before use. */
typedef void (elevator_fn) (struct request *, elevator_t *,
			    struct list_head *,
			    struct list_head *, int);

/* Decide whether/where a buffer_head can be merged into a pending request;
 * returns one of the ELEVATOR_*_MERGE codes defined later in this header. */
typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *,
				 struct buffer_head *, int, int);

/* Post-merge accounting hook.
 * NOTE(review): declared but not referenced by struct elevator_s below --
 * possibly vestigial or wired up elsewhere; confirm. */
typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);

/* Fold one request into another after two queued requests are merged. */
typedef void (elevator_merge_req_fn) (struct request *, struct request *);
14 
/*
 * Per-queue elevator state: latency budgets plus the merge callbacks this
 * elevator implements.  Instances are typically built from the
 * ELEVATOR_NOOP / ELEVATOR_LINUS templates defined later in this header.
 */
struct elevator_s
{
	int read_latency;	/* budget returned for READ by elevator_request_latency() */
	int write_latency;	/* budget returned for non-READ by elevator_request_latency() */

	elevator_merge_fn *elevator_merge_fn;		/* try to merge a bh into a queued request */
	elevator_merge_req_fn *elevator_merge_req_fn;	/* combine two requests after a merge */

	/* NOTE(review): left zero by the ELEVATOR_* template initializers
	 * below -- presumably assigned when the queue is set up; confirm. */
	unsigned int queue_ID;
};
25 
/* Callbacks for the "noop" elevator (FIFO; see ELEVATOR_NOOP below). */
int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int);
void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_noop_merge_req(struct request *, struct request *);

/* Callbacks for the "linus" elevator (see ELEVATOR_LINUS below). */
int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int);
void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
void elevator_linus_merge_req(struct request *, struct request *);
33 
/*
 * Argument block exchanged with user space via BLKELVGET/BLKELVSET.
 * Field layout is user-visible ABI -- do not reorder or resize.
 */
typedef struct blkelv_ioctl_arg_s {
	int queue_ID;
	int read_latency;
	int write_latency;
	/* NOTE(review): no matching member in struct elevator_s above --
	 * possibly retained only for ABI compatibility; confirm. */
	int max_bomb_segments;
} blkelv_ioctl_arg_t;
40 
/*
 * ioctls for querying/tuning elevator parameters from user space.
 * NOTE(review): the third argument is sizeof(...) although _IOR/_IOW are
 * conventionally given the type itself, so the size encoded in the ioctl
 * number is presumably sizeof(sizeof(...)) -- i.e. sizeof(size_t).  Kept
 * as-is: changing it would change the ioctl numbers user space relies on.
 */
#define BLKELVGET   _IOR(0x12,106,sizeof(blkelv_ioctl_arg_t))
#define BLKELVSET   _IOW(0x12,107,sizeof(blkelv_ioctl_arg_t))

/* Copy elevator settings out to / in from a blkelv_ioctl_arg_t. */
extern int blkelvget_ioctl(elevator_t *, blkelv_ioctl_arg_t *);
extern int blkelvset_ioctl(elevator_t *, const blkelv_ioctl_arg_t *);

/* Initialize the first argument from a template passed by value
 * (e.g. ELEVATOR_NOOP or ELEVATOR_LINUS below). */
extern void elevator_init(elevator_t *, elevator_t);
48 
/*
 * Return values from elevator merger (elevator_merge_fn).
 */
#define ELEVATOR_NO_MERGE	0	/* bh cannot be merged into any queued request */
#define ELEVATOR_FRONT_MERGE	1	/* bh merges at the front of an existing request */
#define ELEVATOR_BACK_MERGE	2	/* bh merges at the back of an existing request */
55 
/*
 * Ordering predicates used by the elevator algorithm.  Reads are no
 * longer prioritised over writes: although reads are more time-critical
 * than writes, treating them equally increases filesystem throughput
 * and gives better overall performance. -- sct
 *
 * Ordering is by device first, then by sector within a device.  Both
 * macros evaluate their arguments more than once, so do not pass
 * expressions with side effects.
 */
#define IN_ORDER(s1,s2)				\
	((s1)->rq_dev < (s2)->rq_dev ||		\
	 ((s1)->rq_dev == (s2)->rq_dev &&	\
	  (s1)->sector < (s2)->sector))

#define BHRQ_IN_ORDER(bh, rq)			\
	((bh)->b_rdev < (rq)->rq_dev ||		\
	 ((bh)->b_rdev == (rq)->rq_dev &&	\
	  (bh)->b_rsector < (rq)->sector))
71 
/*
 * Return the latency budget this elevator grants the given request
 * direction: read_latency for READ, write_latency for everything else.
 */
static inline int elevator_request_latency(elevator_t * elevator, int rw)
{
	return (rw == READ) ? elevator->read_latency
			    : elevator->write_latency;
}
82 
/* Per-seek cost unit for the "linus" elevator's accounting.
 * NOTE(review): consumers of this constant are not visible in this header. */
#define ELV_LINUS_SEEK_COST	1
84 
/*
 * Template for the no-op elevator: zero latency budgets and the trivial
 * merge callbacks.  Members not named here (queue_ID) are zero-initialized,
 * exactly as with the original positional initializer list.
 */
#define ELEVATOR_NOOP							\
((elevator_t) {								\
	.read_latency		= 0,					\
	.write_latency		= 0,					\
	.elevator_merge_fn	= elevator_noop_merge,			\
	.elevator_merge_req_fn	= elevator_noop_merge_req,		\
	})
93 
/*
 * Template for the "linus" elevator: 128 read / 512 write passovers and
 * the linus merge callbacks.  Members not named here (queue_ID) are
 * zero-initialized, exactly as with the original positional initializer list.
 */
#define ELEVATOR_LINUS							\
((elevator_t) {								\
	.read_latency		= 128,	/* read passovers */		\
	.write_latency		= 512,	/* write passovers */		\
	.elevator_merge_fn	= elevator_linus_merge,			\
	.elevator_merge_req_fn	= elevator_linus_merge_req,		\
	})
102 
103 #endif
104