1 /* drivers/message/fusion/linux_compat.h */
2
3 #ifndef FUSION_LINUX_COMPAT_H
4 #define FUSION_LINUX_COMPAT_H
5 /*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6
7 #include <linux/version.h>
8 #include <linux/config.h>
9 #include <linux/kernel.h>
10 #include <linux/pci.h>
11
12 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
13
14
/*
 * 64-bit platforms that run 32-bit user space need the 32/64-bit ioctl
 * translation layer; flag them here.
 */
#if (defined(__sparc__) && defined(__sparc_v9__)) || defined(__x86_64__)
#define MPT_CONFIG_COMPAT
#endif

/* Older kernels have no rwlock_init() helper; initialize by assignment. */
#ifndef rwlock_init
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#endif

/* task_struct has a ->nice field only on 2.4+; no-op on older kernels. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
#define SET_NICE(current,x) do {(current)->nice = (x);} while (0)
#else
#define SET_NICE(current,x)
#endif
28
/*
 * Pre-2.4 kernels: pci_enable_device() does not exist (the stub reports
 * success) and the SCSI data-direction constants are not yet defined.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
#define pci_enable_device(pdev) (0)
#define SCSI_DATA_UNKNOWN 0
#define SCSI_DATA_WRITE 1
#define SCSI_DATA_READ 2
#define SCSI_DATA_NONE 3
#endif


/* These two appeared in 2.4.4; the stubs simply report success. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
#define pci_set_dma_mask(pdev, mask) (0)
#define scsi_set_pci_device(sh, pdev) (0)
#endif
42
/*
 * dma_addr_t fallback: the type appeared in 2.2.18 (2.2 series) and
 * 2.3.42 (2.3 series); provide it for anything older.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,18)
typedef unsigned int dma_addr_t;
# endif
#else
# if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,42)
typedef unsigned int dma_addr_t;
# endif
#endif
52
/*
 * Kernels older than 2.2.18 lack module_init()/module_exit(); the block
 * below carries the 2.2.18 implementations verbatim.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,18)
/*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/* This block snipped from lk-2.2.18/include/linux/init.h { */
/*
 * Used for initialization calls..
 */
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);

/* Place the call pointers in the dedicated init/exit linker sections. */
#define __init_call __attribute__ ((unused,__section__ (".initcall.init")))
#define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))

/* Section boundary markers supplied by the linker script. */
extern initcall_t __initcall_start, __initcall_end;

#define __initcall(fn) \
static initcall_t __initcall_##fn __init_call = fn
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn

#ifdef MODULE
/* These macros create a dummy inline: gcc 2.9x does not count alias
   as usage, hence the `unused function' warning when __init functions
   are declared static. We use the dummy __*_module_inline functions
   both to kill the warning and check the type of the init/cleanup
   function. */
typedef int (*__init_module_func_t)(void);
typedef void (*__cleanup_module_func_t)(void);
/* Modular build: alias the canonical init_module()/cleanup_module(). */
#define module_init(x) \
int init_module(void) __attribute__((alias(#x))); \
static inline __init_module_func_t __init_module_inline(void) \
{ return x; }
#define module_exit(x) \
void cleanup_module(void) __attribute__((alias(#x))); \
static inline __cleanup_module_func_t __cleanup_module_inline(void) \
{ return x; }

#else
/* Built-in: register the functions in the initcall/exitcall sections. */
#define module_init(x) __initcall(x);
#define module_exit(x) __exitcall(x);
#endif
/* } block snipped from lk-2.2.18/include/linux/init.h */
/* This block snipped from lk-2.2.18/include/linux/sched.h { */
/*
 * Used prior to schedule_timeout calls..
 */
#define __set_current_state(state_value) do { current->state = state_value; } while (0)
#ifdef CONFIG_SMP
/* SMP needs a memory barrier so other CPUs observe the state change. */
#define set_current_state(state_value) do { __set_current_state(state_value); mb(); } while (0)
#else
#define set_current_state(state_value) __set_current_state(state_value)
#endif
/* } block snipped from lk-2.2.18/include/linux/sched.h */

/* procfs compat stuff: old kernels create a directory via S_IFDIR. */
#define proc_mkdir(x,y) create_proc_entry(x, S_IFDIR, y)

/* MUTEX compat stuff: old-style semaphore declaration/initialization. */
#define DECLARE_MUTEX(name) struct semaphore name=MUTEX
#define DECLARE_MUTEX_LOCKED(name) struct semaphore name=MUTEX_LOCKED
#define init_MUTEX(x) *(x)=MUTEX
#define init_MUTEX_LOCKED(x) *(x)=MUTEX_LOCKED

/* Wait queues: 2.2.x uses a bare struct wait_queue pointer/node. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
struct wait_queue * (name) = NULL
#define DECLARE_WAITQUEUE(name, task) \
struct wait_queue (name) = { (task), NULL }

#if defined(__sparc__) && defined(__sparc_v9__)
/* The sparc64 ioremap implementation is wrong in 2.2.x,
 * but fixing it would break all of the drivers which
 * workaround it. Fixed in 2.3.x onward. -DaveM
 */
#define ARCH_IOREMAP(base) ((unsigned long) (base))
#else
#define ARCH_IOREMAP(base) ioremap(base)
#endif

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#else /* LINUX_VERSION_CODE must be >= KERNEL_VERSION(2,2,18) */

/* No ioremap bugs in >2.3.x kernels. */
#define ARCH_IOREMAP(base) ioremap(base)

/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,2,18) */
141
142
/*
 * Stub out MODULE_LICENSE() on kernels that predate it.
 *
 * Inclined to use:
 * #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10)
 * here, but MODULE_LICENSE defined in 2.4.9-6 and 2.4.9-13
 * breaks the rule:-(
 */
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(license)
#endif
152
153
/* PCI/driver subsystem { */
#ifndef pci_for_each_dev
/* Pre-2.3.x PCI API: walk the global pci_devices list directly. */
#define pci_for_each_dev(dev) for((dev)=pci_devices; (dev)!=NULL; (dev)=(dev)->next)
#define pci_peek_next_dev(dev) ((dev)->next ? (dev)->next : NULL)
#define DEVICE_COUNT_RESOURCE 6
/*
 * These expand to struct pci_dev member references and are spliced in as
 * "pdev->PCI_BASEADDR_xxx(idx)", so the expansions CANNOT be wrapped in
 * parentheses; callers must beware of the unparenthesized '&' below.
 */
#define PCI_BASEADDR_FLAGS(idx) base_address[idx]
#define PCI_BASEADDR_START(idx) base_address[idx] & ~0xFUL
/*
 * We have to keep track of the original value using
 * a temporary, and not by just sticking pdev->base_address[x]
 * back. pdev->base_address[x] is an opaque cookie that can
 * be used by the PCI implementation on a given Linux port
 * for any purpose. -DaveM
 *
 * NOTE(review): the classic BAR-sizing idiom computes the size as
 * ~(mask & ~flag_bits) + 1; the "(4 - size)" result below looks suspect
 * for anything but a 4-byte I/O BAR -- confirm against a 2.2.x reference.
 */
#define PCI_BASEADDR_SIZE(__pdev, __idx) \
({ unsigned int size, tmp; \
pci_read_config_dword(__pdev, PCI_BASE_ADDRESS_0 + (4*(__idx)), &tmp); \
pci_write_config_dword(__pdev, PCI_BASE_ADDRESS_0 + (4*(__idx)), 0xffffffff); \
pci_read_config_dword(__pdev, PCI_BASE_ADDRESS_0 + (4*(__idx)), &size); \
pci_write_config_dword(__pdev, PCI_BASE_ADDRESS_0 + (4*(__idx)), tmp); \
(4 - size); \
})
#else
/* 2.3.x+ resource-based PCI API. */
#define pci_peek_next_dev(dev) ((dev) != pci_dev_g(&pci_devices) ? pci_dev_g((dev)->global_list.next) : NULL)
#define PCI_BASEADDR_FLAGS(idx) resource[idx].flags
#define PCI_BASEADDR_START(idx) resource[idx].start
/* Fully parenthesized so the result is safe inside larger expressions. */
#define PCI_BASEADDR_SIZE(dev,idx) ((dev)->resource[idx].end - (dev)->resource[idx].start + 1)
#endif /* } ifndef pci_for_each_dev */
182
183
/* Compatibility for the 2.3.x PCI DMA API. */
#ifndef PCI_DMA_BIDIRECTIONAL
/*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/* DMA data-direction codes (same values as the later <linux/pci.h>). */
#define PCI_DMA_BIDIRECTIONAL 0
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
#define PCI_DMA_NONE 3

#ifdef __KERNEL__
#include <asm/page.h>
195 /* Pure 2^n version of get_order */
__get_order(unsigned long size)196 static __inline__ int __get_order(unsigned long size)
197 {
198 int order;
199
200 size = (size-1) >> (PAGE_SHIFT-1);
201 order = -1;
202 do {
203 size >>= 1;
204 order++;
205 } while (size);
206 return order;
207 }
208 #endif
209
/*
 * Coherent-allocation fallback: grab zeroed free pages and report the
 * bus address via virt_to_bus() (identity DMA mapping, no IOMMU).
 * GFP_ATOMIC so callers may be in non-sleeping context; evaluates to
 * the kernel virtual address, or NULL on failure.
 */
#define pci_alloc_consistent(hwdev, size, dma_handle) \
({ void *__ret = (void *)__get_free_pages(GFP_ATOMIC, __get_order(size)); \
if (__ret != NULL) { \
memset(__ret, 0, size); \
*(dma_handle) = virt_to_bus(__ret); \
} \
__ret; \
})

/* Release pages obtained from pci_alloc_consistent() above. */
#define pci_free_consistent(hwdev, size, vaddr, dma_handle) \
free_pages((unsigned long)vaddr, __get_order(size))
221
/*
 * Single-buffer "mapping" is just the bus address (no IOMMU here).
 * No trailing semicolon: the macro must behave as an expression, e.g.
 * "dma = pci_map_single(...)" inside an if/else, where a stray ';'
 * would orphan the else branch.
 */
#define pci_map_single(hwdev, ptr, size, direction) \
		virt_to_bus(ptr)
224
/* Identity mapping: nothing to tear down. */
#define pci_unmap_single(hwdev, dma_addr, size, direction) \
do { /* Nothing to do */ } while (0)

/* Scatter-gather "mapping" leaves the list untouched; report nents. */
#define pci_map_sg(hwdev, sg, nents, direction) (nents)
#define pci_unmap_sg(hwdev, sg, nents, direction) \
do { /* Nothing to do */ } while(0)

/* Bus address / length of one scatterlist entry (identity-mapped). */
#define sg_dma_address(sg) (virt_to_bus((sg)->address))
#define sg_dma_len(sg) ((sg)->length)

/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#endif /* PCI_DMA_BIDIRECTIONAL */
237
/*
 * With the new command queuing code in the SCSI mid-layer we no longer have
 * to hold the io_request_lock spin lock when calling the scsi_done routine.
 * For now we only do this with the 2.5.1 kernel or newer.
 *
 * 'flags' must be an unsigned long, per spin_lock_irqsave().
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,1)
#define MPT_HOST_LOCK(flags)
#define MPT_HOST_UNLOCK(flags)
#else
#define MPT_HOST_LOCK(flags) \
spin_lock_irqsave(&io_request_lock, flags)
#define MPT_HOST_UNLOCK(flags) \
spin_unlock_irqrestore(&io_request_lock, flags)
#endif
252
/*
 * We use our new error handling code if the kernel version is 2.4.18 or newer.
 * Remark: 5/5/03 use old EH code with 2.4 kernels as it runs in a background thread
 * 2.4 kernels choke on a call to schedule via eh thread.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,1)
#define MPT_SCSI_USE_NEW_EH
#endif

/*
 * Deferred-work compat: 2.5.41 replaced task queues (tq_struct) with
 * work queues (work_struct); map to whichever this kernel provides.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
#define mpt_work_struct work_struct
#define MPT_INIT_WORK(_task, _func, _data) INIT_WORK(_task, _func, _data)
#else
#define mpt_work_struct tq_struct
#define MPT_INIT_WORK(_task, _func, _data) \
({ (_task)->sync = 0; \
(_task)->routine = (_func); \
(_task)->data = (void *) (_data); \
})
#endif
273
/* synchronize_irq() gained an irq argument in 2.5.28. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28)
#define mpt_sync_irq(_irq) synchronize_irq(_irq)
#else
#define mpt_sync_irq(_irq) synchronize_irq()
#endif

/*
 * Module reference counting: 2.5.58+ handles this in the module core,
 * so the macros become no-ops; older kernels use MOD_*_USE_COUNT.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,58)
#define mpt_inc_use_count()
#define mpt_dec_use_count()
#else
#define mpt_inc_use_count() MOD_INC_USE_COUNT
#define mpt_dec_use_count() MOD_DEC_USE_COUNT
#endif
287
288
289 /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#endif /* FUSION_LINUX_COMPAT_H */
291
292