1 /*******************************************************************************
2 
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 #ifndef _KCOMPAT_H_
30 #define _KCOMPAT_H_
31 
32 #include <linux/version.h>
33 #include <linux/types.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/ioport.h>
39 #include <linux/slab.h>
40 #include <linux/pagemap.h>
41 #include <linux/list.h>
42 #include <linux/delay.h>
43 #include <linux/sched.h>
44 #include <asm/io.h>
45 
/* general compatibility flags unclassified per kernel */
/* Power-management suspend message value for kernels without PMSG_SUSPEND. */
#ifndef PMSG_SUSPEND
#define PMSG_SUSPEND 3
#endif

/* Old-style module parameter declaration fallback. */
#ifndef module_param
#define module_param(v,t,p) MODULE_PARM(v, "i");
#endif

/* DMA addressing mask constants for pci_set_dma_mask() and friends. */
#ifndef DMA_64BIT_MASK
#define DMA_64BIT_MASK  0xffffffffffffffffULL
#endif

#ifndef DMA_32BIT_MASK
#define DMA_32BIT_MASK  0x00000000ffffffffULL
#endif

/* PCI Express capability ID, missing from older pci_regs.h headers. */
#ifndef PCI_CAP_ID_EXP
#define PCI_CAP_ID_EXP 0x10
#endif

/* mmiowb(): MMIO write barrier; only IA64 emits a real instruction here. */
#ifndef mmiowb
#ifdef CONFIG_IA64
#define mmiowb() asm volatile ("mf.a" ::: "memory")
#else
#define mmiowb()
#endif
#endif

/* Kernels whose interrupt handlers return void: stub the return values. */
#ifndef IRQ_HANDLED
#define irqreturn_t void
#define IRQ_HANDLED
#define IRQ_NONE
#endif

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev)
#endif

/* Before free_netdev() existed, a plain kfree() released the netdev. */
#ifndef HAVE_FREE_NETDEV
#define free_netdev(x)	kfree(x)
#endif

#ifdef HAVE_POLL_CONTROLLER
#define CONFIG_NET_POLL_CONTROLLER
#endif

/* hard_start_xmit return codes for kernels that predate them. */
#ifndef NETDEV_TX_OK
#define NETDEV_TX_OK 0
#endif

#ifndef NETDEV_TX_BUSY
#define NETDEV_TX_BUSY 1
#endif

#ifndef NETDEV_TX_LOCKED
#define NETDEV_TX_LOCKED -1
#endif

#ifndef SKB_DATAREF_SHIFT
/* if we do not have the infrastructure to detect if skb_header is cloned
   just return false in all cases */
#define skb_header_cloned(x) 0
#endif

/* Map the newer gso_* skb field names onto the old tso_* fields. */
#ifndef NETIF_F_GSO
#define gso_size tso_size
#define gso_segs tso_segs
#endif

/* Map the newer checksum state names onto the old CHECKSUM_HW value. */
#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif

#ifndef __read_mostly
#define __read_mostly
#endif

/* netif_msg_* message-level bits: define the full set when the kernel has
   none, otherwise add only the two bits this driver needs. */
#ifndef HAVE_NETIF_MSG
#define HAVE_NETIF_MSG 1
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#else
#define NETIF_MSG_HW	0x2000
#define NETIF_MSG_WOL	0x4000
#endif /* HAVE_NETIF_MSG */

#ifndef MII_RESV1
#define MII_RESV1		0x17		/* Reserved...		*/
#endif

/* Branch-prediction hints degrade to plain expressions when unavailable. */
#ifndef unlikely
#define unlikely(_x) _x
#define likely(_x) _x
#endif

#ifndef WARN_ON
#define WARN_ON(x)
#endif

/* Initializer helper for struct pci_device_id tables. */
#ifndef PCI_DEVICE
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#endif
168 
169 /*****************************************************************************/
170 /* 2.5.28 => 2.4.23 */
171 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
172 
_kc_synchronize_irq(void)173 static inline void _kc_synchronize_irq(void) { synchronize_irq(); }
174 #undef synchronize_irq
175 #define synchronize_irq(X) _kc_synchronize_irq()
176 
177 #include <linux/tqueue.h>
178 #define work_struct tq_struct
179 #define INIT_WORK INIT_TQUEUE
180 #define schedule_work schedule_task
181 #define flush_scheduled_work flush_scheduled_tasks
182 
183 #endif /* 2.5.28 => 2.4.17 */
184 
185 /*****************************************************************************/
186 /* 2.6.0 => 2.5.28 */
187 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
188 #define MODULE_INFO(version, _version)
189 
190 #define pci_set_consistent_dma_mask(dev,mask) 1
191 
192 #undef dev_put
193 #define dev_put(dev) __dev_put(dev)
194 
195 #ifndef skb_fill_page_desc
196 #define skb_fill_page_desc _kc_skb_fill_page_desc
197 extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
198 #endif
199 
200 #ifndef pci_dma_mapping_error
201 #define pci_dma_mapping_error _kc_pci_dma_mapping_error
_kc_pci_dma_mapping_error(dma_addr_t dma_addr)202 static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
203 {
204 	return dma_addr == 0;
205 }
206 #endif
207 
208 #endif /* 2.6.0 => 2.5.28 */
209 
/*****************************************************************************/
/* 2.6.4 => 2.6.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
/* Emulate MODULE_VERSION() via the generic MODULE_INFO() tag. */
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#endif /* 2.6.4 => 2.6.0 */

/*****************************************************************************/
/* 2.6.5 => 2.6.0 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
/* Map both directional DMA sync helpers onto the single older routine
   (the second define chains through the first). */
#define pci_dma_sync_single_for_cpu	pci_dma_sync_single
#define pci_dma_sync_single_for_device	pci_dma_sync_single_for_cpu
#endif /* 2.6.5 => 2.6.0 */
222 
223 /*****************************************************************************/
224 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
225 #undef if_mii
226 #define if_mii _kc_if_mii
_kc_if_mii(struct ifreq * rq)227 static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
228 {
229 	return (struct mii_ioctl_data *) &rq->ifr_ifru;
230 }
231 #endif /* < 2.6.7 */
232 
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
/* msleep() back-port: uninterruptible sleep of roughly x milliseconds,
   rounded up (+2 jiffies) so the sleep never ends early. */
#define msleep(x)	do { set_current_state(TASK_UNINTERRUPTIBLE); \
				schedule_timeout((x * HZ)/1000 + 2); \
			} while (0)
#endif
239 
240 /*****************************************************************************/
241 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
242 #define __iomem
243 
244 #define MSEC_PER_SEC    1000L
_kc_jiffies_to_msecs(const unsigned long j)245 static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
246 {
247 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
248 	return (MSEC_PER_SEC / HZ) * j;
249 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
250 	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
251 #else
252 	return (j * MSEC_PER_SEC) / HZ;
253 #endif
254 }
_kc_msecs_to_jiffies(const unsigned int m)255 static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
256 {
257 	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
258 		return MAX_JIFFY_OFFSET;
259 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
260 	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
261 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
262 	return m * (HZ / MSEC_PER_SEC);
263 #else
264 	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
265 #endif
266 }
267 
268 #define msleep_interruptible _kc_msleep_interruptible
_kc_msleep_interruptible(unsigned int msecs)269 static unsigned inline long _kc_msleep_interruptible(unsigned int msecs)
270 {
271 	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
272 
273 	while (timeout && !signal_pending(current)) {
274 		__set_current_state(TASK_INTERRUPTIBLE);
275 		timeout = schedule_timeout(timeout);
276 	}
277 	return _kc_jiffies_to_msecs(timeout);
278 }
279 #endif /* < 2.6.9 */
280 
/*****************************************************************************/
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) && \
      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
/* NOTE(review): both macros below implicitly reference a local variable
   named "adapter" whose pci_state member must point to a 16-dword save
   area; they are only usable where such a variable is in scope. */
#ifdef pci_save_state
#undef pci_save_state
#endif
/* Save the first 64 bytes (16 dwords) of PCI config space. */
#define pci_save_state(X) { \
        int i; \
        if (adapter->pci_state) { \
                for (i = 0; i < 16; i++) { \
                        pci_read_config_dword((X), \
                                              i * 4, \
                                              &adapter->pci_state[i]); \
                } \
        } \
}

#ifdef pci_restore_state
#undef pci_restore_state
#endif
/* Restore the saved config dwords; with no save area, rewrite only the
   six BARs from the device's resource table. */
#define pci_restore_state(X) { \
        int i; \
        if (adapter->pci_state) { \
                for (i = 0; i < 16; i++) { \
                        pci_write_config_dword((X), \
                                               i * 4, \
                                               adapter->pci_state[i]); \
                } \
        } else { \
                for (i = 0; i < 6; i++) { \
                        pci_write_config_dword((X), \
                                               PCI_BASE_ADDRESS_0 + (i * 4), \
                                               (X)->resource[i].start); \
                } \
        } \
}
#endif /* 2.4.6 <= x < 2.6.10 */
318 
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
#ifdef module_param_array_named
/* Replace the kernel's module_param_array_named() with a local variant
   built on module_param_call(), sizing the array via ARRAY_SIZE(). */
#undef module_param_array_named
#define module_param_array_named(name, array, type, nump, perm)          \
	static struct kparam_array __param_arr_##name                    \
	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
	    sizeof(array[0]), array };                                   \
	module_param_call(name, param_array_set, param_array_get,        \
			  &__param_arr_##name, perm)
#endif /* module_param_array_named */
#endif /* < 2.6.10 */
331 
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
/* PCI power-state constants and pci_choose_state() stub for old kernels. */
#define PCI_D0      0
#define PCI_D1      1
#define PCI_D2      2
#define PCI_D3hot   3
#define PCI_D3cold  4
#define pci_choose_state(pdev,state) state
#define PMSG_SUSPEND 3

/* Make sure the LLTX feature flag is not used on these kernels. */
#undef NETIF_F_LLTX

#ifndef ARCH_HAS_PREFETCH
#define prefetch(X)
#endif

/* Default RX buffer offset used by the driver when undefined by the
   kernel -- presumably the standard 2-byte IP-header alignment pad. */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif

#endif /* < 2.6.11 */
353 
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
/* pm_message_t was still a plain u32 on these kernels. */
#define pm_message_t u32
#ifndef kzalloc
/* kzalloc() back-port; implemented out of line (extern). */
#define kzalloc _kc_kzalloc
extern void *_kc_kzalloc(size_t size, int flags);
#endif
#endif
362 
/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )

/* Map the IRQF_* flag names back to the older SA_* names. */
#ifndef IRQF_PROBE_SHARED
#ifdef SA_PROBEIRQ
#define IRQF_PROBE_SHARED SA_PROBEIRQ
#else
#define IRQF_PROBE_SHARED 0
#endif
#endif

#ifndef IRQF_SHARED
#define IRQF_SHARED SA_SHIRQ
#endif

/* netdev_alloc_skb() back-port; implemented out of line (extern). */
#ifndef netdev_alloc_skb
#define netdev_alloc_skb _kc_netdev_alloc_skb
extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
                                            unsigned int length);
#endif

#endif /* < 2.6.18 */
385 /*****************************************************************************/
386 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
387 
388 typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
389 typedef void (*new_handler_t)(int, void*);
_kc_request_irq(unsigned int irq,new_handler_t handler,unsigned long flags,const char * devname,void * dev_id)390 static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
391 {
392 	irq_handler_t new_handler = (irq_handler_t) handler;
393 	return request_irq(irq, new_handler, flags, devname, dev_id);
394 }
395 
396 #undef request_irq
397 #define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
398 
399 #endif /* < 2.6.19 */
400 /*****************************************************************************/
401 
402 #endif /* _KCOMPAT_H_ */
403 
404