1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3 	Written 2002-2003 by David Dillow <dave@thedillows.org>
4 	Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 	Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This software is available on a public web site. It may enable
15 	cryptographic capabilities of the 3Com hardware, and may be
16 	exported from the United States under License Exception "TSU"
17 	pursuant to 15 C.F.R. Section 740.13(e).
18 
19 	This work was funded by the National Library of Medicine under
20 	the Department of Energy project number 0274DD06D1 and NLM project
21 	number Y1-LM-2015-01.
22 
23 	This driver is designed for the 3Com 3CR990 Family of cards with the
24 	3XP Processor. It has been tested on x86 and sparc64.
25 
26 	KNOWN ISSUES:
27 	*) The current firmware always strips the VLAN tag off, even if
28 		we tell it not to. You should filter VLANs at the switch
29 		as a workaround (good practice in any event) until we can
30 		get this fixed.
31 	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 		issue. Hopefully 3Com will fix it.
	*) Waiting for a command response takes 8ms due to non-preemptable
		polling. Only significant for getting stats and creating
		SAs, but an ugly wart nevertheless.
36 	*) I've not tested multicast. I think it works, but reports welcome.
37 	*) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
38 */
39 
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * Exposed as a module parameter via MODULE_PARM() below.
 */
static int rx_copybreak = 200;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 * Past this count, typhoon_set_rx_mode() falls back to accept-all-multicast.
 */
static const int multicast_filter_limit = 32;
50 
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes in bytes, used for wrap calculations. */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Rx buffer allocation size -- large enough for a full Ethernet frame. */
#define PKT_BUF_SZ		1536

#define DRV_MODULE_NAME		"typhoon"
#define DRV_MODULE_VERSION 	"1.4.3"
#define DRV_MODULE_RELDATE	"03/12/15"
#define PFX			DRV_MODULE_NAME ": "
#define ERR_PFX			KERN_ERR PFX

/* Refuse to build unoptimized or outside the kernel build system. */
#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error  You must compile this driver with "-O".
#endif
98 
99 #include <linux/module.h>
100 #include <linux/kernel.h>
101 #include <linux/string.h>
102 #include <linux/timer.h>
103 #include <linux/errno.h>
104 #include <linux/ioport.h>
105 #include <linux/slab.h>
106 #include <linux/interrupt.h>
107 #include <linux/pci.h>
108 #include <linux/netdevice.h>
109 #include <linux/etherdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/init.h>
112 #include <linux/delay.h>
113 #include <linux/ethtool.h>
114 #include <linux/if_vlan.h>
115 #include <linux/crc32.h>
116 #include <asm/processor.h>
117 #include <asm/bitops.h>
118 #include <asm/io.h>
119 #include <asm/uaccess.h>
120 #include <linux/in6.h>
121 #include <asm/checksum.h>
122 #include <linux/version.h>
123 
124 #include "typhoon.h"
125 #include "typhoon-firmware.h"
126 
/* Banner printed at probe time. */
static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM(rx_copybreak, "i");

/* The 3XP's TSO engine only handles 32 SG entries; if the stack can hand
 * us more fragments than that, disable TSO entirely.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* Sanity check: the low Tx ring must be able to hold a worst-case packet. */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
143 
/* Per-model marketing name and capability flags. */
struct typhoon_card_info {
	char *name;
	int capabilities;	/* bitwise OR of the TYPHOON_* flags below */
};

/* capability flag bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define	TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Indexes into typhoon_card_info[] below -- keep the two in sync. */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
162 
/* directly indexed by enum typhoon_cards, above -- do not reorder */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
192 
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilites: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 *
 * The driver_data field of each entry is an enum typhoon_cards value
 * used to index typhoon_card_info[] at probe time.
 */
static struct pci_device_id typhoon_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
229 
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES] 	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));

/* Host-side bookkeeping for one Rx buffer handed to the card. */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;	/* DMA mapping of skb's data */
};
252 
/* Per-adapter private state, laid out so hot Tx and Rx/IRQ members
 * sit on their own cache lines.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring 	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	unsigned long		tx_ioaddr;
	u32			txlo_dma_addr;	/* DMA base of the low Tx ring */

	/* Irq/Rx cache line section */
	unsigned long		ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8			awaiting_resp;	/* set while a command waits for a response */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	spinlock_t		state_lock;	/* protects vlgrp and offload */
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;	/* serializes the command ring */
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	const char *		name;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	u16			xcvr_select;
	u16			wol_events;
	u32			offload;	/* TYPHOON_OFFLOAD_* bits currently enabled */
	u32			pci_state[16];

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring 	txHiRing;
};

/* How typhoon_reset() should wait for the card: not at all, busy-wait,
 * or in a loop that is allowed to sleep.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
306 
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
	do { readl(x + TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* synchronize_irq() grew an argument in 2.5.28; paper over the difference. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
#define typhoon_synchronize_irq(x) synchronize_irq()
#else
#define typhoon_synchronize_irq(x) synchronize_irq(x)
#endif

/* TSO support shims: stub everything out when the kernel lacks NETIF_F_TSO. */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->tso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 		0
#define skb_tso_size(x) 	0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
336 
337 static inline void
typhoon_inc_index(u32 * index,const int count,const int num_entries)338 typhoon_inc_index(u32 *index, const int count, const int num_entries)
339 {
340 	/* Increment a ring index -- we can use this for all rings execept
341 	 * the Rx rings, as they use different size descriptors
342 	 * otherwise, everything is the same size as a cmd_desc
343 	 */
344 	*index += count * sizeof(struct cmd_desc);
345 	*index %= num_entries * sizeof(struct cmd_desc);
346 }
347 
/* Advance an index into the command ring by @count entries. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
353 
/* Advance an index into the response ring by @count entries. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
359 
/* Advance an index into the Rx free-buffer ring by @count entries. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
365 
/* Advance an index into the low Tx ring by @count entries. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
372 
373 static inline void
typhoon_inc_rx_index(u32 * index,const int count)374 typhoon_inc_rx_index(u32 *index, const int count)
375 {
376 	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
377 	*index += count * sizeof(struct rx_desc);
378 	*index %= RX_ENTRIES * sizeof(struct rx_desc);
379 }
380 
/* Soft-reset the 3XP and, unless @wait_type is NoWait, poll for it to
 * come back up.
 *
 * @ioaddr: mapped register base of the NIC
 * @wait_type: NoWait, WaitNoSleep (busy-wait), or WaitSleep (may schedule)
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reports
 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
 */
static int
typhoon_reset(unsigned long ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before hitting the reset register */
	writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	writel(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	writel(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(readl(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(1);
			} else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	udelay(100);	/* see the note below for why this delay is needed */
	return err;

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill -- with a 50usec delay, it survives
	 * 35000 typhoon_init_one() calls, where it only make it 25-100
	 * without it.
	 *
	 * As it turns out, still occasionally getting a hung adapter,
	 * so I'm bumping it to 100us.
	 */
}
435 
436 static int
typhoon_wait_status(unsigned long ioaddr,u32 wait_value)437 typhoon_wait_status(unsigned long ioaddr, u32 wait_value)
438 {
439 	int i, err = 0;
440 
441 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
442 		if(readl(ioaddr + TYPHOON_REG_STATUS) == wait_value)
443 			goto out;
444 		udelay(TYPHOON_UDELAY);
445 	}
446 
447 	err = -ETIMEDOUT;
448 
449 out:
450 	return err;
451 }
452 
453 static inline void
typhoon_media_status(struct net_device * dev,struct resp_desc * resp)454 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
455 {
456 	if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
457 		netif_carrier_off(dev);
458 	else
459 		netif_carrier_on(dev);
460 }
461 
/* Answer a "hello" keepalive request from the 3XP by posting a
 * no-response HELLO_RESP command on the command ring.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible before we tell the card */
		smp_wmb();
		writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
482 
/* Drain the response ring, from respCleared up to respReady.
 *
 * If @resp_save is non-NULL, the first sequenced response (seqNo != 0)
 * of up to @resp_size descriptors is copied out for the caller
 * (typhoon_issue_command()); a response too large for the buffer is
 * flagged with TYPHOON_RESP_ERROR instead. Unsolicited responses
 * (media status, hello) are handled inline; anything else is logged
 * and dropped.
 *
 * Returns non-zero once the caller's expected response was collected.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* the response may wrap past the end of the ring */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* only the first sequenced response is collected */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			printk(KERN_ERR "%s: dumping unexpected response "
			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
			       tp->name, le16_to_cpu(resp->cmd),
			       resp->numDesc, resp->flags,
			       le16_to_cpu(resp->parm1),
			       le32_to_cpu(resp->parm2),
			       le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	indexes->respCleared = cpu_to_le32(cleared);
	wmb();	/* make the new cleared index visible to the card */
	return (resp_save == NULL);
}
541 
542 static inline int
typhoon_num_free(int lastWrite,int lastRead,int ringSize)543 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
544 {
545 	/* this works for all descriptors but rx_desc, as they are a
546 	 * different size than the cmd_desc -- everyone else is the same
547 	 */
548 	lastWrite /= sizeof(struct cmd_desc);
549 	lastRead /= sizeof(struct cmd_desc);
550 	return (ringSize + lastRead - lastWrite - 1) % ringSize;
551 }
552 
/* Free slots in the command ring, based on the card's cleared index. */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
	int lastWrite = tp->cmdRing.lastWrite;
	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
561 
/* Free slots in the response ring. */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
	int respReady = le32_to_cpu(tp->indexes->respReady);
	int respCleared = le32_to_cpu(tp->indexes->respCleared);

	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
570 
/* Free slots in the (low) Tx ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
577 
578 static int
typhoon_issue_command(struct typhoon * tp,int num_cmd,struct cmd_desc * cmd,int num_resp,struct resp_desc * resp)579 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
580 		      int num_resp, struct resp_desc *resp)
581 {
582 	struct typhoon_indexes *indexes = tp->indexes;
583 	struct basic_ring *ring = &tp->cmdRing;
584 	struct resp_desc local_resp;
585 	int i, err = 0;
586 	int got_resp;
587 	int freeCmd, freeResp;
588 	int len, wrap_len;
589 
590 	spin_lock(&tp->command_lock);
591 
592 	freeCmd = typhoon_num_free_cmd(tp);
593 	freeResp = typhoon_num_free_resp(tp);
594 
595 	if(freeCmd < num_cmd || freeResp < num_resp) {
596 		printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
597 			"%d (%d) resp\n", tp->name, freeCmd, num_cmd,
598 			freeResp, num_resp);
599 		err = -ENOMEM;
600 		goto out;
601 	}
602 
603 	if(cmd->flags & TYPHOON_CMD_RESPOND) {
604 		/* If we're expecting a response, but the caller hasn't given
605 		 * us a place to put it, we'll provide one.
606 		 */
607 		tp->awaiting_resp = 1;
608 		if(resp == NULL) {
609 			resp = &local_resp;
610 			num_resp = 1;
611 		}
612 	}
613 
614 	wrap_len = 0;
615 	len = num_cmd * sizeof(*cmd);
616 	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
617 		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
618 		len = COMMAND_RING_SIZE - ring->lastWrite;
619 	}
620 
621 	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
622 	if(unlikely(wrap_len)) {
623 		struct cmd_desc *wrap_ptr = cmd;
624 		wrap_ptr += len / sizeof(*cmd);
625 		memcpy(ring->ringBase, wrap_ptr, wrap_len);
626 	}
627 
628 	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
629 
630 	/* "I feel a presence... another warrior is on the the mesa."
631 	 */
632 	wmb();
633 	writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
634 	typhoon_post_pci_writes(tp->ioaddr);
635 
636 	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
637 		goto out;
638 
639 	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
640 	 * preempt or do anything other than take interrupts. So, don't
641 	 * wait for a response unless you have to.
642 	 *
643 	 * I've thought about trying to sleep here, but we're called
644 	 * from many contexts that don't allow that. Also, given the way
645 	 * 3Com has implemented irq coalescing, we would likely timeout --
646 	 * this has been observed in real life!
647 	 *
648 	 * The big killer is we have to wait to get stats from the card,
649 	 * though we could go to a periodic refresh of those if we don't
650 	 * mind them getting somewhat stale. The rest of the waiting
651 	 * commands occur during open/close/suspend/resume, so they aren't
652 	 * time critical. Creating SAs in the future will also have to
653 	 * wait here.
654 	 */
655 	got_resp = 0;
656 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
657 		if(indexes->respCleared != indexes->respReady)
658 			got_resp = typhoon_process_response(tp, num_resp,
659 								resp);
660 		udelay(TYPHOON_UDELAY);
661 	}
662 
663 	if(!got_resp) {
664 		err = -ETIMEDOUT;
665 		goto out;
666 	}
667 
668 	/* Collect the error response even if we don't care about the
669 	 * rest of the response
670 	 */
671 	if(resp->flags & TYPHOON_RESP_ERROR)
672 		err = -EIO;
673 
674 out:
675 	if(tp->awaiting_resp) {
676 		tp->awaiting_resp = 0;
677 		smp_wmb();
678 
679 		/* Ugh. If a response was added to the ring between
680 		 * the call to typhoon_process_response() and the clearing
681 		 * of tp->awaiting_resp, we could have missed the interrupt
682 		 * and it could hang in the ring an indeterminate amount of
683 		 * time. So, check for it, and interrupt ourselves if this
684 		 * is the case.
685 		 */
686 		if(indexes->respCleared != indexes->respReady)
687 			writel(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
688 	}
689 
690 	spin_unlock(&tp->command_lock);
691 	return err;
692 }
693 
694 static void
typhoon_vlan_rx_register(struct net_device * dev,struct vlan_group * grp)695 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
696 {
697 	struct typhoon *tp = (struct typhoon *) dev->priv;
698 	struct cmd_desc xp_cmd;
699 	int err;
700 
701 	spin_lock_bh(&tp->state_lock);
702 	if(!tp->vlgrp != !grp) {
703 		/* We've either been turned on for the first time, or we've
704 		 * been turned off. Update the 3XP.
705 		 */
706 		if(grp)
707 			tp->offload |= TYPHOON_OFFLOAD_VLAN;
708 		else
709 			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
710 
711 		/* If the interface is up, the runtime is running -- and we
712 		 * must be up for the vlan core to call us.
713 		 *
714 		 * Do the command outside of the spin lock, as it is slow.
715 		 */
716 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
717 					TYPHOON_CMD_SET_OFFLOAD_TASKS);
718 		xp_cmd.parm2 = tp->offload;
719 		xp_cmd.parm3 = tp->offload;
720 		spin_unlock_bh(&tp->state_lock);
721 		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
722 		if(err < 0)
723 			printk("%s: vlan offload error %d\n", tp->name, -err);
724 		spin_lock_bh(&tp->state_lock);
725 	}
726 
727 	/* now make the change visible */
728 	tp->vlgrp = grp;
729 	spin_unlock_bh(&tp->state_lock);
730 }
731 
732 static void
typhoon_vlan_rx_kill_vid(struct net_device * dev,unsigned short vid)733 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
734 {
735 	struct typhoon *tp = (struct typhoon *) dev->priv;
736 	spin_lock_bh(&tp->state_lock);
737 	if(tp->vlgrp)
738 		tp->vlgrp->vlan_devices[vid] = NULL;
739 	spin_unlock_bh(&tp->state_lock);
740 }
741 
/* Write a TCP segmentation option descriptor for @skb into @txRing.
 *
 * @ring_dma is the DMA base address of the ring; respAddrLo is pointed
 * at this descriptor's own bytesTx field within the ring.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* whole skb is one segment run: mark both first and last */
	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
762 
/* hard_start_xmit entry point: queue @skb on the low Tx ring.
 *
 * Builds one Tx header descriptor (which stores the skb pointer for
 * completion), an optional TSO option descriptor, and one fragment
 * descriptor per DMA-mapped piece of the skb, then kicks the 3XP.
 * Stops the queue when a worst-case packet would no longer fit.
 *
 * Always returns 0 (the packet is always consumed).
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTIRES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if(skb_tso_size(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* the header descriptor carries the skb pointer (split across the
	 * 32-bit addr/addrHi fields) so Tx completion can free it
	 */
	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_HW) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if(skb_tso_size(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: a single fragment descriptor suffices */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear part first, then each page fragment */
		len = skb->len - skb->data_len;
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	writel(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
913 
914 static void
typhoon_set_rx_mode(struct net_device * dev)915 typhoon_set_rx_mode(struct net_device *dev)
916 {
917 	struct typhoon *tp = (struct typhoon *) dev->priv;
918 	struct cmd_desc xp_cmd;
919 	u32 mc_filter[2];
920 	u16 filter;
921 
922 	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
923 	if(dev->flags & IFF_PROMISC) {
924 		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
925 		       dev->name);
926 		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
927 	} else if((dev->mc_count > multicast_filter_limit) ||
928 		  (dev->flags & IFF_ALLMULTI)) {
929 		/* Too many to match, or accept all multicasts. */
930 		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
931 	} else if(dev->mc_count) {
932 		struct dev_mc_list *mclist;
933 		int i;
934 
935 		memset(mc_filter, 0, sizeof(mc_filter));
936 		for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
937 		    i++, mclist = mclist->next) {
938 			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
939 			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
940 		}
941 
942 		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
943 					 TYPHOON_CMD_SET_MULTICAST_HASH);
944 		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
945 		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
946 		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
947 		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
948 
949 		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
950 	}
951 
952 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
953 	xp_cmd.parm1 = filter;
954 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
955 }
956 
957 static int
typhoon_do_get_stats(struct typhoon * tp)958 typhoon_do_get_stats(struct typhoon *tp)
959 {
960 	struct net_device_stats *stats = &tp->stats;
961 	struct net_device_stats *saved = &tp->stats_saved;
962 	struct cmd_desc xp_cmd;
963 	struct resp_desc xp_resp[7];
964 	struct stats_resp *s = (struct stats_resp *) xp_resp;
965 	int err;
966 
967 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
968 	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
969 	if(err < 0)
970 		return err;
971 
972 	/* 3Com's Linux driver uses txMultipleCollisions as it's
973 	 * collisions value, but there is some other collision info as well...
974 	 */
975 	stats->tx_packets = le32_to_cpu(s->txPackets);
976 	stats->tx_bytes = le32_to_cpu(s->txBytes);
977 	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
978 	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
979 	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
980 	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
981 	stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
982 	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
983 	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
984 			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
985 	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
986 	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
987 	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
988 			SPEED_100 : SPEED_10;
989 	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
990 			DUPLEX_FULL : DUPLEX_HALF;
991 
992 	/* add in the saved statistics
993 	 */
994 	stats->tx_packets += saved->tx_packets;
995 	stats->tx_bytes += saved->tx_bytes;
996 	stats->tx_errors += saved->tx_errors;
997 	stats->collisions += saved->collisions;
998 	stats->rx_packets += saved->rx_packets;
999 	stats->rx_bytes += saved->rx_bytes;
1000 	stats->rx_fifo_errors += saved->rx_fifo_errors;
1001 	stats->rx_errors += saved->rx_errors;
1002 	stats->rx_crc_errors += saved->rx_crc_errors;
1003 	stats->rx_length_errors += saved->rx_length_errors;
1004 
1005 	return 0;
1006 }
1007 
1008 static struct net_device_stats *
typhoon_get_stats(struct net_device * dev)1009 typhoon_get_stats(struct net_device *dev)
1010 {
1011 	struct typhoon *tp = (struct typhoon *) dev->priv;
1012 	struct net_device_stats *stats = &tp->stats;
1013 	struct net_device_stats *saved = &tp->stats_saved;
1014 
1015 	smp_rmb();
1016 	if(tp->card_state == Sleeping)
1017 		return saved;
1018 
1019 	if(typhoon_do_get_stats(tp) < 0) {
1020 		printk(KERN_ERR "%s: error getting stats\n", dev->name);
1021 		return saved;
1022 	}
1023 
1024 	return stats;
1025 }
1026 
1027 static int
typhoon_set_mac_address(struct net_device * dev,void * addr)1028 typhoon_set_mac_address(struct net_device *dev, void *addr)
1029 {
1030 	struct sockaddr *saddr = (struct sockaddr *) addr;
1031 
1032 	if(netif_running(dev))
1033 		return -EBUSY;
1034 
1035 	memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1036 	return 0;
1037 }
1038 
1039 static inline void
typhoon_ethtool_gdrvinfo(struct typhoon * tp,struct ethtool_drvinfo * info)1040 typhoon_ethtool_gdrvinfo(struct typhoon *tp, struct ethtool_drvinfo *info)
1041 {
1042 	struct pci_dev *pci_dev = tp->pdev;
1043 	struct cmd_desc xp_cmd;
1044 	struct resp_desc xp_resp[3];
1045 
1046 	smp_rmb();
1047 	if(tp->card_state == Sleeping) {
1048 		strcpy(info->fw_version, "Sleep image");
1049 	} else {
1050 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1051 		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1052 			strcpy(info->fw_version, "Unknown runtime");
1053 		} else {
1054 			strncpy(info->fw_version, (char *) &xp_resp[1], 32);
1055 			info->fw_version[31] = 0;
1056 		}
1057 	}
1058 
1059 	strcpy(info->driver, DRV_MODULE_NAME);
1060 	strcpy(info->version, DRV_MODULE_VERSION);
1061 	strcpy(info->bus_info, pci_dev->slot_name);
1062 }
1063 
1064 static inline void
typhoon_ethtool_gset(struct typhoon * tp,struct ethtool_cmd * cmd)1065 typhoon_ethtool_gset(struct typhoon *tp, struct ethtool_cmd *cmd)
1066 {
1067 	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1068 				SUPPORTED_Autoneg;
1069 
1070 	switch (tp->xcvr_select) {
1071 	case TYPHOON_XCVR_10HALF:
1072 		cmd->advertising = ADVERTISED_10baseT_Half;
1073 		break;
1074 	case TYPHOON_XCVR_10FULL:
1075 		cmd->advertising = ADVERTISED_10baseT_Full;
1076 		break;
1077 	case TYPHOON_XCVR_100HALF:
1078 		cmd->advertising = ADVERTISED_100baseT_Half;
1079 		break;
1080 	case TYPHOON_XCVR_100FULL:
1081 		cmd->advertising = ADVERTISED_100baseT_Full;
1082 		break;
1083 	case TYPHOON_XCVR_AUTONEG:
1084 		cmd->advertising = ADVERTISED_10baseT_Half |
1085 					    ADVERTISED_10baseT_Full |
1086 					    ADVERTISED_100baseT_Half |
1087 					    ADVERTISED_100baseT_Full |
1088 					    ADVERTISED_Autoneg;
1089 		break;
1090 	}
1091 
1092 	if(tp->capabilities & TYPHOON_FIBER) {
1093 		cmd->supported |= SUPPORTED_FIBRE;
1094 		cmd->advertising |= ADVERTISED_FIBRE;
1095 		cmd->port = PORT_FIBRE;
1096 	} else {
1097 		cmd->supported |= SUPPORTED_10baseT_Half |
1098 		    			SUPPORTED_10baseT_Full |
1099 					SUPPORTED_TP;
1100 		cmd->advertising |= ADVERTISED_TP;
1101 		cmd->port = PORT_TP;
1102 	}
1103 
1104 	/* need to get stats to make these link speed/duplex valid */
1105 	typhoon_do_get_stats(tp);
1106 	cmd->speed = tp->speed;
1107 	cmd->duplex = tp->duplex;
1108 	cmd->phy_address = 0;
1109 	cmd->transceiver = XCVR_INTERNAL;
1110 	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1111 		cmd->autoneg = AUTONEG_ENABLE;
1112 	else
1113 		cmd->autoneg = AUTONEG_DISABLE;
1114 	cmd->maxtxpkt = 1;
1115 	cmd->maxrxpkt = 1;
1116 }
1117 
1118 static inline int
typhoon_ethtool_sset(struct typhoon * tp,struct ethtool_cmd * cmd)1119 typhoon_ethtool_sset(struct typhoon *tp, struct ethtool_cmd *cmd)
1120 {
1121 	struct cmd_desc xp_cmd;
1122 	int xcvr;
1123 	int err;
1124 
1125 	if(cmd->autoneg == AUTONEG_ENABLE) {
1126 		xcvr = TYPHOON_XCVR_AUTONEG;
1127 	} else {
1128 		if(cmd->duplex == DUPLEX_HALF) {
1129 			if(cmd->speed == SPEED_10)
1130 				xcvr = TYPHOON_XCVR_10HALF;
1131 			else if(cmd->speed == SPEED_100)
1132 				xcvr = TYPHOON_XCVR_100HALF;
1133 			else
1134 				return -EINVAL;
1135 		} else if(cmd->duplex == DUPLEX_FULL) {
1136 			if(cmd->speed == SPEED_10)
1137 				xcvr = TYPHOON_XCVR_10FULL;
1138 			else if(cmd->speed == SPEED_100)
1139 				xcvr = TYPHOON_XCVR_100FULL;
1140 			else
1141 				return -EINVAL;
1142 		} else
1143 			return -EINVAL;
1144 	}
1145 
1146 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1147 	xp_cmd.parm1 = cpu_to_le16(xcvr);
1148 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1149 	if(err < 0)
1150 		return err;
1151 
1152 	tp->xcvr_select = xcvr;
1153 	if(cmd->autoneg == AUTONEG_ENABLE) {
1154 		tp->speed = 0xff;	/* invalid */
1155 		tp->duplex = 0xff;	/* invalid */
1156 	} else {
1157 		tp->speed = cmd->speed;
1158 		tp->duplex = cmd->duplex;
1159 	}
1160 
1161 	return 0;
1162 }
1163 
/* Dispatch SIOCETHTOOL sub-commands.
 *
 * useraddr points at user memory whose first u32 names the ethtool
 * sub-command; each case then copies its own fixed-size struct to or
 * from userspace. Returns 0 on success, -EFAULT on a failed user copy,
 * -EOPNOTSUPP for unimplemented sub-commands.
 */
static inline int
typhoon_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	u32 ethcmd;

	if(copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
			/* struct is zeroed except for .cmd by the initializer */
			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };

			typhoon_ethtool_gdrvinfo(tp, &info);
			if(copy_to_user(useraddr, &info, sizeof(info)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GSET: {
			struct ethtool_cmd cmd = { ETHTOOL_GSET };

			typhoon_ethtool_gset(tp, &cmd);
			if(copy_to_user(useraddr, &cmd, sizeof(cmd)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_SSET: {
			struct ethtool_cmd cmd;
			if(copy_from_user(&cmd, useraddr, sizeof(cmd)))
				return -EFAULT;

			return typhoon_ethtool_sset(tp, &cmd);
		}
	case ETHTOOL_GLINK:{
			struct ethtool_value edata = { ETHTOOL_GLINK };

			edata.data = netif_carrier_ok(dev) ? 1 : 0;
			if(copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GWOL: {
			struct ethtool_wolinfo wol = { ETHTOOL_GWOL };

			/* translate firmware wake events to ethtool WOL bits */
			if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
				wol.wolopts |= WAKE_PHY;
			if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
				wol.wolopts |= WAKE_MAGIC;
			if(copy_to_user(useraddr, &wol, sizeof(wol)))
				return -EFAULT;
			return 0;
	}
	case ETHTOOL_SWOL: {
			struct ethtool_wolinfo wol;

			if(copy_from_user(&wol, useraddr, sizeof(wol)))
				return -EFAULT;
			/* rebuild the event mask from scratch each time */
			tp->wol_events = 0;
			if(wol.wolopts & WAKE_PHY)
				tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
			if(wol.wolopts & WAKE_MAGIC)
				tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
			return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
1234 
1235 static int
typhoon_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)1236 typhoon_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1237 {
1238 	switch (cmd) {
1239 	case SIOCETHTOOL:
1240 		return typhoon_ethtool_ioctl(dev, (void *) ifr->ifr_data);
1241 	default:
1242 		break;
1243 	}
1244 
1245 	return -EOPNOTSUPP;
1246 }
1247 
1248 static int
typhoon_wait_interrupt(unsigned long ioaddr)1249 typhoon_wait_interrupt(unsigned long ioaddr)
1250 {
1251 	int i, err = 0;
1252 
1253 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1254 		if(readl(ioaddr + TYPHOON_REG_INTR_STATUS) &
1255 		   TYPHOON_INTR_BOOTCMD)
1256 			goto out;
1257 		udelay(TYPHOON_UDELAY);
1258 	}
1259 
1260 	err = -ETIMEDOUT;
1261 
1262 out:
1263 	writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1264 	return err;
1265 }
1266 
1267 #define shared_offset(x)	offsetof(struct typhoon_shared, x)
1268 
1269 static void
typhoon_init_interface(struct typhoon * tp)1270 typhoon_init_interface(struct typhoon *tp)
1271 {
1272 	struct typhoon_interface *iface = &tp->shared->iface;
1273 	dma_addr_t shared_dma;
1274 
1275 	memset(tp->shared, 0, sizeof(struct typhoon_shared));
1276 
1277 	/* The *Hi members of iface are all init'd to zero by the memset().
1278 	 */
1279 	shared_dma = tp->shared_dma + shared_offset(indexes);
1280 	iface->ringIndex = cpu_to_le32(shared_dma);
1281 
1282 	shared_dma = tp->shared_dma + shared_offset(txLo);
1283 	iface->txLoAddr = cpu_to_le32(shared_dma);
1284 	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1285 
1286 	shared_dma = tp->shared_dma + shared_offset(txHi);
1287 	iface->txHiAddr = cpu_to_le32(shared_dma);
1288 	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1289 
1290 	shared_dma = tp->shared_dma + shared_offset(rxBuff);
1291 	iface->rxBuffAddr = cpu_to_le32(shared_dma);
1292 	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1293 					sizeof(struct rx_free));
1294 
1295 	shared_dma = tp->shared_dma + shared_offset(rxLo);
1296 	iface->rxLoAddr = cpu_to_le32(shared_dma);
1297 	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1298 
1299 	shared_dma = tp->shared_dma + shared_offset(rxHi);
1300 	iface->rxHiAddr = cpu_to_le32(shared_dma);
1301 	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1302 
1303 	shared_dma = tp->shared_dma + shared_offset(cmd);
1304 	iface->cmdAddr = cpu_to_le32(shared_dma);
1305 	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1306 
1307 	shared_dma = tp->shared_dma + shared_offset(resp);
1308 	iface->respAddr = cpu_to_le32(shared_dma);
1309 	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1310 
1311 	shared_dma = tp->shared_dma + shared_offset(zeroWord);
1312 	iface->zeroAddr = cpu_to_le32(shared_dma);
1313 
1314 	tp->indexes = &tp->shared->indexes;
1315 	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1316 	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1317 	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1318 	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1319 	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1320 	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1321 	tp->respRing.ringBase = (u8 *) tp->shared->resp;;
1322 
1323 	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1324 	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1325 
1326 	tp->txlo_dma_addr = iface->txLoAddr;
1327 	tp->card_state = Sleeping;
1328 	smp_wmb();
1329 
1330 	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1331 	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1332 
1333 	spin_lock_init(&tp->command_lock);
1334 	spin_lock_init(&tp->state_lock);
1335 }
1336 
1337 static void
typhoon_init_rings(struct typhoon * tp)1338 typhoon_init_rings(struct typhoon *tp)
1339 {
1340 	memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1341 
1342 	tp->txLoRing.lastWrite = 0;
1343 	tp->txHiRing.lastWrite = 0;
1344 	tp->rxLoRing.lastWrite = 0;
1345 	tp->rxHiRing.lastWrite = 0;
1346 	tp->rxBuffRing.lastWrite = 0;
1347 	tp->cmdRing.lastWrite = 0;
1348 	tp->cmdRing.lastWrite = 0;
1349 
1350 	tp->txLoRing.lastRead = 0;
1351 	tp->txHiRing.lastRead = 0;
1352 }
1353 
/* Download the runtime firmware image to the 3XP.
 *
 * Walks the in-kernel image (typhoon_firmware_image): validates the
 * "TYPHOON" file header, hands the boot address and HMAC digest words to
 * the card, then feeds each section to the card in PAGE_SIZE chunks via
 * a single consistent DMA page, waiting for the BOOTCMD interrupt
 * between chunks. The register write ordering in this function is part
 * of the card's boot protocol -- do not reorder.
 *
 * Returns 0 on success, -EINVAL for a bad image, -ENOMEM if the DMA
 * page cannot be allocated, or -ETIMEDOUT if the card stops responding.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	unsigned long ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* 8 bytes: "TYPHOON" plus its NUL terminator */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Temporarily enable + unmask the BOOTCMD interrupt so we can poll
	 * it; the original settings are restored at err_out_irq.
	 */
	irqEnabled = readl(ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = readl(ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Hand the card the image start address and the five HMAC digest
	 * words, then kick off the runtime-image download.
	 */
	writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	writel(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The readl() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   readl(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			csum = le16_to_cpu(csum);

			/* Describe the chunk (length, checksum, card-side
			 * destination, host-side DMA source) then tell the
			 * card the segment is available.
			 */
			writel(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			writel(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			writel(load_addr, ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			writel(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			writel(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			writel(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the final chunk to be consumed before declaring the
	 * download complete.
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   readl(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	writel(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the caller's interrupt enable/mask state */
	writel(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1502 
/* Boot the 3XP processor once the (already downloaded) image has put the
 * card into initial_status. Registers the shared-memory boot record with
 * the card, waits for it to come up RUNNING, clears the doorbell
 * registers, and issues the final BOOT command. The write ordering is
 * part of the boot protocol -- do not reorder.
 *
 * Returns 0 on success, -ETIMEDOUT if any wait fails.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	unsigned long ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* hand the card the bus address of the shared boot record */
	writel(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	writel(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_REG_BOOT_RECORD, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	writel(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	writel(0, ioaddr + TYPHOON_REG_CMD_READY);
	writel(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1537 
/* Reclaim completed Tx descriptors on txRing up to the card's cleared
 * index (*index, little-endian, updated by the card via DMA).
 *
 * Two descriptor types appear in the ring: TYPHOON_TX_DESC entries carry
 * the skb pointer stashed in addr/addrHi (stored by the transmit path),
 * which is freed here; TYPHOON_FRAG_DESC entries carry a DMA mapping,
 * which is unmapped here. Returns the new lastRead value -- the caller
 * is responsible for storing it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			/* NOTE(review): reconstructs the host pointer from the
			 * two 32-bit descriptor words with no byte-swap, and
			 * `unsigned long` is 32 bits on 32-bit platforms --
			 * presumably the Tx path stores the raw pointer the
			 * same way; verify against typhoon_start_tx.
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1573 
1574 static void
typhoon_tx_complete(struct typhoon * tp,struct transmit_ring * txRing,volatile u32 * index)1575 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1576 			volatile u32 * index)
1577 {
1578 	u32 lastRead;
1579 	int numDesc = MAX_SKB_FRAGS + 1;
1580 
1581 	/* This will need changing if we start to use the Hi Tx ring. */
1582 	lastRead = typhoon_clean_tx(tp, txRing, index);
1583 	if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1584 				lastRead, TXLO_ENTRIES) > (numDesc + 2))
1585 		netif_wake_queue(tp->dev);
1586 
1587 	txRing->lastRead = lastRead;
1588 	smp_wmb();
1589 }
1590 
1591 static void
typhoon_recycle_rx_skb(struct typhoon * tp,u32 idx)1592 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1593 {
1594 	struct typhoon_indexes *indexes = tp->indexes;
1595 	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1596 	struct basic_ring *ring = &tp->rxBuffRing;
1597 	struct rx_free *r;
1598 
1599 	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1600 				indexes->rxBuffCleared) {
1601 		/* no room in ring, just drop the skb
1602 		 */
1603 		dev_kfree_skb_any(rxb->skb);
1604 		rxb->skb = NULL;
1605 		return;
1606 	}
1607 
1608 	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1609 	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1610 	r->virtAddr = idx;
1611 	r->physAddr = cpu_to_le32(rxb->dma_addr);
1612 
1613 	/* Tell the card about it */
1614 	wmb();
1615 	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1616 }
1617 
/* Allocate a fresh PKT_BUF_SZ skb for Rx slot idx, DMA-map it, and post
 * it on the free-buffer ring for the card. Returns 0 on success or
 * -ENOMEM if the free ring is full or the allocation fails (rxb->skb is
 * left NULL in that case, so typhoon_fill_free_ring can retry later).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* bail if advancing lastWrite would collide with the card's
	 * cleared index (ring full)
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared)
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->tail,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- the descriptor must be globally
	 * visible before the index update
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1664 
/* Process received packets on rxRing from *cleared up to *ready (both
 * little-endian indexes maintained in the shared page), limited by the
 * NAPI budget.
 *
 * Small packets (< rx_copybreak) are copied into a freshly allocated,
 * IP-aligned skb and the original buffer is recycled; larger packets are
 * handed up directly and the slot is refilled. Hardware checksum results
 * are honored for TCP/UDP-over-IP; VLAN-tagged frames go through the
 * VLAN accel path when a group is registered. Returns the number of
 * packets delivered and stores the new cleared index.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
	   volatile u32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	u32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* addr holds the rxbuffers[] slot index, not a bus address */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		/* advance (with wrap) past this descriptor */
		rxaddr += sizeof(struct rx_desc);
		rxaddr %= RX_ENTRIES * sizeof(struct rx_desc);

		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copy path: align IP header, copy out of the DMA
			 * buffer, and give the original buffer back to the
			 * card untouched
			 */
			new_skb->dev = tp->dev;
			skb_reserve(new_skb, 2);
			pci_dma_sync_single(tp->pdev, dma_addr, PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
			eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand-up path: unmap and pass the buffer itself,
			 * then try to refill the slot
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* trust hardware checksum only for good-IP + good-TCP or
		 * good-IP + good-UDP combinations
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock protects tp->vlgrp against unregistration */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1743 
1744 static void
typhoon_fill_free_ring(struct typhoon * tp)1745 typhoon_fill_free_ring(struct typhoon *tp)
1746 {
1747 	u32 i;
1748 
1749 	for(i = 0; i < RXENT_ENTRIES; i++) {
1750 		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1751 		if(rxb->skb)
1752 			continue;
1753 		if(typhoon_alloc_rx_skb(tp, i) < 0)
1754 			break;
1755 	}
1756 }
1757 
/* NAPI poll handler. Drains pending command responses and Tx
 * completions, then receives from the Hi and Lo Rx rings within the
 * budget, refills the free-buffer ring if the card has drained it, and
 * -- when all work is done -- exits polling mode and unmasks interrupts.
 * Returns 0 when done, 1 to be polled again.
 */
static int
typhoon_poll(struct net_device *dev, int *total_budget)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct typhoon_indexes *indexes = tp->indexes;
	int orig_budget = *total_budget;
	int budget, work_done, done;

	/* make sure we see index updates DMA'd by the card */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	if(orig_budget > dev->quota)
		orig_budget = dev->quota;

	budget = orig_budget;
	work_done = 0;
	done = 1;

	/* service the high-priority ring first */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
			   		&indexes->rxHiCleared, budget);
		budget -= work_done;
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
			   		&indexes->rxLoCleared, budget);
	}

	if(work_done) {
		*total_budget -= work_done;
		dev->quota -= work_done;

		/* budget exhausted -- ask to be polled again */
		if(work_done >= orig_budget)
			done = 0;
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if(done) {
		/* leave polling mode and re-enable interrupts */
		netif_rx_complete(dev);
		writel(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return (done ? 0 : 1);
}
1812 
/* Interrupt handler. Acknowledges the card's interrupt, masks all
 * further interrupts, and hands the real work off to typhoon_poll() via
 * NAPI scheduling. The ack-then-mask ordering matters: the status write
 * clears the asserted bits before interrupts are masked for polling.
 */
static void
typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	unsigned long ioaddr = dev->base_addr;
	u32 intr_status;

	intr_status = readl(ioaddr + TYPHOON_REG_INTR_STATUS);
	/* not ours -- shared interrupt line */
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return;

	/* ack everything we saw */
	writel(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if(netif_rx_schedule_prep(dev)) {
		/* mask all interrupts until the poll loop finishes */
		writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__netif_rx_schedule(dev);
	} else {
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
                       dev->name);
	}
}
1835 
1836 static void
typhoon_free_rx_rings(struct typhoon * tp)1837 typhoon_free_rx_rings(struct typhoon *tp)
1838 {
1839 	u32 i;
1840 
1841 	for(i = 0; i < RXENT_ENTRIES; i++) {
1842 		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1843 		if(rxb->skb) {
1844 			pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1845 				       PCI_DMA_FROMDEVICE);
1846 			dev_kfree_skb(rxb->skb);
1847 			rxb->skb = NULL;
1848 		}
1849 	}
1850 }
1851 
/* Arm the requested wake events on the 3XP and put both the firmware
 * and the PCI device into the given power state.  Returns 0 on success
 * or a negative errno.  Note: leaves the PCI device disabled; the
 * caller is expected to go through typhoon_wakeup() to recover.
 */
static int
typhoon_sleep(struct typhoon *tp, int state, u16 events)
{
	struct pci_dev *pdev = tp->pdev;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	/* Tell the firmware which events may wake us (e.g. magic packet). */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	/* Ask the 3XP to enter its sleep state... */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	/* ...and wait for the status register to confirm it got there. */
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	/* Finally, drop the PCI device itself into the low-power state. */
	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1889 
1890 static int
typhoon_wakeup(struct typhoon * tp,int wait_type)1891 typhoon_wakeup(struct typhoon *tp, int wait_type)
1892 {
1893 	struct pci_dev *pdev = tp->pdev;
1894 	unsigned long ioaddr = tp->ioaddr;
1895 
1896 	pci_set_power_state(pdev, 0);
1897 	pci_restore_state(pdev, tp->pci_state);
1898 
1899 	/* Post 2.x.x versions of the Sleep Image require a reset before
1900 	 * we can download the Runtime Image. But let's not make users of
1901 	 * the old firmware pay for the reset.
1902 	 */
1903 	writel(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1904 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1905 			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1906 		return typhoon_reset(ioaddr, wait_type);
1907 
1908 	return 0;
1909 }
1910 
/* Download the runtime firmware to the 3XP, boot it, and configure the
 * card for normal operation (max packet size, MAC address, IRQ
 * coalescing, transceiver, VLAN ethertype, offloads, Rx mode) before
 * enabling the Tx and Rx engines.  On any failure the adapter is reset
 * and the rings reinitialized.  Returns 0 on success, negative errno
 * on error.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		printk("%s: cannot load runtime on 3XP\n", tp->name);
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk("%s: cannot boot 3XP\n", tp->name);
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC address is sent in wire (big-endian) order, split into a
	 * 16-bit and a 32-bit chunk.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* tp->offload is protected by state_lock; the same offload mask
	 * is written to both parm2 and parm3.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish the Running state before unmasking interrupts so other
	 * CPUs see a consistent card_state.
	 */
	tp->card_state = Running;
	smp_wmb();

	writel(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2004 
/* Quiesce the running firmware: disable interrupts and Rx, drain Tx,
 * snapshot the statistics, halt the 3XP, and reset it.  wait_type
 * controls how typhoon_reset() waits.  Returns 0 on success or
 * -ETIMEDOUT if the final reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	/* Reset regardless -- errors above are logged but not fatal. */
	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2068 
2069 static void
typhoon_tx_timeout(struct net_device * dev)2070 typhoon_tx_timeout(struct net_device *dev)
2071 {
2072 	struct typhoon *tp = (struct typhoon *) dev->priv;
2073 
2074 	if(typhoon_reset(dev->base_addr, WaitNoSleep) < 0) {
2075 		printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2076 					dev->name);
2077 		goto truely_dead;
2078 	}
2079 
2080 	/* If we ever start using the Hi ring, it will need cleaning too */
2081 	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2082 	typhoon_free_rx_rings(tp);
2083 
2084 	if(typhoon_start_runtime(tp) < 0) {
2085 		printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2086 					dev->name);
2087 		goto truely_dead;
2088         }
2089 
2090 	netif_wake_queue(dev);
2091 	return;
2092 
2093 truely_dead:
2094 	/* Reset the hardware, and turn off carrier to avoid more timeouts */
2095 	typhoon_reset(dev->base_addr, NoWait);
2096 	netif_carrier_off(dev);
2097 }
2098 
2099 static int
typhoon_open(struct net_device * dev)2100 typhoon_open(struct net_device *dev)
2101 {
2102 	struct typhoon *tp = (struct typhoon *) dev->priv;
2103 	int err;
2104 
2105 	err = typhoon_wakeup(tp, WaitSleep);
2106 	if(err < 0) {
2107 		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
2108 		goto out_sleep;
2109 	}
2110 
2111 	err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
2112 				dev->name, dev);
2113 	if(err < 0)
2114 		goto out_sleep;
2115 
2116 	err = typhoon_start_runtime(tp);
2117 	if(err < 0)
2118 		goto out_irq;
2119 
2120 	netif_start_queue(dev);
2121 	return 0;
2122 
2123 out_irq:
2124 	free_irq(dev->irq, dev);
2125 
2126 out_sleep:
2127 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2128 		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
2129 				dev->name);
2130 		typhoon_reset(dev->base_addr, NoWait);
2131 		goto out;
2132 	}
2133 
2134 	if(typhoon_sleep(tp, 3, 0) < 0)
2135 		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
2136 
2137 out:
2138 	return err;
2139 }
2140 
/* net_device stop hook: halt the runtime, release the IRQ and buffers,
 * and park the card in its low-power sleep image.  Always returns 0;
 * failures along the way are logged but not fatal.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;

	/* Stop the stack from handing us packets before quiescing. */
	netif_stop_queue(dev);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Drop all Rx buffers and leave the rings in a pristine state. */
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	/* state 3 == D3, no wake events while closed */
	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2166 
2167 #ifdef CONFIG_PM
2168 static int
typhoon_resume(struct pci_dev * pdev)2169 typhoon_resume(struct pci_dev *pdev)
2170 {
2171 	struct net_device *dev = pci_get_drvdata(pdev);
2172 	struct typhoon *tp = (struct typhoon *) dev->priv;
2173 
2174 	/* If we're down, resume when we are upped.
2175 	 */
2176 	if(!netif_running(dev))
2177 		return 0;
2178 
2179 	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2180 		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
2181 				dev->name);
2182 		goto reset;
2183 	}
2184 
2185 	if(typhoon_start_runtime(tp) < 0) {
2186 		printk(KERN_ERR "%s: critical: could not start runtime in "
2187 				"resume\n", dev->name);
2188 		goto reset;
2189 	}
2190 
2191 	netif_device_attach(dev);
2192 	netif_start_queue(dev);
2193 	return 0;
2194 
2195 reset:
2196 	typhoon_reset(dev->base_addr, NoWait);
2197 	return -EBUSY;
2198 }
2199 
/* PCI suspend hook.  Stop the runtime image, reboot the sleep image,
 * reprogram the MAC address and a minimal Rx filter for wake-up, then
 * sleep with the user-configured wake events.  If anything fails we
 * resume the device so it is left usable, and return -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* Refuse WAKE_MAGIC together with VLANs -- presumably related to
	 * the firmware VLAN-tag stripping noted in the file header; see
	 * KNOWN ISSUES.  TODO confirm exact reason.
	 */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	/* Re-tell the sleep image our MAC so directed wake frames match. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	/* Only directed and broadcast frames should be able to wake us. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2264 
/* PCI driver enable_wake hook -- defer entirely to the PCI core. */
static int
typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
	return pci_enable_wake(pdev, state, enable);
}
2270 #endif
2271 
2272 static int __devinit
typhoon_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)2273 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2274 {
2275 	static int did_version = 0;
2276 	struct net_device *dev;
2277 	struct typhoon *tp;
2278 	int card_id = (int) ent->driver_data;
2279 	unsigned long ioaddr;
2280 	void *shared;
2281 	dma_addr_t shared_dma;
2282 	struct cmd_desc xp_cmd;
2283 	struct resp_desc xp_resp[3];
2284 	int i;
2285 	int err = 0;
2286 
2287 	if(!did_version++)
2288 		printk(KERN_INFO "%s", version);
2289 
2290 	dev = alloc_etherdev(sizeof(*tp));
2291 	if(dev == NULL) {
2292 		printk(ERR_PFX "%s: unable to alloc new net device\n",
2293 		       pdev->slot_name);
2294 		err = -ENOMEM;
2295 		goto error_out;
2296 	}
2297 	SET_MODULE_OWNER(dev);
2298 
2299 	err = pci_enable_device(pdev);
2300 	if(err < 0) {
2301 		printk(ERR_PFX "%s: unable to enable device\n",
2302 		       pdev->slot_name);
2303 		goto error_out_dev;
2304 	}
2305 
2306 	/* If we transitioned from D3->D0 in pci_enable_device(),
2307 	 * we lost our configuration and need to restore it to the
2308 	 * conditions at boot.
2309 	 */
2310 	pci_restore_state(pdev, NULL);
2311 
2312 	err = pci_set_dma_mask(pdev, 0xffffffffULL);
2313 	if(err < 0) {
2314 		printk(ERR_PFX "%s: No usable DMA configuration\n",
2315 		       pdev->slot_name);
2316 		goto error_out_dev;
2317 	}
2318 
2319 	/* sanity checks, resource #1 is our mmio area
2320 	 */
2321 	if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2322 		printk(ERR_PFX
2323 		       "%s: region #1 not a PCI MMIO resource, aborting\n",
2324 		       pdev->slot_name);
2325 		err = -ENODEV;
2326 		goto error_out_dev;
2327 	}
2328 	if(pci_resource_len(pdev, 1) < 128) {
2329 		printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2330 		       pdev->slot_name);
2331 		err = -ENODEV;
2332 		goto error_out_dev;
2333 	}
2334 
2335 	err = pci_request_regions(pdev, "typhoon");
2336 	if(err < 0) {
2337 		printk(ERR_PFX "%s: could not request regions\n",
2338 		       pdev->slot_name);
2339 		goto error_out_dev;
2340 	}
2341 
2342 	pci_set_master(pdev);
2343 	pci_set_mwi(pdev);
2344 
2345 	/* map our MMIO region
2346 	 */
2347 	ioaddr = pci_resource_start(pdev, 1);
2348 	ioaddr = (unsigned long) ioremap(ioaddr, 128);
2349 	if(!ioaddr) {
2350 		printk(ERR_PFX "%s: cannot remap MMIO, aborting\n",
2351 		       pdev->slot_name);
2352 		err = -EIO;
2353 		goto error_out_regions;
2354 	}
2355 	dev->base_addr = ioaddr;
2356 
2357 	/* allocate pci dma space for rx and tx descriptor rings
2358 	 */
2359 	shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2360 				      &shared_dma);
2361 	if(!shared) {
2362 		printk(ERR_PFX "%s: could not allocate DMA memory\n",
2363 		       pdev->slot_name);
2364 		err = -ENOMEM;
2365 		goto error_out_remap;
2366 	}
2367 
2368 	dev->irq = pdev->irq;
2369 	tp = dev->priv;
2370 	tp->shared = (struct typhoon_shared *) shared;
2371 	tp->shared_dma = shared_dma;
2372 	tp->pdev = pdev;
2373 	tp->tx_pdev = pdev;
2374 	tp->ioaddr = dev->base_addr;
2375 	tp->tx_ioaddr = dev->base_addr;
2376 	tp->dev = dev;
2377 
2378 	/* need to be able to restore PCI state after a suspend */
2379 	pci_save_state(pdev, tp->pci_state);
2380 
2381 	/* Init sequence:
2382 	 * 1) Reset the adapter to clear any bad juju
2383 	 * 2) Reload the sleep image
2384 	 * 3) Boot the sleep image
2385 	 * 4) Get the hardware address.
2386 	 * 5) Put the card to sleep.
2387 	 */
2388 	if(typhoon_reset(ioaddr, WaitSleep) < 0) {
2389 		printk(ERR_PFX "%s: could not reset 3XP\n", pdev->slot_name);
2390 		err = -EIO;
2391 		goto error_out_dma;
2392 	}
2393 
2394 	/* dev->name is not valid until we register, but we need to
2395 	 * use some common routines to initialize the card. So that those
2396 	 * routines print the right name, we keep our oun pointer to the name
2397 	 */
2398 	tp->name = pdev->slot_name;
2399 
2400 	typhoon_init_interface(tp);
2401 	typhoon_init_rings(tp);
2402 
2403 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2404 		printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2405 		       pdev->slot_name);
2406 		err = -EIO;
2407 		goto error_out_reset;
2408 	}
2409 
2410 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2411 	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2412 		printk(ERR_PFX "%s: cannot read MAC address\n",
2413 		       pdev->slot_name);
2414 		err = -EIO;
2415 		goto error_out_reset;
2416 	}
2417 
2418 	*(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2419 	*(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2420 
2421 	if(!is_valid_ether_addr(dev->dev_addr)) {
2422 		printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2423 		       "aborting\n", pdev->slot_name);
2424 		goto error_out_reset;
2425 	}
2426 
2427 	/* Read the Sleep Image version last, so the response is valid
2428 	 * later when we print out the version reported.
2429 	 */
2430 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2431 	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2432 		printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2433 			pdev->slot_name);
2434 		goto error_out_reset;
2435 	}
2436 
2437 	tp->capabilities = typhoon_card_info[card_id].capabilities;
2438 	tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2439 
2440 	/* Typhoon 1.0 Sleep Images return one response descriptor to the
2441 	 * READ_VERSIONS command. Those versions are OK after waking up
2442 	 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2443 	 * seem to need a little extra help to get started. Since we don't
2444 	 * know how to nudge it along, just kick it.
2445 	 */
2446 	if(xp_resp[0].numDesc != 0)
2447 		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2448 
2449 	if(typhoon_sleep(tp, 3, 0) < 0) {
2450 		printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2451 		       pdev->slot_name);
2452 		err = -EIO;
2453 		goto error_out_reset;
2454 	}
2455 
2456 	/* The chip-specific entries in the device structure. */
2457 	dev->open		= typhoon_open;
2458 	dev->hard_start_xmit	= typhoon_start_tx;
2459 	dev->stop		= typhoon_close;
2460 	dev->set_multicast_list	= typhoon_set_rx_mode;
2461 	dev->tx_timeout		= typhoon_tx_timeout;
2462 	dev->poll		= typhoon_poll;
2463 	dev->weight		= 16;
2464 	dev->watchdog_timeo	= TX_TIMEOUT;
2465 	dev->get_stats		= typhoon_get_stats;
2466 	dev->set_mac_address	= typhoon_set_mac_address;
2467 	dev->do_ioctl		= typhoon_ioctl;
2468 	dev->vlan_rx_register	= typhoon_vlan_rx_register;
2469 	dev->vlan_rx_kill_vid	= typhoon_vlan_rx_kill_vid;
2470 
2471 	/* We can handle scatter gather, up to 16 entries, and
2472 	 * we can do IP checksumming (only version 4, doh...)
2473 	 */
2474 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2475 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2476 	dev->features |= NETIF_F_TSO;
2477 
2478 	if(register_netdev(dev) < 0)
2479 		goto error_out_reset;
2480 
2481 	/* fixup our local name */
2482 	tp->name = dev->name;
2483 
2484 	pci_set_drvdata(pdev, dev);
2485 
2486 	printk(KERN_INFO "%s: %s at 0x%lx, ",
2487 	       dev->name, typhoon_card_info[card_id].name, ioaddr);
2488 	for(i = 0; i < 5; i++)
2489 		printk("%2.2x:", dev->dev_addr[i]);
2490 	printk("%2.2x\n", dev->dev_addr[i]);
2491 
2492 	/* xp_resp still contains the response to the READ_VERSIONS command.
2493 	 * For debugging, let the user know what version he has.
2494 	 */
2495 	if(xp_resp[0].numDesc == 0) {
2496 		/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2497 		 * of version is Month/Day of build.
2498 		 */
2499 		u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2500 		printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2501 			"%02u/%02u/2000\n", dev->name, monthday >> 8,
2502 			monthday & 0xff);
2503 	} else if(xp_resp[0].numDesc == 2) {
2504 		/* This is the Typhoon 1.1+ type Sleep Image
2505 		 */
2506 		u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2507 		u8 *ver_string = (u8 *) &xp_resp[1];
2508 		ver_string[25] = 0;
2509 		printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2510 			"%u.%u.%u.%u %s\n", dev->name, HIPQUAD(sleep_ver),
2511 			ver_string);
2512 	} else {
2513 		printk(KERN_WARNING "%s: Unknown Sleep Image version "
2514 			"(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2515 			le32_to_cpu(xp_resp[0].parm2));
2516 	}
2517 
2518 	return 0;
2519 
2520 error_out_reset:
2521 	typhoon_reset(ioaddr, NoWait);
2522 
2523 error_out_dma:
2524 	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2525 			    shared, shared_dma);
2526 error_out_remap:
2527 	iounmap((void *) ioaddr);
2528 error_out_regions:
2529 	pci_release_regions(pdev);
2530 error_out_dev:
2531 	kfree(dev);
2532 error_out:
2533 	return err;
2534 }
2535 
2536 static void __devexit
typhoon_remove_one(struct pci_dev * pdev)2537 typhoon_remove_one(struct pci_dev *pdev)
2538 {
2539 	struct net_device *dev = pci_get_drvdata(pdev);
2540 	struct typhoon *tp = (struct typhoon *) (dev->priv);
2541 
2542 	unregister_netdev(dev);
2543 	pci_set_power_state(pdev, 0);
2544 	pci_restore_state(pdev, tp->pci_state);
2545 	typhoon_reset(dev->base_addr, NoWait);
2546 	iounmap((char *) (dev->base_addr));
2547 	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2548 			    tp->shared, tp->shared_dma);
2549 	pci_release_regions(pdev);
2550 	pci_disable_device(pdev);
2551 	pci_set_drvdata(pdev, NULL);
2552 	kfree(dev);
2553 }
2554 
/* Hook the driver into the PCI core; the power-management entry points
 * are only compiled in when the kernel has CONFIG_PM.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
	.enable_wake	= typhoon_enable_wake,
#endif
};
2566 
/* Module entry point: register the driver with the PCI subsystem. */
static int __init
typhoon_init(void)
{
	return pci_module_init(&typhoon_driver);
}
2572 
/* Module exit point: unregister from the PCI subsystem. */
static void __exit
typhoon_cleanup(void)
{
	pci_unregister_driver(&typhoon_driver);
}
2578 
/* Wire up module load/unload to the init and cleanup routines above. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);