/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other Tigon based cards.
 *
 * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * Thanks to Alteon and 3Com for providing hardware and documentation
 * enabling me to write this driver.
 *
 * A mailing list for discussing the use of this driver has been
 * set up; please subscribe to the list if you have any questions
 * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
 * see how to subscribe.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Additional credits:
 *   Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
 *       dump support. The trace dump support has not been
 *       integrated yet however.
 *   Troy Benjegerdes: Big Endian (PPC) patches.
 *   Nate Stahl: Better out of memory handling and stats support.
 *   Aman Singla: Nasty race between interrupt handler and tx code dealing
 *                with 'testing the tx_ret_csm and setting tx_full'
 *   David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
 *                                       infrastructure and Sparc support
 *   Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
 *                              driver under Linux/Sparc64
 *   Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
 *                                       ETHTOOL_GDRVINFO support
 *   Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
 *                                       handler and close() cleanup.
 *   Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
 *                                       memory mapped IO is enabled to
 *                                       make the driver work on RS/6000.
 *   Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
 *                                       where the driver would disable
 *                                       bus master mode if it had to disable
 *                                       write and invalidate.
 *   Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
 *                                       endian systems.
 *   Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
 *                                    rx producer index when
 *                                    flushing the Jumbo ring.
 *   Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
 *                                   driver init path.
 *   Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
#endif

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif

#include <net/sock.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>


#undef INDEX_DEBUG

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap)	0
#define ACE_TX_RING_ENTRIES(ap)	MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap)	ap->tx_ring_entries
#endif
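
/*
 * With CONFIG_ACENIC_OMIT_TIGON_I the Tigon I test above constant-folds
 * to 0, so the compiler can discard the Tigon I code paths entirely.
 */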

#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON		0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE	0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER	0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985	0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR		0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T	0x630a
#endif


/*
 * Farallon used the DEC vendor ID by mistake and they seem not
 * to care - stinky!
 */
#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T	0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI		0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
#endif

#if LINUX_VERSION_CODE >= 0x20400
static struct pci_device_id acenic_pci_tbl[] __initdata = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon used the DEC vendor ID on their cards incorrectly,
	 * then later Alteon's ID.
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
#endif


#ifndef MODULE_LICENSE
#define MODULE_LICENSE(a)
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef __exit
#define __exit
#endif

#ifndef __devinit
#define __devinit	__init
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES	L1_CACHE_BYTES
#endif

#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev)		do{} while(0)
#define ACE_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
#define ACE_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
#else
#define ACE_MOD_INC_USE_COUNT		do{} while(0)
#define ACE_MOD_DEC_USE_COUNT		do{} while(0)
#endif
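
/*
 * When SET_MODULE_OWNER() exists the networking core takes care of the
 * module use count for us, so the explicit ACE_MOD_*_USE_COUNT macros
 * become no-ops.
 */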


#if LINUX_VERSION_CODE >= 0x2051c
#define ace_sync_irq(irq)	synchronize_irq(irq)
#else
#define ace_sync_irq(irq)	synchronize_irq()
#endif

#if (LINUX_VERSION_CODE < 0x02030d)
#define pci_resource_start(dev, bar)	dev->base_address[bar]
#elif (LINUX_VERSION_CODE < 0x02032c)
#define pci_resource_start(dev, bar)	dev->resource[bar].start
#endif

#if (LINUX_VERSION_CODE < 0x02030e)
#define net_device device
#endif


#if (LINUX_VERSION_CODE < 0x02032a)
typedef u32 dma_addr_t;

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
					 dma_addr_t *dma_handle)
{
	void *virt_ptr;

	virt_ptr = kmalloc(size, GFP_KERNEL);
	if (!virt_ptr)
		return NULL;
	*dma_handle = virt_to_bus(virt_ptr);
	return virt_ptr;
}

#define pci_free_consistent(cookie, size, ptr, dma_ptr)	kfree(ptr)
#define pci_map_page(cookie, page, off, size, dir)	\
	virt_to_bus(page_address(page)+(off))
#define pci_unmap_page(cookie, address, size, dir)
#define pci_set_dma_mask(dev, mask)		\
	(((u64)(mask) & 0xffffffff00000000ULL) == 0 ? 0 : -EIO)
#define pci_dma_supported(dev, mask)		\
	(((u64)(mask) & 0xffffffff00000000ULL) == 0 ? 1 : 0)

#elif (LINUX_VERSION_CODE < 0x02040d)

/*
 * 2.4.13 introduced pci_map_page()/pci_unmap_page() - for 2.4.12 and prior,
 * fall back on pci_map_single()/pci_unmap_single().
 *
 * We are guaranteed that the page is mapped at this point since
 * pci_map_page() is only used upon valid struct skb's.
 */
static inline dma_addr_t
pci_map_page(struct pci_dev *cookie, struct page *page, unsigned long off,
	     size_t size, int dir)
{
	void *page_virt;

	page_virt = page_address(page);
	if (!page_virt)
		BUG();
	return pci_map_single(cookie, (page_virt + off), size, dir);
}
#define pci_unmap_page(cookie, dma_addr, size, dir)	\
	pci_unmap_single(cookie, dma_addr, size, dir)
#endif

#if (LINUX_VERSION_CODE < 0x020412)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		0
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do{} while(0)
#define pci_unmap_len(PTR, LEN_NAME)		0
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do{} while(0)
#endif
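
/*
 * Pre-2.4.18 kernels (0x020412) lack the DECLARE_PCI_UNMAP_* bookkeeping
 * macros, so they are stubbed out here to keep the driver building there.
 */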


#if (LINUX_VERSION_CODE < 0x02032b)
/*
 * SoftNet
 *
 * For pre-softnet kernels we need to tell the upper layer not to
 * re-enter start_xmit() while we are in there. However softnet
 * guarantees not to enter while we are in there so there is no need
 * to do the netif_stop_queue() dance unless the transmit queue really
 * gets stuck. This should also improve performance according to tests
 * done by Aman Singla.
 */
#define dev_kfree_skb_irq(a)			dev_kfree_skb(a)
#define netif_wake_queue(dev)			clear_bit(0, &dev->tbusy)
#define netif_stop_queue(dev)			set_bit(0, &dev->tbusy)
#define late_stop_netif_stop_queue(dev)		do{} while(0)
#define early_stop_netif_stop_queue(dev)	test_and_set_bit(0, &dev->tbusy)
#define early_stop_netif_wake_queue(dev)	netif_wake_queue(dev)

static inline void netif_start_queue(struct net_device *dev)
{
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
}

#define ace_mark_net_bh()			mark_bh(NET_BH)
#define netif_queue_stopped(dev)		dev->tbusy
#define netif_running(dev)			dev->start
#define ace_if_down(dev)			do{dev->start = 0;} while(0)

#define tasklet_struct				tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
	queue_task(tasklet, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
				void (*func)(unsigned long),
				unsigned long data)
{
	tasklet->next = NULL;
	tasklet->sync = 0;
	tasklet->routine = (void (*)(void *))func;
	tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet)			do{} while(0)
#else
#define late_stop_netif_stop_queue(dev)		netif_stop_queue(dev)
#define early_stop_netif_stop_queue(dev)	0
#define early_stop_netif_wake_queue(dev)	do{} while(0)
#define ace_mark_net_bh()			do{} while(0)
#define ace_if_down(dev)			do{} while(0)
#endif

#if (LINUX_VERSION_CODE >= 0x02031b)
#define NEW_NETINIT
#define ACE_PROBE_ARG				void
#else
#define ACE_PROBE_ARG				struct net_device *dev
#endif

#ifndef min_t
#define min_t(type,a,b)	(((a)<(b))?(a):(b))
#endif

#ifndef ARCH_HAS_PREFETCHW
#ifndef prefetchw
#define prefetchw(x)	do{} while(0)
#endif
#endif

#define ACE_MAX_MOD_PARMS	8
#define BOARD_IDX_STATIC	0
#define BOARD_IDX_OVERFLOW	-1

#if (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)) && \
	defined(NETIF_F_HW_VLAN_RX)
#define ACENIC_DO_VLAN		1
#define ACE_RCB_VLAN_FLAG	RCB_FLG_VLAN_ASSIST
#else
#define ACENIC_DO_VLAN		0
#define ACE_RCB_VLAN_FLAG	0
#endif

#include "acenic.h"

/*
 * These must be defined before the firmware is included.
 */
#define MAX_TEXT_LEN	96*1024
#define MAX_RODATA_LEN	8*1024
#define MAX_DATA_LEN	2*1024

#include "acenic_firmware.h"

#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif

/*
 * This driver currently supports Tigon I and Tigon II based cards
 * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
 * GA620. The driver should also work on the SGI, DEC and Farallon
 * versions of the card, however I have not been able to test that
 * myself.
 *
 * This card is really neat, it supports receive hardware checksumming
 * and jumbo frames (up to 9000 bytes) and does a lot of work in the
 * firmware. Also the programming interface is quite neat, except for
 * the parts dealing with the i2c eeprom on the card ;-)
 *
 * Using jumbo frames:
 *
 * To enable jumbo frames, simply specify an mtu between 1500 and 9000
 * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
 * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
 * interface number and <MTU> being the MTU value.
 *
 * Module parameters:
 *
 * When compiled as a loadable module, the driver allows for a number
 * of module parameters to be specified. The driver supports the
 * following module parameters:
 *
 *  trace=<val> - Firmware trace level. This requires special traced
 *                firmware to replace the firmware supplied with
 *                the driver - for debugging purposes only.
 *
 *  link=<val> - Link state. Normally you want to use the default link
 *               parameters set by the driver. This can be used to
 *               override these in case your switch doesn't negotiate
 *               the link properly. Valid values are:
 *         0x0001 - Force half duplex link.
 *         0x0002 - Do not negotiate line speed with the other end.
 *         0x0010 - 10Mbit/sec link.
 *         0x0020 - 100Mbit/sec link.
 *         0x0040 - 1000Mbit/sec link.
 *         0x0100 - Do not negotiate flow control.
 *         0x0200 - Enable RX flow control Y
 *         0x0400 - Enable TX flow control Y (Tigon II NICs only).
 *         Default value is 0x0270, ie. enable link+flow
 *         control negotiation. Negotiating the highest
 *         possible link speed with RX flow control enabled.
 *
 *         When disabling link speed negotiation, only one link
 *         speed is allowed to be specified!
 *
 *  tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *                       to wait for more packets to arrive before
 *                       interrupting the host, from the time the first
 *                       packet arrives.
 *
 *  rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *                       to wait for more packets to arrive in the receive
 *                       ring, before interrupting the host, after receiving
 *                       the first packet in the ring.
 *
 *  max_tx_desc=<val> - maximum number of transmit descriptors
 *                      (packets) transmitted before interrupting the host.
 *
 *  max_rx_desc=<val> - maximum number of receive descriptors
 *                      (packets) received before interrupting the host.
 *
 *  tx_ratio=<val> - 6 bit value (0 - 63) specifying the split in 64th
 *                   increments of the NIC's on board memory to be used for
 *                   transmit and receive buffers. For the 1MB NIC
 *                   approx. 800KB is available, on the 1/2MB NIC
 *                   approx. 300KB is available. 68KB will always be
 *                   available as a minimum for both directions. The
 *                   default value is a 50/50 split.
 *  dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
 *                   operations, default (1) is to always disable this as
 *                   that is what Alteon does on NT. I have not been able
 *                   to measure any real performance differences with
 *                   this on my systems. Set <val>=0 if you want to
 *                   enable these operations.
 *
 * If you use more than one NIC, specify the parameters for the
 * individual NICs with commas, e.g. trace=0,0x00001fff,0 if you want to
 * run tracing on NIC #2 but not on NICs #1 and #3.
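 *
 * Example (illustrative only - adjust the values to your own setup):
 *
 *    modprobe acenic link=0x40,0x40 max_tx_desc=0,60
 *
 * restricts both NICs to 1000Mbit/sec operation and raises the TX
 * descriptor limit on NIC #2 only (a value of 0 keeps the driver
 * default for that NIC).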
 *
 * TODO:
 *
 * - Proper multicast support.
 * - NIC dump support.
 * - More tuning parameters.
 *
 * The mini ring is not used under Linux and I am not sure it makes sense
 * to actually use it.
 *
 * New interrupt handler strategy:
 *
 * The old interrupt handler worked using the traditional method of
 * replacing an skbuff with a new one when a packet arrives. However
 * the rx rings do not need to contain a static number of buffer
 * descriptors, thus it makes sense to move the memory allocation out
 * of the main interrupt handler and do it in a bottom half handler
 * and only allocate new buffers when the number of buffers in the
 * ring is below a certain threshold. In order to avoid starving the
 * NIC under heavy load it is however necessary to force allocation
 * when hitting a minimum threshold. The strategy for allocation is as
 * follows:
 *
 *     RX_LOW_BUF_THRES    - allocate buffers in the bottom half
 *     RX_PANIC_LOW_THRES  - we are very low on buffers, allocate
 *                           the buffers in the interrupt handler
 *     RX_RING_THRES       - maximum number of buffers in the rx ring
 *     RX_MINI_THRES       - maximum number of buffers in the mini ring
 *     RX_JUMBO_THRES      - maximum number of buffers in the jumbo ring
 *
 * One advantageous side effect of this allocation approach is that the
 * entire rx processing can be done without holding any spin lock
 * since the rx rings and registers are totally independent of the tx
 * ring and its registers. This of course includes the kmalloc's of
 * new skb's. Thus start_xmit can run in parallel with rx processing
 * and the memory allocation on SMP systems.
 *
 * Note that running the skb reallocation in a bottom half opens up
 * another can of races which needs to be handled properly. In
 * particular it can happen that the interrupt handler tries to run
 * the reallocation while the bottom half is either running on another
 * CPU or was interrupted on the same CPU. To get around this the
 * driver uses bitops to prevent the reallocation routines from being
 * reentered.
 *
 * TX handling can also be done without holding any spin lock (wheee,
 * this is fun!) since tx_ret_csm is only written to by the interrupt
 * handler. The case to be aware of is when shutting down the device
 * and cleaning up where it is necessary to make sure that
 * start_xmit() is not running while this is happening. Well DaveM
 * informs me that this case is already protected against ... bye bye
 * Mr. Spin Lock, it was nice to know you.
 *
 * TX interrupts are now partly disabled so the NIC will only generate
 * TX interrupts for the number of coal ticks, not for the number of
 * TX packets in the queue. This should reduce the number of TX only,
 * ie. when no RX processing is done, interrupts seen.
 */

/*
 * Threshold values for RX buffer allocation - the low water marks for
 * when to start refilling the rings are set to 75% of the ring
 * sizes. It seems to make sense to refill the rings entirely from the
 * interrupt handler once it gets below the panic threshold, that way
 * we don't risk that the refilling is moved to another CPU when the
 * one running the interrupt handler just got the slab code hot in its
 * cache.
 */
#define RX_RING_SIZE		72
#define RX_MINI_SIZE		64
#define RX_JUMBO_SIZE		48

#define RX_PANIC_STD_THRES	16
#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES	12
#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES	6
#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
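
/*
 * With the ring sizes above this works out to low water marks of 54
 * (std), 48 (mini) and 36 (jumbo) entries, and panic refills of 24,
 * 18 and 9 entries respectively.
 */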


/*
 * Size of the mini ring entries, basically these just should be big
 * enough to take TCP ACKs
 */
#define ACE_MINI_SIZE		100

#define ACE_MINI_BUFSIZE	(ACE_MINI_SIZE + 2 + 16)
#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 2 + 4 + 16)
#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 2 + 4 + 16)
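
/*
 * The extra bytes on top of the frame size leave headroom for the 2 byte
 * alignment offset and 16 bytes of padding, which the unmap calls below
 * subtract again as (2 + 16); the remaining 4 bytes presumably cover a
 * trailer such as the VLAN tag or FCS.
 */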

/*
 * There seems to be a magic difference in the effect between 995 and 996
 * but little difference between 900 and 995 ... no idea why.
 *
 * There is now a default set of tuning parameters which is set, depending
 * on whether or not the user enables Jumbo frames. It's assumed that if
 * Jumbo frames are enabled, the user wants optimal tuning for that case.
 */
#define DEF_TX_COAL		400 /* 996 */
#define DEF_TX_MAX_DESC		60  /* was 40 */
#define DEF_RX_COAL		120 /* 1000 */
#define DEF_RX_MAX_DESC		25
#define DEF_TX_RATIO		21 /* 24 */

#define DEF_JUMBO_TX_COAL	20
#define DEF_JUMBO_TX_MAX_DESC	60
#define DEF_JUMBO_RX_COAL	30
#define DEF_JUMBO_RX_MAX_DESC	6
#define DEF_JUMBO_TX_RATIO	21

#if tigon2FwReleaseLocal < 20001118
/*
 * Standard firmware and early modifications duplicate
 * IRQ load without this flag (coal timer is never reset).
 * Note that with this flag tx_coal should be less than
 * time to xmit full tx ring.
 * 400usec is not so bad for tx ring size of 128.
 */
#define TX_COAL_INTS_ONLY	1	/* worth it */
#else
/*
 * With modified firmware, this is not necessary, but still useful.
 */
#define TX_COAL_INTS_ONLY	1
#endif

#define DEF_TRACE		0
#define DEF_STAT		(2 * TICKS_PER_SEC)


static int link[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

static char version[] __initdata =
  "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
  " http://home.cern.ch/~jes/gige/acenic.html\n";

static struct net_device *root_dev;

static int probed __initdata = 0;

static void ace_watchdog(struct net_device *dev);

int __devinit acenic_probe(ACE_PROBE_ARG)
{
#ifdef NEW_NETINIT
	struct net_device *dev;
#endif
	struct ace_private *ap;
	struct pci_dev *pdev = NULL;
	int boards_found = 0;
	int version_disp;

	if (probed)
		return -ENODEV;
	probed++;

	if (!pci_present())		/* is PCI support present? */
		return -ENODEV;

	version_disp = 0;

	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {

		if (!((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
		      ((pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE) ||
		       (pdev->device == PCI_DEVICE_ID_ALTEON_ACENIC_COPPER))) &&
		    !((pdev->vendor == PCI_VENDOR_ID_3COM) &&
		      (pdev->device == PCI_DEVICE_ID_3COM_3C985)) &&
		    !((pdev->vendor == PCI_VENDOR_ID_NETGEAR) &&
		      ((pdev->device == PCI_DEVICE_ID_NETGEAR_GA620) ||
		       (pdev->device == PCI_DEVICE_ID_NETGEAR_GA620T))) &&
		    /*
		     * Farallon used the DEC vendor ID on their cards by
		     * mistake for a while
		     */
		    !((pdev->vendor == PCI_VENDOR_ID_DEC) &&
		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX)) &&
		    !((pdev->vendor == PCI_VENDOR_ID_ALTEON) &&
		      (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T)) &&
		    !((pdev->vendor == PCI_VENDOR_ID_SGI) &&
		      (pdev->device == PCI_DEVICE_ID_SGI_ACENIC)))
			continue;

		dev = init_etherdev(NULL, sizeof(struct ace_private));

		if (dev == NULL) {
			printk(KERN_ERR "acenic: Unable to allocate "
			       "net_device structure!\n");
			break;
		}

		SET_MODULE_OWNER(dev);

		if (!dev->priv)
			dev->priv = kmalloc(sizeof(*ap), GFP_KERNEL);
		if (!dev->priv) {
			printk(KERN_ERR "acenic: Unable to allocate memory\n");
			return -ENOMEM;
		}

		ap = dev->priv;
		ap->pdev = pdev;

		dev->open = &ace_open;
		dev->hard_start_xmit = &ace_start_xmit;
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#if ACENIC_DO_VLAN
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->vlan_rx_register = ace_vlan_rx_register;
		dev->vlan_rx_kill_vid = ace_vlan_rx_kill_vid;
#endif
		dev->tx_timeout = &ace_watchdog;
		dev->watchdog_timeo = 5*HZ;
		dev->stop = &ace_close;
		dev->get_stats = &ace_get_stats;
		dev->set_multicast_list = &ace_set_multicast_list;
		dev->do_ioctl = &ace_ioctl;
		dev->set_mac_address = &ace_set_mac_addr;
		dev->change_mtu = &ace_change_mtu;

		/* display version info if adapter is found */
		if (!version_disp) {
			/* set display flag to TRUE so that */
			/* we only display this string ONCE */
			version_disp = 1;
			printk(version);
		}

		if (pci_enable_device(pdev)) {
			kfree(dev);
			continue;
		}

		/*
		 * Enable master mode before we start playing with the
		 * pci_command word since pci_set_master() will modify
		 * it.
		 */
		pci_set_master(pdev);

		pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);

		/* OpenFirmware on Macs does not set this - DOH.. */
		if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
			printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
			       "access - was not enabled by BIOS/Firmware\n",
			       dev->name);
			ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
			pci_write_config_word(ap->pdev, PCI_COMMAND,
					      ap->pci_command);
			wmb();
		}

		pci_read_config_byte(pdev, PCI_LATENCY_TIMER,
				     &ap->pci_latency);
		if (ap->pci_latency <= 0x40) {
			ap->pci_latency = 0x40;
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
					      ap->pci_latency);
		}

		/*
		 * Remap the regs into kernel space - this is abuse of
		 * dev->base_addr since it was meant for I/O port
		 * addresses but who gives a damn.
		 */
		dev->base_addr = pci_resource_start(pdev, 0);
		ap->regs = (struct ace_regs *)ioremap(dev->base_addr, 0x4000);
		if (!ap->regs) {
			printk(KERN_ERR "%s: Unable to map I/O register, "
			       "AceNIC %i will be disabled.\n",
			       dev->name, boards_found);
			break;
		}

		switch(pdev->vendor) {
		case PCI_VENDOR_ID_ALTEON:
			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
				strncpy(ap->name, "Farallon PN9100-T "
					"Gigabit Ethernet", sizeof(ap->name));
				printk(KERN_INFO "%s: Farallon PN9100-T ",
				       dev->name);
			} else {
				strncpy(ap->name, "AceNIC Gigabit Ethernet",
					sizeof(ap->name));
				printk(KERN_INFO "%s: Alteon AceNIC ",
				       dev->name);
			}
			break;
		case PCI_VENDOR_ID_3COM:
			strncpy(ap->name, "3Com 3C985 Gigabit Ethernet",
				sizeof(ap->name));
			printk(KERN_INFO "%s: 3Com 3C985 ", dev->name);
			break;
		case PCI_VENDOR_ID_NETGEAR:
			strncpy(ap->name, "NetGear GA620 Gigabit Ethernet",
				sizeof(ap->name));
			printk(KERN_INFO "%s: NetGear GA620 ", dev->name);
			break;
		case PCI_VENDOR_ID_DEC:
			if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
				strncpy(ap->name, "Farallon PN9000-SX "
					"Gigabit Ethernet", sizeof(ap->name));
				printk(KERN_INFO "%s: Farallon PN9000-SX ",
				       dev->name);
				break;
			}
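			/* Fall through: other DEC-ID cards get the SGI label */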
		case PCI_VENDOR_ID_SGI:
			strncpy(ap->name, "SGI AceNIC Gigabit Ethernet",
				sizeof(ap->name));
			printk(KERN_INFO "%s: SGI AceNIC ", dev->name);
			break;
		default:
			strncpy(ap->name, "Unknown AceNIC based Gigabit "
				"Ethernet", sizeof(ap->name));
			printk(KERN_INFO "%s: Unknown AceNIC ", dev->name);
			break;
		}
		ap->name[sizeof(ap->name) - 1] = '\0';
		printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
#ifdef __sparc__
		printk("irq %s\n", __irq_itoa(pdev->irq));
#else
		printk("irq %i\n", pdev->irq);
#endif

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
		if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
			printk(KERN_ERR "%s: Driver compiled without Tigon I"
			       " support - NIC disabled\n", dev->name);
			ace_init_cleanup(dev);
			kfree(dev);
			continue;
		}
#endif

		if (ace_allocate_descriptors(dev)) {
			/*
			 * ace_allocate_descriptors() calls
			 * ace_init_cleanup() on error.
			 */
			kfree(dev);
			continue;
		}

#ifdef MODULE
		if (boards_found >= ACE_MAX_MOD_PARMS)
			ap->board_idx = BOARD_IDX_OVERFLOW;
		else
			ap->board_idx = boards_found;
#else
		ap->board_idx = BOARD_IDX_STATIC;
#endif

		if (ace_init(dev)) {
			/*
			 * ace_init() calls ace_init_cleanup() on error.
			 */
			kfree(dev);
			continue;
		}

		if (ap->pci_using_dac)
			dev->features |= NETIF_F_HIGHDMA;

		boards_found++;
	}

	/*
	 * If we're at this point we're going through ace_probe() for
	 * the first time. Return success (0) if we've initialized 1
	 * or more boards. Otherwise, return failure (-ENODEV).
	 */

	if (boards_found > 0)
		return 0;
	else
		return -ENODEV;
}


#ifdef MODULE
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(tx_ratio, "1-" __MODULE_STRING(8) "i");
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to coalesce");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to coalesce");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
#endif


static void __exit ace_module_cleanup(void)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct net_device *next;
	short i;

	while (root_dev) {
		ap = root_dev->priv;
		next = ap->next;

		regs = ap->regs;

		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		/*
		 * This clears any pending interrupts
		 */
		writel(1, &regs->Mb0Lo);
		readl(&regs->CpuCtrl);	/* flush */

		/*
		 * Make sure no other CPUs are processing interrupts
		 * on the card before the buffers are being released.
		 * Otherwise one might experience some `interesting'
		 * effects.
		 *
		 * Then release the RX buffers - jumbo buffers were
		 * already released in ace_close().
		 */
		ace_sync_irq(root_dev->irq);

		for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;

			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_std_skbuff[i];
				mapping = pci_unmap_addr(ringp, mapping);
				pci_unmap_page(ap->pdev, mapping,
					       ACE_STD_BUFSIZE - (2 + 16),
					       PCI_DMA_FROMDEVICE);

				ap->rx_std_ring[i].size = 0;
				ap->skb->rx_std_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}
		if (ap->version >= 2) {
			for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
				struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;

				if (skb) {
					struct ring_info *ringp;
					dma_addr_t mapping;

					ringp = &ap->skb->rx_mini_skbuff[i];
					mapping = pci_unmap_addr(ringp, mapping);
					pci_unmap_page(ap->pdev, mapping,
						       ACE_MINI_BUFSIZE - (2 + 16),
						       PCI_DMA_FROMDEVICE);

					ap->rx_mini_ring[i].size = 0;
					ap->skb->rx_mini_skbuff[i].skb = NULL;
					dev_kfree_skb(skb);
				}
			}
		}
		for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_jumbo_skbuff[i];
				mapping = pci_unmap_addr(ringp, mapping);
				pci_unmap_page(ap->pdev, mapping,
					       ACE_JUMBO_BUFSIZE - (2 + 16),
					       PCI_DMA_FROMDEVICE);

				ap->rx_jumbo_ring[i].size = 0;
				ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}

		ace_init_cleanup(root_dev);
		kfree(root_dev);
		root_dev = next;
	}
}


int __init ace_module_init(void)
{
	int status;

	root_dev = NULL;

#ifdef NEW_NETINIT
	status = acenic_probe();
#else
	status = acenic_probe(NULL);
#endif
	return status;
}


#if (LINUX_VERSION_CODE < 0x02032a)
#ifdef MODULE
int init_module(void)
{
	return ace_module_init();
}


void cleanup_module(void)
{
	ace_module_cleanup();
}
#endif
#else
module_init(ace_module_init);
module_exit(ace_module_cleanup);
#endif


static void ace_free_descriptors(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	int size;

	if (ap->rx_std_ring != NULL) {
		size = (sizeof(struct rx_desc) *
			(RX_STD_RING_ENTRIES +
			 RX_JUMBO_RING_ENTRIES +
			 RX_MINI_RING_ENTRIES +
			 RX_RETURN_RING_ENTRIES));
		pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
				    ap->rx_ring_base_dma);
		ap->rx_std_ring = NULL;
		ap->rx_jumbo_ring = NULL;
		ap->rx_mini_ring = NULL;
		ap->rx_return_ring = NULL;
	}
	if (ap->evt_ring != NULL) {
		size = (sizeof(struct event) * EVT_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->evt_ring,
				    ap->evt_ring_dma);
		ap->evt_ring = NULL;
	}
	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->tx_ring,
				    ap->tx_ring_dma);
	}
	ap->tx_ring = NULL;

	if (ap->evt_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->evt_prd, ap->evt_prd_dma);
		ap->evt_prd = NULL;
	}
	if (ap->rx_ret_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->rx_ret_prd,
				    ap->rx_ret_prd_dma);
		ap->rx_ret_prd = NULL;
	}
	if (ap->tx_csm != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->tx_csm, ap->tx_csm_dma);
		ap->tx_csm = NULL;
	}
}


static int ace_allocate_descriptors(struct net_device *dev)
{
	struct ace_private *ap = dev->priv;
	int size;

	size = (sizeof(struct rx_desc) *
		(RX_STD_RING_ENTRIES +
		 RX_JUMBO_RING_ENTRIES +
		 RX_MINI_RING_ENTRIES +
		 RX_RETURN_RING_ENTRIES));

	ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
					       &ap->rx_ring_base_dma);
	if (ap->rx_std_ring == NULL)
		goto fail;

	ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
	ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
	ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;

	size = (sizeof(struct event) * EVT_RING_ENTRIES);

	ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);

	if (ap->evt_ring == NULL)
		goto fail;

	/*
	 * Only allocate a host TX ring for the Tigon II, the Tigon I
	 * has to use PCI registers for this ;-(
	 */
	if (!ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);

		ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
						   &ap->tx_ring_dma);

		if (ap->tx_ring == NULL)
			goto fail;
	}

	ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					   &ap->evt_prd_dma);
	if (ap->evt_prd == NULL)
		goto fail;

	ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					      &ap->rx_ret_prd_dma);
	if (ap->rx_ret_prd == NULL)
		goto fail;

	ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
					  &ap->tx_csm_dma);
	if (ap->tx_csm == NULL)
		goto fail;

	return 0;

fail:
	/* Clean up. */
	ace_init_cleanup(dev);
	return 1;
}


/*
 * Generic cleanup handling data allocated during init. Used when the
 * module is unloaded or if an error occurs during initialization
 */
static void ace_init_cleanup(struct net_device *dev)
{
	struct ace_private *ap;

	ap = dev->priv;

	ace_free_descriptors(dev);

	if (ap->info)
		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
				    ap->info, ap->info_dma);
	if (ap->skb)
		kfree(ap->skb);
	if (ap->trace_buf)
		kfree(ap->trace_buf);

	if (dev->irq)
		free_irq(dev->irq, dev);

	unregister_netdev(dev);
	iounmap(ap->regs);
}


/*
 * Commands are considered to be slow.
 */
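/*
 * ace_issue_cmd() only copies the command into the ring and advances
 * the producer index (modulo CMD_RING_ENTRIES); the NIC consumes the
 * ring asynchronously, so no completion is awaited here.
 */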
static inline void ace_issue_cmd(struct ace_regs *regs, struct cmd *cmd)
{
	u32 idx;

	idx = readl(&regs->CmdPrd);

	writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;

	writel(idx, &regs->CmdPrd);
}


static int __init ace_init(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct ace_info *info = NULL;
	struct pci_dev *pdev;
	unsigned long myjif;
	u64 tmp_ptr;
	u32 tig_ver, mac1, mac2, tmp, pci_state;
	int board_idx, ecode = 0;
	short i;
	unsigned char cache_size;

	ap = dev->priv;
	regs = ap->regs;

	board_idx = ap->board_idx;

	/*
	 * aman@sgi.com - it's useful to do a NIC reset here to
	 * address the `Firmware not running' problem subsequent
	 * to any crashes involving the NIC
	 */
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* PCI write posting */
	udelay(5);

	/*
	 * Don't access any other registers before this point!
	 */
#ifdef __BIG_ENDIAN
	/*
	 * This will most likely need BYTE_SWAP once we switch
	 * to using __raw_writel()
	 */
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* PCI write posting */

	/*
	 * Stop the NIC CPU and clear pending interrupts
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* PCI write posting */
	writel(0, &regs->Mb0Lo);

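	/*
	 * The top nibble of HostCtrl encodes the chip revision:
	 * 4 is a Tigon I, 6 a Tigon II.
	 */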
	tig_ver = readl(&regs->HostCtrl) >> 28;

	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
		printk(KERN_INFO " Tigon I (Rev. 4), Firmware: %i.%i.%i, ",
		       tigonFwReleaseMajor, tigonFwReleaseMinor,
		       tigonFwReleaseFix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, tigon2FwReleaseMajor, tigon2FwReleaseMinor,
		       tigon2FwReleaseFix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* PCI write posting */
		/*
		 * The SRAM bank size does _not_ indicate the amount
		 * of memory on the card, it controls the _bank_ size!
		 * Ie. a 1MB AceNIC will have two banks of 512KB.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING " Unsupported Tigon version detected "
		       "(%i), ", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * ModeStat _must_ be set after the SRAM settings as this change
	 * seems to corrupt the ModeStat and possible other registers.
	 * The SRAM settings survive resets and setting it to the same
	 * value a second time works as well. This is what caused the
	 * `Firmware not running' problem on the Tigon II.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* PCI write posting */

	mac1 = 0;
	for(i = 0; i < 4; i++) {
		int t;	/* signed: read_eeprom_byte() is negative on error */

		mac1 = mac1 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac1 |= (t & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		int t;	/* signed: read_eeprom_byte() is negative on error */

		mac2 = mac2 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac2 |= (t & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       (mac1 >> 8) & 0xff, mac1 & 0xff, (mac2 >> 24) & 0xff,
	       (mac2 >> 16) & 0xff, (mac2 >> 8) & 0xff, mac2 & 0xff);

	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
	dev->dev_addr[1] = mac1 & 0xff;
	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
	dev->dev_addr[5] = mac2 & 0xff;

	/*
	 * Looks like this needs to be dealt with on all architectures,
	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
	 * Ie. having two NICs in the machine, one will have the cache
	 * line set at boot time, the other will not.
	 */
	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO " PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	pci_state = readl(&regs->PciState);
	printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       (pci_state & PCI_32BIT) ? 32 : 64,
	       (pci_state & PCI_66MHZ) ? 66 : 33,
	       ap->pci_latency);

	/*
	 * Set the max DMA transfer size. Seems that for most systems
	 * the performance is better when no MAX parameter is
	 * set. However for systems enabling PCI write and invalidate,
	 * DMA writes must be set to the L1 cache line size to get
	 * optimal performance.
	 *
	 * The default is now to turn the PCI write and invalidate off
	 * - that is what Alteon does for NT.
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
		/*
		 * Tuning parameters only supported for 8 cards
		 */
		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO " Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			printk(KERN_INFO " PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO " Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/*
	 * On this platform, we know what the best dma settings
	 * are. We use 64-byte maximum bursts, because if we
	 * burst larger than the cache line size (or even cross
	 * a 64byte boundary in a single burst) the UltraSparc
	 * PCI controller will disconnect at 64-byte multiples.
	 *
	 * Read-multiple will be properly enabled above, and when
	 * set will give the PCI controller proper hints about
	 * prefetching.
	 */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;
	/*
	 * All the docs say MUST NOT. Well, I did.
	 * Nothing terrible happens if we load the wrong size.
	 * Bit w&i still works better!
	 */
	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

#if 0
	/*
	 * The Host PCI bus controller driver has to set FBB.
	 * If all devices on that PCI bus support FBB, then the controller
	 * can enable FBB support in the Host PCI Bus controller (or on
	 * the PCI-PCI bridge if that applies).
	 * -ggg
	 */
	/*
	 * I have received reports from people having problems when this
	 * bit is enabled.
	 */
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif

	/*
	 * Configure DMA attributes.
	 */
	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
		ap->pci_using_dac = 1;
	} else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) {
		ap->pci_using_dac = 0;
	} else {
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Initialize the generic info block and the command+event rings
	 * and the control blocks for the transmit and receive rings
	 * as they need to be setup once and for all.
	 */
	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
					  &ap->info_dma))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ,
			    dev->name, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       dev->name, pdev->irq);
		goto init_error;
	} else
		dev->irq = pdev->irq;

	/*
	 * Register the device here to be able to catch allocated
	 * interrupt handlers in case the firmware doesn't come up.
	 */
	ap->next = root_dev;
	root_dev = dev;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	memset(ap->info, 0, sizeof(struct ace_info));
	memset(ap->skb, 0, sizeof(struct ace_skb));

	ace_load_firmware(dev);
	ap->fw_running = 0;

	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	*(ap->evt_prd) = 0;
	wmb();
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);

	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
	info->rx_std_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
			RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		ap->tx_ring = (struct tx_desc *)regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES *
				 sizeof(struct tx_desc) / 4); i++) {
			writel(0, (unsigned long)ap->tx_ring + i * 4);
		}

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));

		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	/*
	 * The Tigon I does not like having the TX ring in host memory ;-(
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);

	/*
	 * Potential item for tuning parameter
	 */
#if 0 /* NO */
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/*
	 * McKinley boxes do not like us fiddling with AssistState
	 * this early
	 */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       dev->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
	}

	/*
	 * Default link parameters
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if(ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;

	/*
	 * Override link default parameters
	 */
	if ((board_idx >= 0) && link[board_idx]) {
		int option = link[board_idx];

		tmp = LNK_ENABLE;

		if (option & 0x01) {
			printk(KERN_INFO "%s: Setting half duplex link\n",
			       dev->name);
			tmp &= ~LNK_FULL_DUPLEX;
		}
		if (option & 0x02)
			tmp &= ~LNK_NEGOTIATE;
		if (option & 0x10)
			tmp |= LNK_10MB;
		if (option & 0x20)
			tmp |= LNK_100MB;
		if (option & 0x40)
			tmp |= LNK_1000MB;
		if ((option & 0x70) == 0) {
			printk(KERN_WARNING "%s: No media speed specified, "
			       "forcing auto negotiation\n", dev->name);
			tmp |= LNK_NEGOTIATE | LNK_1000MB |
				LNK_100MB | LNK_10MB;
		}
		if ((option & 0x100) == 0)
			tmp |= LNK_NEG_FCTL;
		else
			printk(KERN_INFO "%s: Disabling flow control "
			       "negotiation\n", dev->name);
		if (option & 0x200)
			tmp |= LNK_RX_FLOW_CTL_Y;
		if ((option & 0x400) && (ap->version >= 2)) {
			printk(KERN_INFO "%s: Enabling TX flow control\n",
			       dev->name);
			tmp |= LNK_TX_FLOW_CTL_Y;
		}
	}

	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	if (ACE_IS_TIGON_I(ap))
		writel(tigonFwStartAddr, &regs->Pc);
	if (ap->version == 2)
		writel(tigon2FwStartAddr, &regs->Pc);

	writel(0, &regs->Mb0Lo);
1718
1719 /*
1720 * Set tx_csm before we start receiving interrupts, otherwise
1721 * the interrupt handler might think it is supposed to process
1722 * tx ints before we are up and running, which may cause a null
1723 * pointer access in the int handler.
1724 */
1725 ap->cur_rx = 0;
1726 ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
1727
1728 wmb();
1729 ace_set_txprd(regs, ap, 0);
1730 writel(0, ®s->RxRetCsm);
1731
1732 /*
1733 * Zero the stats before starting the interface
1734 */
1735 memset(&ap->stats, 0, sizeof(ap->stats));
1736
1737 /*
1738 * Enable DMA engine now.
1739 * If we do this sooner, Mckinley box pukes.
1740 * I assume it's because Tigon II DMA engine wants to check
1741 * *something* even before the CPU is started.
1742 */
1743 writel(1, ®s->AssistState); /* enable DMA */
1744
1745 /*
1746 * Start the NIC CPU
1747 */
1748 writel(readl(®s->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), ®s->CpuCtrl);
1749 readl(®s->CpuCtrl);
1750
1751 /*
1752 * Wait for the firmware to spin up - max 3 seconds.
1753 */
1754 myjif = jiffies + 3 * HZ;
1755 while (time_before(jiffies, myjif) && !ap->fw_running);
1756
1757 if (!ap->fw_running) {
1758 printk(KERN_ERR "%s: Firmware NOT running!\n", dev->name);
1759
1760 ace_dump_trace(ap);
1761 writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl);
1762 readl(®s->CpuCtrl);
1763
1764 /* aman@sgi.com - account for badly behaving firmware/NIC:
1765 * - have observed that the NIC may continue to generate
1766 * interrupts for some reason; attempt to stop it - halt
1767 * second CPU for Tigon II cards, and also clear Mb0
1768 * - if we're a module, we'll fail to load if this was
1769 * the only GbE card in the system => if the kernel does
1770 * see an interrupt from the NIC, code to handle it is
1771 * gone and OOps! - so free_irq also
1772 */
1773 if (ap->version >= 2)
1774 writel(readl(&regs->CpuBCtrl) | CPU_HALT,
1775 &regs->CpuBCtrl);
1776 writel(0, &regs->Mb0Lo);
1777 readl(&regs->Mb0Lo);
1778
1779 ecode = -EBUSY;
1780 goto init_error;
1781 }
1782
1783 /*
1784 * We load the ring here as there seems to be no way to tell the
1785 * firmware to wipe the ring without re-initializing it.
1786 */
1787 if (!test_and_set_bit(0, &ap->std_refill_busy))
1788 ace_load_std_rx_ring(ap, RX_RING_SIZE);
1789 else
1790 printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
1791 dev->name);
1792 if (ap->version >= 2) {
1793 if (!test_and_set_bit(0, &ap->mini_refill_busy))
1794 ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
1795 else
1796 printk(KERN_ERR "%s: Someone is busy refilling "
1797 "the RX mini ring\n", dev->name);
1798 }
1799 return 0;
1800
1801 init_error:
1802 ace_init_cleanup(dev);
1803 return ecode;
1804 }
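/*
 * Illustrative sketch (not driver code): how the per-board "link"
 * module parameter bits handled in the override block above map onto
 * the LNK_* flags. A minimal, self-contained user-space decoder,
 * assuming only the bit layout used in that block.
 */
#if 0	/* example only -- never compiled into the driver */
#include <stdio.h>

static void decode_link_option(int option)
{
	printf("duplex:    %s\n", (option & 0x01) ? "half" : "full");
	printf("autoneg:   %s\n", (option & 0x02) ? "off" : "on");
	if (option & 0x10)
		printf("speed:     10Mbit advertised\n");
	if (option & 0x20)
		printf("speed:     100Mbit advertised\n");
	if (option & 0x40)
		printf("speed:     1000Mbit advertised\n");
	if ((option & 0x70) == 0)
		printf("speed:     none set -> driver forces autoneg\n");
	printf("fctl neg:  %s\n", (option & 0x100) ? "disabled" : "enabled");
	printf("rx fctl:   %s\n", (option & 0x200) ? "yes" : "no");
	printf("tx fctl:   %s (Tigon II only)\n",
	       (option & 0x400) ? "yes" : "no");
}
#endif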
1805
1806
1807 static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
1808 {
1809 struct ace_private *ap;
1810 struct ace_regs *regs;
1811 int board_idx;
1812
1813 ap = dev->priv;
1814 regs = ap->regs;
1815
1816 board_idx = ap->board_idx;
1817
1818 if (board_idx >= 0) {
1819 if (!jumbo) {
1820 if (!tx_coal_tick[board_idx])
1821 writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
1822 if (!max_tx_desc[board_idx])
1823 writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
1824 if (!rx_coal_tick[board_idx])
1825 writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
1826 if (!max_rx_desc[board_idx])
1827 writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
1828 if (!tx_ratio[board_idx])
1829 writel(DEF_TX_RATIO, &regs->TxBufRat);
1830 } else {
1831 if (!tx_coal_tick[board_idx])
1832 writel(DEF_JUMBO_TX_COAL,
1833 &regs->TuneTxCoalTicks);
1834 if (!max_tx_desc[board_idx])
1835 writel(DEF_JUMBO_TX_MAX_DESC,
1836 &regs->TuneMaxTxDesc);
1837 if (!rx_coal_tick[board_idx])
1838 writel(DEF_JUMBO_RX_COAL,
1839 &regs->TuneRxCoalTicks);
1840 if (!max_rx_desc[board_idx])
1841 writel(DEF_JUMBO_RX_MAX_DESC,
1842 &regs->TuneMaxRxDesc);
1843 if (!tx_ratio[board_idx])
1844 writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
1845 }
1846 }
1847 }
1848
1849
1850 static void ace_watchdog(struct net_device *data)
1851 {
1852 struct net_device *dev = data;
1853 struct ace_private *ap = dev->priv;
1854 struct ace_regs *regs = ap->regs;
1855
1856 /*
1857 * We haven't received a stats update event for more than 2.5
1858 * seconds and there is data in the transmit queue, thus we
1859 * assume the card is stuck.
1860 */
1861 if (*ap->tx_csm != ap->tx_ret_csm) {
1862 printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
1863 dev->name, (unsigned int)readl(&regs->HostCtrl));
1864 /* This can happen due to IEEE flow control. */
1865 } else {
1866 printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
1867 dev->name);
1868 #if 0
1869 netif_wake_queue(dev);
1870 #endif
1871 }
1872 }
1873
1874
1875 static void ace_tasklet(unsigned long dev)
1876 {
1877 struct ace_private *ap = ((struct net_device *)dev)->priv;
1878 int cur_size;
1879
1880 cur_size = atomic_read(&ap->cur_rx_bufs);
1881 if ((cur_size < RX_LOW_STD_THRES) &&
1882 !test_and_set_bit(0, &ap->std_refill_busy)) {
1883 #if DEBUG
1884 printk("refilling buffers (current %i)\n", cur_size);
1885 #endif
1886 ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
1887 }
1888
1889 if (ap->version >= 2) {
1890 cur_size = atomic_read(&ap->cur_mini_bufs);
1891 if ((cur_size < RX_LOW_MINI_THRES) &&
1892 !test_and_set_bit(0, &ap->mini_refill_busy)) {
1893 #if DEBUG
1894 printk("refilling mini buffers (current %i)\n",
1895 cur_size);
1896 #endif
1897 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
1898 }
1899 }
1900
1901 cur_size = atomic_read(&ap->cur_jumbo_bufs);
1902 if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
1903 !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
1904 #if DEBUG
1905 printk("refilling jumbo buffers (current %i)\n", cur_size);
1906 #endif
1907 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
1908 }
1909 ap->tasklet_pending = 0;
1910 }
1911
1912
1913 /*
1914 * Copy the contents of the NIC's trace buffer to kernel memory.
1915 */
1916 static void ace_dump_trace(struct ace_private *ap)
1917 {
1918 #if 0
1919 if (!ap->trace_buf)
1920 if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
1921 return;
1922 #endif
1923 }
1924
1925
1926 /*
1927 * Load the standard rx ring.
1928 *
1929 * Loading rings is safe without holding the spin lock since this is
1930 * done only before the device is enabled, thus no interrupts are
1931 * generated, nor is the ring touched by the interrupt handler/tasklet.
1932 */
1933 static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1934 {
1935 struct ace_regs *regs;
1936 short i, idx;
1937
1938 regs = ap->regs;
1939
1940 prefetchw(&ap->cur_rx_bufs);
1941
1942 idx = ap->rx_std_skbprd;
1943
1944 for (i = 0; i < nr_bufs; i++) {
1945 struct sk_buff *skb;
1946 struct rx_desc *rd;
1947 dma_addr_t mapping;
1948
1949 skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
1950 if (!skb)
1951 break;
1952
1953 /*
1954 * Make sure IP header starts on a fresh cache line.
1955 */
1956 skb_reserve(skb, 2 + 16);
1957 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
1958 ((unsigned long)skb->data & ~PAGE_MASK),
1959 ACE_STD_BUFSIZE - (2 + 16),
1960 PCI_DMA_FROMDEVICE);
1961 ap->skb->rx_std_skbuff[idx].skb = skb;
1962 pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1963 mapping, mapping);
1964
1965 rd = &ap->rx_std_ring[idx];
1966 set_aceaddr(&rd->addr, mapping);
1967 rd->size = ACE_STD_MTU + ETH_HLEN + 4;
1968 rd->idx = idx;
1969 idx = (idx + 1) % RX_STD_RING_ENTRIES;
1970 }
1971
1972 if (!i)
1973 goto error_out;
1974
1975 atomic_add(i, &ap->cur_rx_bufs);
1976 ap->rx_std_skbprd = idx;
1977
1978 if (ACE_IS_TIGON_I(ap)) {
1979 struct cmd cmd;
1980 cmd.evt = C_SET_RX_PRD_IDX;
1981 cmd.code = 0;
1982 cmd.idx = ap->rx_std_skbprd;
1983 ace_issue_cmd(regs, &cmd);
1984 } else {
1985 writel(idx, &regs->RxStdPrd);
1986 wmb();
1987 }
1988
1989 out:
1990 clear_bit(0, &ap->std_refill_busy);
1991 return;
1992
1993 error_out:
1994 printk(KERN_INFO "Out of memory when allocating "
1995 "standard receive buffers\n");
1996 goto out;
1997 }
1998
1999
2000 static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
2001 {
2002 struct ace_regs *regs;
2003 short i, idx;
2004
2005 regs = ap->regs;
2006
2007 prefetchw(&ap->cur_mini_bufs);
2008
2009 idx = ap->rx_mini_skbprd;
2010 for (i = 0; i < nr_bufs; i++) {
2011 struct sk_buff *skb;
2012 struct rx_desc *rd;
2013 dma_addr_t mapping;
2014
2015 skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
2016 if (!skb)
2017 break;
2018
2019 /*
2020 * Make sure the IP header ends up on a fresh cache line
2021 */
2022 skb_reserve(skb, 2 + 16);
2023 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
2024 ((unsigned long)skb->data & ~PAGE_MASK),
2025 ACE_MINI_BUFSIZE - (2 + 16),
2026 PCI_DMA_FROMDEVICE);
2027 ap->skb->rx_mini_skbuff[idx].skb = skb;
2028 pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
2029 mapping, mapping);
2030
2031 rd = &ap->rx_mini_ring[idx];
2032 set_aceaddr(&rd->addr, mapping);
2033 rd->size = ACE_MINI_SIZE;
2034 rd->idx = idx;
2035 idx = (idx + 1) % RX_MINI_RING_ENTRIES;
2036 }
2037
2038 if (!i)
2039 goto error_out;
2040
2041 atomic_add(i, &ap->cur_mini_bufs);
2042
2043 ap->rx_mini_skbprd = idx;
2044
2045 writel(idx, &regs->RxMiniPrd);
2046 wmb();
2047
2048 out:
2049 clear_bit(0, &ap->mini_refill_busy);
2050 return;
2051 error_out:
2052 printk(KERN_INFO "Out of memory when allocating "
2053 "mini receive buffers\n");
2054 goto out;
2055 }
2056
2057
2058 /*
2059 * Load the jumbo rx ring; this may happen at any time if the MTU
2060 * is changed to a value > 1500.
2061 */
2062 static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
2063 {
2064 struct ace_regs *regs;
2065 short i, idx;
2066
2067 regs = ap->regs;
2068
2069 idx = ap->rx_jumbo_skbprd;
2070
2071 for (i = 0; i < nr_bufs; i++) {
2072 struct sk_buff *skb;
2073 struct rx_desc *rd;
2074 dma_addr_t mapping;
2075
2076 skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
2077 if (!skb)
2078 break;
2079
2080 /*
2081 * Make sure the IP header ends up on a fresh cache line
2082 */
2083 skb_reserve(skb, 2 + 16);
2084 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
2085 ((unsigned long)skb->data & ~PAGE_MASK),
2086 ACE_JUMBO_BUFSIZE - (2 + 16),
2087 PCI_DMA_FROMDEVICE);
2088 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
2089 pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
2090 mapping, mapping);
2091
2092 rd = &ap->rx_jumbo_ring[idx];
2093 set_aceaddr(&rd->addr, mapping);
2094 rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
2095 rd->idx = idx;
2096 idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
2097 }
2098
2099 if (!i)
2100 goto error_out;
2101
2102 atomic_add(i, &ap->cur_jumbo_bufs);
2103 ap->rx_jumbo_skbprd = idx;
2104
2105 if (ACE_IS_TIGON_I(ap)) {
2106 struct cmd cmd;
2107 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
2108 cmd.code = 0;
2109 cmd.idx = ap->rx_jumbo_skbprd;
2110 ace_issue_cmd(regs, &cmd);
2111 } else {
2112 writel(idx, &regs->RxJumboPrd);
2113 wmb();
2114 }
2115
2116 out:
2117 clear_bit(0, &ap->jumbo_refill_busy);
2118 return;
2119 error_out:
2120 if (net_ratelimit())
2121 printk(KERN_INFO "Out of memory when allocating "
2122 "jumbo receive buffers\n");
2123 goto out;
2124 }
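/*
 * Illustrative sketch (not driver code): the three refill loops above
 * all follow the same producer pattern -- allocate, map, publish at
 * the producer index, then advance modulo the ring size. A minimal
 * user-space model of that index arithmetic; RING_ENTRIES is
 * hypothetical, the real rings use RX_STD/MINI/JUMBO_RING_ENTRIES.
 */
#if 0	/* example only -- never compiled into the driver */
#define RING_ENTRIES 512	/* hypothetical ring size */

static int example_refill(int prd, int nr_bufs, int *nr_filled)
{
	int i;

	for (i = 0; i < nr_bufs; i++) {
		/* real code: alloc_skb() + pci_map_page() + fill rx_desc */
		prd = (prd + 1) % RING_ENTRIES;
	}
	*nr_filled = i;	/* real code atomic_add()s this to the counter */
	return prd;	/* new producer index, possibly wrapped */
}
#endif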
2125
2126
2127 /*
2128 * All events are considered to be slow (RX/TX ints do not generate
2129 * events) and are handled here, outside the main interrupt handler,
2130 * to reduce the size of the handler.
2131 */
2132 static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
2133 {
2134 struct ace_private *ap;
2135
2136 ap = dev->priv;
2137
2138 while (evtcsm != evtprd) {
2139 switch (ap->evt_ring[evtcsm].evt) {
2140 case E_FW_RUNNING:
2141 printk(KERN_INFO "%s: Firmware up and running\n",
2142 dev->name);
2143 ap->fw_running = 1;
2144 wmb();
2145 break;
2146 case E_STATS_UPDATED:
2147 break;
2148 case E_LNK_STATE:
2149 {
2150 u16 code = ap->evt_ring[evtcsm].code;
2151 switch (code) {
2152 case E_C_LINK_UP:
2153 {
2154 u32 state = readl(&ap->regs->GigLnkState);
2155 printk(KERN_WARNING "%s: Optical link UP "
2156 "(%s Duplex, Flow Control: %s%s)\n",
2157 dev->name,
2158 state & LNK_FULL_DUPLEX ? "Full":"Half",
2159 state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
2160 state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
2161 break;
2162 }
2163 case E_C_LINK_DOWN:
2164 printk(KERN_WARNING "%s: Optical link DOWN\n",
2165 dev->name);
2166 break;
2167 case E_C_LINK_10_100:
2168 printk(KERN_WARNING "%s: 10/100BaseT link "
2169 "UP\n", dev->name);
2170 break;
2171 default:
2172 printk(KERN_ERR "%s: Unknown optical link "
2173 "state %02x\n", dev->name, code);
2174 }
2175 break;
2176 }
2177 case E_ERROR:
2178 switch (ap->evt_ring[evtcsm].code) {
2179 case E_C_ERR_INVAL_CMD:
2180 printk(KERN_ERR "%s: invalid command error\n",
2181 dev->name);
2182 break;
2183 case E_C_ERR_UNIMP_CMD:
2184 printk(KERN_ERR "%s: unimplemented command "
2185 "error\n", dev->name);
2186 break;
2187 case E_C_ERR_BAD_CFG:
2188 printk(KERN_ERR "%s: bad config error\n",
2189 dev->name);
2190 break;
2191 default:
2192 printk(KERN_ERR "%s: unknown error %02x\n",
2193 dev->name, ap->evt_ring[evtcsm].code);
2194 }
2195 break;
2196 case E_RESET_JUMBO_RNG:
2197 {
2198 int i;
2199 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
2200 if (ap->skb->rx_jumbo_skbuff[i].skb) {
2201 ap->rx_jumbo_ring[i].size = 0;
2202 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
2203 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
2204 ap->skb->rx_jumbo_skbuff[i].skb = NULL;
2205 }
2206 }
2207
2208 if (ACE_IS_TIGON_I(ap)) {
2209 struct cmd cmd;
2210 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
2211 cmd.code = 0;
2212 cmd.idx = 0;
2213 ace_issue_cmd(ap->regs, &cmd);
2214 } else {
2215 writel(0, &((ap->regs)->RxJumboPrd));
2216 wmb();
2217 }
2218
2219 ap->jumbo = 0;
2220 ap->rx_jumbo_skbprd = 0;
2221 printk(KERN_INFO "%s: Jumbo ring flushed\n",
2222 dev->name);
2223 clear_bit(0, &ap->jumbo_refill_busy);
2224 break;
2225 }
2226 default:
2227 printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
2228 dev->name, ap->evt_ring[evtcsm].evt);
2229 }
2230 evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
2231 }
2232
2233 return evtcsm;
2234 }
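/*
 * Illustrative sketch (not driver code): the event handler above is a
 * plain ring consumer -- process entries until the consumer index
 * catches up with the producer index, wrapping modulo the ring size.
 * The event struct here is hypothetical; the real ring is ap->evt_ring.
 */
#if 0	/* example only -- never compiled into the driver */
struct example_evt { unsigned evt, code; };

static unsigned consume_events(struct example_evt *ring,
			       unsigned csm, unsigned prd, unsigned entries)
{
	while (csm != prd) {
		/* dispatch on ring[csm].evt, as ace_handle_event() does */
		csm = (csm + 1) % entries;
	}
	return csm;	/* caller writes this back to EvtCsm */
}
#endif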
2235
2236
2237 static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2238 {
2239 struct ace_private *ap = dev->priv;
2240 u32 idx;
2241 int mini_count = 0, std_count = 0;
2242
2243 idx = rxretcsm;
2244
2245 prefetchw(&ap->cur_rx_bufs);
2246 prefetchw(&ap->cur_mini_bufs);
2247
2248 while (idx != rxretprd) {
2249 struct ring_info *rip;
2250 struct sk_buff *skb;
2251 struct rx_desc *rxdesc, *retdesc;
2252 u32 skbidx;
2253 int bd_flags, desc_type, mapsize;
2254 u16 csum;
2255
2256 retdesc = &ap->rx_return_ring[idx];
2257 skbidx = retdesc->idx;
2258 bd_flags = retdesc->flags;
2259 desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
2260
2261 switch (desc_type) {
2262 /*
2263 * Normal frames do not have any flags set
2264 *
2265 * Mini and normal frames arrive frequently,
2266 * so use a local counter to avoid doing
2267 * atomic operations for each packet arriving.
2268 */
2269 case 0:
2270 rip = &ap->skb->rx_std_skbuff[skbidx];
2271 mapsize = ACE_STD_BUFSIZE - (2 + 16);
2272 rxdesc = &ap->rx_std_ring[skbidx];
2273 std_count++;
2274 break;
2275 case BD_FLG_JUMBO:
2276 rip = &ap->skb->rx_jumbo_skbuff[skbidx];
2277 mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
2278 rxdesc = &ap->rx_jumbo_ring[skbidx];
2279 atomic_dec(&ap->cur_jumbo_bufs);
2280 break;
2281 case BD_FLG_MINI:
2282 rip = &ap->skb->rx_mini_skbuff[skbidx];
2283 mapsize = ACE_MINI_BUFSIZE - (2 + 16);
2284 rxdesc = &ap->rx_mini_ring[skbidx];
2285 mini_count++;
2286 break;
2287 default:
2288 printk(KERN_INFO "%s: unknown frame type (0x%02x) "
2289 "returned by NIC\n", dev->name,
2290 retdesc->flags);
2291 goto error;
2292 }
2293
2294 skb = rip->skb;
2295 rip->skb = NULL;
2296 pci_unmap_page(ap->pdev,
2297 pci_unmap_addr(rip, mapping),
2298 mapsize,
2299 PCI_DMA_FROMDEVICE);
2300 skb_put(skb, retdesc->size);
2301
2302 /*
2303 * Fly baby, fly!
2304 */
2305 csum = retdesc->tcp_udp_csum;
2306
2307 skb->dev = dev;
2308 skb->protocol = eth_type_trans(skb, dev);
2309
2310 /*
2311 * Instead of forcing the poor tigon mips cpu to calculate
2312 * pseudo hdr checksum, we do this ourselves.
2313 */
2314 if (bd_flags & BD_FLG_TCP_UDP_SUM) {
2315 skb->csum = htons(csum);
2316 skb->ip_summed = CHECKSUM_HW;
2317 } else {
2318 skb->ip_summed = CHECKSUM_NONE;
2319 }
2320
2321 /* send it up */
2322 #if ACENIC_DO_VLAN
2323 if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
2324 vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
2325 } else
2326 #endif
2327 netif_rx(skb);
2328
2329 dev->last_rx = jiffies;
2330 ap->stats.rx_packets++;
2331 ap->stats.rx_bytes += retdesc->size;
2332
2333 idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
2334 }
2335
2336 atomic_sub(std_count, &ap->cur_rx_bufs);
2337 if (!ACE_IS_TIGON_I(ap))
2338 atomic_sub(mini_count, &ap->cur_mini_bufs);
2339
2340 out:
2341 /*
2342 * According to the documentation RxRetCsm is obsolete with
2343 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
2344 */
2345 if (ACE_IS_TIGON_I(ap)) {
2346 struct ace_regs *regs = ap->regs;
2347 writel(idx, &regs->RxRetCsm);
2348 }
2349 ap->cur_rx = idx;
2350
2351 return;
2352 error:
2353 idx = rxretprd;
2354 goto out;
2355 }
2356
2357
2358 static inline void ace_tx_int(struct net_device *dev,
2359 u32 txcsm, u32 idx)
2360 {
2361 struct ace_private *ap = dev->priv;
2362
2363 do {
2364 struct sk_buff *skb;
2365 dma_addr_t mapping;
2366 struct tx_ring_info *info;
2367
2368 info = ap->skb->tx_skbuff + idx;
2369 skb = info->skb;
2370 mapping = pci_unmap_addr(info, mapping);
2371
2372 if (mapping) {
2373 pci_unmap_page(ap->pdev, mapping,
2374 pci_unmap_len(info, maplen),
2375 PCI_DMA_TODEVICE);
2376 pci_unmap_addr_set(info, mapping, 0);
2377 }
2378
2379 if (skb) {
2380 ap->stats.tx_packets++;
2381 ap->stats.tx_bytes += skb->len;
2382 dev_kfree_skb_irq(skb);
2383 info->skb = NULL;
2384 }
2385
2386 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2387 } while (idx != txcsm);
2388
2389 if (netif_queue_stopped(dev))
2390 netif_wake_queue(dev);
2391
2392 wmb();
2393 ap->tx_ret_csm = txcsm;
2394
2395 /* So... tx_ret_csm is advanced _after_ check for device wakeup.
2396 *
2397 * We could try to do it before. In this case we would get
2398 * the following race condition: hard_start_xmit on another cpu
2399 * enters after we advanced tx_ret_csm and fills the space
2400 * which we have just freed, so that we make an illegal device wakeup.
2401 * There is no good way to work around this (the entry to
2402 * ace_start_xmit detects this condition and prevents
2403 * ring corruption, but it is not a good workaround.)
2404 *
2405 * When tx_ret_csm is advanced after, we wake up device _only_
2406 * if we really have some space in ring (though the core doing
2407 * hard_start_xmit can see full ring for some period and has to
2408 * synchronize.) Superb.
2409 * BUT! We get another subtle race condition. hard_start_xmit
2410 * may think that ring is full between wakeup and advancing
2411 * tx_ret_csm and will stop device instantly! It is not so bad.
2412 * We are guaranteed that there is something in ring, so that
2413 * the next irq will resume transmission. To speed this up we could
2414 * mark descriptor, which closes ring with BD_FLG_COAL_NOW
2415 * (see ace_start_xmit).
2416 *
2417 * Well, this dilemma exists in all lock-free devices.
2418 * We, following the scheme used in drivers by Donald Becker,
2419 * select the least dangerous.
2420 * --ANK
2421 */
2422 }
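/*
 * Illustrative sketch (not driver code): the comment above hinges on
 * how ring fullness is derived from the consumer and producer indices.
 * One common formulation -- assumed here, not necessarily the exact
 * body of the driver's tx_ring_full() -- sacrifices one slot so that
 * "full" and "empty" are distinguishable.
 */
#if 0	/* example only -- never compiled into the driver */
static int example_ring_full(unsigned csm, unsigned prd, unsigned entries)
{
	/* full when advancing the producer would collide with consumer */
	return ((prd + 1) % entries) == (csm % entries);
}
#endif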
2423
2424
2425 static void ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
2426 {
2427 struct ace_private *ap;
2428 struct ace_regs *regs;
2429 struct net_device *dev = (struct net_device *)dev_id;
2430 u32 idx;
2431 u32 txcsm, rxretcsm, rxretprd;
2432 u32 evtcsm, evtprd;
2433
2434 ap = dev->priv;
2435 regs = ap->regs;
2436
2437 /*
2438 * In case of PCI shared interrupts or spurious interrupts,
2439 * we want to make sure it is actually our interrupt before
2440 * spending any time in here.
2441 */
2442 if (!(readl(&regs->HostCtrl) & IN_INT))
2443 return;
2444
2445 /*
2446 * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
2447 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
2448 * writel(0, &regs->Mb0Lo).
2449 *
2450 * "IRQ avoidance" recommended in docs applies to IRQs served
2451 * by threads, and it is wrong even for that case.
2452 */
2453 writel(0, &regs->Mb0Lo);
2454 readl(&regs->Mb0Lo);
2455
2456 /*
2457 * There is no conflict between transmit handling in
2458 * start_xmit and receive processing, thus there is no reason
2459 * to take a spin lock for RX handling. Wait until we start
2460 * working on the other stuff - hey we don't need a spin lock
2461 * anymore.
2462 */
2463 rxretprd = *ap->rx_ret_prd;
2464 rxretcsm = ap->cur_rx;
2465
2466 if (rxretprd != rxretcsm)
2467 ace_rx_int(dev, rxretprd, rxretcsm);
2468
2469 txcsm = *ap->tx_csm;
2470 idx = ap->tx_ret_csm;
2471
2472 if (txcsm != idx) {
2473 /*
2474 * If each skb takes only one descriptor this check degenerates
2475 * to identity, because new space has just been opened.
2476 * But if skbs are fragmented we must check that this index
2477 * update releases enough space, otherwise we just
2478 * wait for the device to complete more work.
2479 */
2480 if (!tx_ring_full(ap, txcsm, ap->tx_prd))
2481 ace_tx_int(dev, txcsm, idx);
2482 }
2483
2484 evtcsm = readl(&regs->EvtCsm);
2485 evtprd = *ap->evt_prd;
2486
2487 if (evtcsm != evtprd) {
2488 evtcsm = ace_handle_event(dev, evtcsm, evtprd);
2489 writel(evtcsm, &regs->EvtCsm);
2490 }
2491
2492 /*
2493 * This has to go last in the interrupt handler and run with
2494 * the spin lock released ... what lock?
2495 */
2496 if (netif_running(dev)) {
2497 int cur_size;
2498 int run_tasklet = 0;
2499
2500 cur_size = atomic_read(&ap->cur_rx_bufs);
2501 if (cur_size < RX_LOW_STD_THRES) {
2502 if ((cur_size < RX_PANIC_STD_THRES) &&
2503 !test_and_set_bit(0, &ap->std_refill_busy)) {
2504 #if DEBUG
2505 printk("low on std buffers %i\n", cur_size);
2506 #endif
2507 ace_load_std_rx_ring(ap,
2508 RX_RING_SIZE - cur_size);
2509 } else
2510 run_tasklet = 1;
2511 }
2512
2513 if (!ACE_IS_TIGON_I(ap)) {
2514 cur_size = atomic_read(&ap->cur_mini_bufs);
2515 if (cur_size < RX_LOW_MINI_THRES) {
2516 if ((cur_size < RX_PANIC_MINI_THRES) &&
2517 !test_and_set_bit(0,
2518 &ap->mini_refill_busy)) {
2519 #if DEBUG
2520 printk("low on mini buffers %i\n",
2521 cur_size);
2522 #endif
2523 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
2524 } else
2525 run_tasklet = 1;
2526 }
2527 }
2528
2529 if (ap->jumbo) {
2530 cur_size = atomic_read(&ap->cur_jumbo_bufs);
2531 if (cur_size < RX_LOW_JUMBO_THRES) {
2532 if ((cur_size < RX_PANIC_JUMBO_THRES) &&
2533 !test_and_set_bit(0,
2534 &ap->jumbo_refill_busy)){
2535 #if DEBUG
2536 printk("low on jumbo buffers %i\n",
2537 cur_size);
2538 #endif
2539 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
2540 } else
2541 run_tasklet = 1;
2542 }
2543 }
2544 if (run_tasklet && !ap->tasklet_pending) {
2545 ap->tasklet_pending = 1;
2546 tasklet_schedule(&ap->ace_tasklet);
2547 }
2548 }
2549 }
2550
2551
2552 #if ACENIC_DO_VLAN
2553 static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2554 {
2555 struct ace_private *ap = dev->priv;
2556 unsigned long flags;
2557
2558 local_irq_save(flags);
2559 ace_mask_irq(dev);
2560
2561 ap->vlgrp = grp;
2562
2563 ace_unmask_irq(dev);
2564 local_irq_restore(flags);
2565 }
2566
2567
2568 static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2569 {
2570 struct ace_private *ap = dev->priv;
2571 unsigned long flags;
2572
2573 local_irq_save(flags);
2574 ace_mask_irq(dev);
2575
2576 if (ap->vlgrp)
2577 ap->vlgrp->vlan_devices[vid] = NULL;
2578
2579 ace_unmask_irq(dev);
2580 local_irq_restore(flags);
2581 }
2582 #endif /* ACENIC_DO_VLAN */
2583
2584
2585 static int ace_open(struct net_device *dev)
2586 {
2587 struct ace_private *ap;
2588 struct ace_regs *regs;
2589 struct cmd cmd;
2590
2591 ap = dev->priv;
2592 regs = ap->regs;
2593
2594 if (!(ap->fw_running)) {
2595 printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
2596 return -EBUSY;
2597 }
2598
2599 writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
2600
2601 cmd.evt = C_CLEAR_STATS;
2602 cmd.code = 0;
2603 cmd.idx = 0;
2604 ace_issue_cmd(regs, &cmd);
2605
2606 cmd.evt = C_HOST_STATE;
2607 cmd.code = C_C_STACK_UP;
2608 cmd.idx = 0;
2609 ace_issue_cmd(regs, &cmd);
2610
2611 if (ap->jumbo &&
2612 !test_and_set_bit(0, &ap->jumbo_refill_busy))
2613 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
2614
2615 if (dev->flags & IFF_PROMISC) {
2616 cmd.evt = C_SET_PROMISC_MODE;
2617 cmd.code = C_C_PROMISC_ENABLE;
2618 cmd.idx = 0;
2619 ace_issue_cmd(regs, &cmd);
2620
2621 ap->promisc = 1;
2622 } else
2623 ap->promisc = 0;
2624 ap->mcast_all = 0;
2625
2626 #if 0
2627 cmd.evt = C_LNK_NEGOTIATION;
2628 cmd.code = 0;
2629 cmd.idx = 0;
2630 ace_issue_cmd(regs, &cmd);
2631 #endif
2632
2633 netif_start_queue(dev);
2634
2635 ACE_MOD_INC_USE_COUNT;
2636
2637 /*
2638 * Setup the bottom half rx ring refill handler
2639 */
2640 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
2641 return 0;
2642 }
2643
2644
2645 static int ace_close(struct net_device *dev)
2646 {
2647 struct ace_private *ap;
2648 struct ace_regs *regs;
2649 struct cmd cmd;
2650 unsigned long flags;
2651 short i;
2652
2653 ace_if_down(dev);
2654
2655 /*
2656 * Without (or before) releasing irq and stopping hardware, this
2657 * is absolute nonsense, by the way. It will be reset instantly
2658 * by the first irq.
2659 */
2660 netif_stop_queue(dev);
2661
2662 ap = dev->priv;
2663 regs = ap->regs;
2664
2665 if (ap->promisc) {
2666 cmd.evt = C_SET_PROMISC_MODE;
2667 cmd.code = C_C_PROMISC_DISABLE;
2668 cmd.idx = 0;
2669 ace_issue_cmd(regs, &cmd);
2670 ap->promisc = 0;
2671 }
2672
2673 cmd.evt = C_HOST_STATE;
2674 cmd.code = C_C_STACK_DOWN;
2675 cmd.idx = 0;
2676 ace_issue_cmd(regs, &cmd);
2677
2678 tasklet_kill(&ap->ace_tasklet);
2679
2680 /*
2681 * Make sure one CPU is not processing packets while
2682 * buffers are being released by another.
2683 */
2684
2685 local_irq_save(flags);
2686 ace_mask_irq(dev);
2687
2688 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2689 struct sk_buff *skb;
2690 dma_addr_t mapping;
2691 struct tx_ring_info *info;
2692
2693 info = ap->skb->tx_skbuff + i;
2694 skb = info->skb;
2695 mapping = pci_unmap_addr(info, mapping);
2696
2697 if (mapping) {
2698 if (ACE_IS_TIGON_I(ap)) {
2699 writel(0, &ap->tx_ring[i].addr.addrhi);
2700 writel(0, &ap->tx_ring[i].addr.addrlo);
2701 writel(0, &ap->tx_ring[i].flagsize);
2702 } else
2703 memset(ap->tx_ring + i, 0,
2704 sizeof(struct tx_desc));
2705 pci_unmap_page(ap->pdev, mapping,
2706 pci_unmap_len(info, maplen),
2707 PCI_DMA_TODEVICE);
2708 pci_unmap_addr_set(info, mapping, 0);
2709 }
2710 if (skb) {
2711 dev_kfree_skb(skb);
2712 info->skb = NULL;
2713 }
2714 }
2715
2716 if (ap->jumbo) {
2717 cmd.evt = C_RESET_JUMBO_RNG;
2718 cmd.code = 0;
2719 cmd.idx = 0;
2720 ace_issue_cmd(regs, &cmd);
2721 }
2722
2723 ace_unmask_irq(dev);
2724 local_irq_restore(flags);
2725
2726 ACE_MOD_DEC_USE_COUNT;
2727 return 0;
2728 }
2729
2730
2731 static inline dma_addr_t
2732 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2733 struct sk_buff *tail, u32 idx)
2734 {
2735 dma_addr_t mapping;
2736 struct tx_ring_info *info;
2737
2738 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
2739 ((unsigned long) skb->data & ~PAGE_MASK),
2740 skb->len, PCI_DMA_TODEVICE);
2741
2742 info = ap->skb->tx_skbuff + idx;
2743 info->skb = tail;
2744 pci_unmap_addr_set(info, mapping, mapping);
2745 pci_unmap_len_set(info, maplen, skb->len);
2746 return mapping;
2747 }
2748
2749
2750 static inline void
2751 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
2752 u32 flagsize, u32 vlan_tag)
2753 {
2754 #if !USE_TX_COAL_NOW
2755 flagsize &= ~BD_FLG_COAL_NOW;
2756 #endif
2757
2758 if (ACE_IS_TIGON_I(ap)) {
2759 writel(addr >> 32, &desc->addr.addrhi);
2760 writel(addr & 0xffffffff, &desc->addr.addrlo);
2761 writel(flagsize, &desc->flagsize);
2762 #if ACENIC_DO_VLAN
2763 writel(vlan_tag, &desc->vlanres);
2764 #endif
2765 } else {
2766 desc->addr.addrhi = addr >> 32;
2767 desc->addr.addrlo = addr;
2768 desc->flagsize = flagsize;
2769 #if ACENIC_DO_VLAN
2770 desc->vlanres = vlan_tag;
2771 #endif
2772 }
2773 }
2774
2775
2776 static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev)
2777 {
2778 struct ace_private *ap = dev->priv;
2779 struct ace_regs *regs = ap->regs;
2780 struct tx_desc *desc;
2781 u32 idx, flagsize;
2782
2783 /*
2784 * This only happens with pre-softnet, i.e. 2.2.x kernels.
2785 */
2786 if (early_stop_netif_stop_queue(dev))
2787 return 1;
2788
2789 restart:
2790 idx = ap->tx_prd;
2791
2792 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2793 goto overflow;
2794
2795 #if MAX_SKB_FRAGS
2796 if (!skb_shinfo(skb)->nr_frags)
2797 #endif
2798 {
2799 dma_addr_t mapping;
2800 u32 vlan_tag = 0;
2801
2802 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2803 flagsize = (skb->len << 16) | (BD_FLG_END);
2804 if (skb->ip_summed == CHECKSUM_HW)
2805 flagsize |= BD_FLG_TCP_UDP_SUM;
2806 #if ACENIC_DO_VLAN
2807 if (vlan_tx_tag_present(skb)) {
2808 flagsize |= BD_FLG_VLAN_TAG;
2809 vlan_tag = vlan_tx_tag_get(skb);
2810 }
2811 #endif
2812 desc = ap->tx_ring + idx;
2813 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2814
2815 /* Look at ace_tx_int for explanations. */
2816 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2817 flagsize |= BD_FLG_COAL_NOW;
2818
2819 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2820 }
2821 #if MAX_SKB_FRAGS
2822 else {
2823 dma_addr_t mapping;
2824 u32 vlan_tag = 0;
2825 int i, len = 0;
2826
2827 mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2828 flagsize = ((skb->len - skb->data_len) << 16);
2829 if (skb->ip_summed == CHECKSUM_HW)
2830 flagsize |= BD_FLG_TCP_UDP_SUM;
2831 #if ACENIC_DO_VLAN
2832 if (vlan_tx_tag_present(skb)) {
2833 flagsize |= BD_FLG_VLAN_TAG;
2834 vlan_tag = vlan_tx_tag_get(skb);
2835 }
2836 #endif
2837
2838 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
2839
2840 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2841
2842 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2843 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2844 struct tx_ring_info *info;
2845
2846 len += frag->size;
2847 info = ap->skb->tx_skbuff + idx;
2848 desc = ap->tx_ring + idx;
2849
2850 mapping = pci_map_page(ap->pdev, frag->page,
2851 frag->page_offset, frag->size,
2852 PCI_DMA_TODEVICE);
2853
2854 flagsize = (frag->size << 16);
2855 if (skb->ip_summed == CHECKSUM_HW)
2856 flagsize |= BD_FLG_TCP_UDP_SUM;
2857 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2858
2859 if (i == skb_shinfo(skb)->nr_frags - 1) {
2860 flagsize |= BD_FLG_END;
2861 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2862 flagsize |= BD_FLG_COAL_NOW;
2863
2864 /*
2865 * Only the last fragment frees
2866 * the skb!
2867 */
2868 info->skb = skb;
2869 } else {
2870 info->skb = NULL;
2871 }
2872 pci_unmap_addr_set(info, mapping, mapping);
2873 pci_unmap_len_set(info, maplen, frag->size);
2874 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2875 }
2876 }
2877 #endif
2878
2879 wmb();
2880 ap->tx_prd = idx;
2881 ace_set_txprd(regs, ap, idx);
2882
2883 if (flagsize & BD_FLG_COAL_NOW) {
2884 netif_stop_queue(dev);
2885
2886 /*
2887 * A TX-descriptor producer (an IRQ) might have gotten
2888 * in between, making the ring free again. Since xmit is
2889 * serialized, this is the only situation we have to
2890 * re-test.
2891 */
2892 if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
2893 netif_wake_queue(dev);
2894 }
2895
2896 dev->trans_start = jiffies;
2897 return 0;
2898
2899 overflow:
2900 /*
2901 * This race condition is unavoidable with lock-free drivers.
2902 * We wake up the queue _before_ tx_prd is advanced, so that we can
2903 * enter hard_start_xmit too early, while tx ring still looks closed.
2904 * This happens ~1-4 times per 100000 packets, so it is cheap
2905 * enough to loop here, syncing with the other CPU. Probably, we
2906 * need an additional wmb() in ace_tx_intr as well.
2907 *
2908 * Note that this race is relieved by reserving one more entry
2909 * in the tx ring than is strictly necessary (see the original
2910 * non-SG driver). However, with SG we need to reserve
2911 * 2*MAX_SKB_FRAGS+1, which is already overkill.
2912 *
2913 * The alternative is to return 1 without throttling the queue.
2914 * In that case the loop becomes longer with no added benefit.
2915 */
2916 barrier();
2917 goto restart;
2918 }
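/*
 * Illustrative sketch (not driver code): the descriptors built above
 * pack the fragment length and the BD_FLG_* bits into one 32-bit
 * "flagsize" word, length in the high 16 bits. A self-contained
 * pack/unpack pair, assuming only that layout; the flag value used
 * below is hypothetical.
 */
#if 0	/* example only -- never compiled into the driver */
#include <assert.h>

static unsigned pack_flagsize(unsigned len, unsigned flags)
{
	return (len << 16) | (flags & 0xffff);
}

static void example_flagsize(void)
{
	unsigned fs = pack_flagsize(1514, 0x0004 /* hypothetical flag bit */);

	assert((fs >> 16) == 1514);		/* length recovered */
	assert((fs & 0xffff) == 0x0004);	/* flags recovered */
}
#endif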
2919
2920
2921 static int ace_change_mtu(struct net_device *dev, int new_mtu)
2922 {
2923 struct ace_private *ap = dev->priv;
2924 struct ace_regs *regs = ap->regs;
2925
2926 if (new_mtu > ACE_JUMBO_MTU)
2927 return -EINVAL;
2928
2929 writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
2930 dev->mtu = new_mtu;
2931
2932 if (new_mtu > ACE_STD_MTU) {
2933 if (!(ap->jumbo)) {
2934 printk(KERN_INFO "%s: Enabling Jumbo frame "
2935 "support\n", dev->name);
2936 ap->jumbo = 1;
2937 if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
2938 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
2939 ace_set_rxtx_parms(dev, 1);
2940 }
2941 } else {
2942 while (test_and_set_bit(0, &ap->jumbo_refill_busy));
2943 ace_sync_irq(dev->irq);
2944 ace_set_rxtx_parms(dev, 0);
2945 if (ap->jumbo) {
2946 struct cmd cmd;
2947
2948 cmd.evt = C_RESET_JUMBO_RNG;
2949 cmd.code = 0;
2950 cmd.idx = 0;
2951 ace_issue_cmd(regs, &cmd);
2952 }
2953 }
2954
2955 return 0;
2956 }
2957
2958
2959 static int ace_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2960 {
2961 struct ace_private *ap = dev->priv;
2962 struct ace_regs *regs = ap->regs;
2963 #ifdef SIOCETHTOOL
2964 struct ethtool_cmd ecmd;
2965 u32 link, speed;
2966
2967 if (cmd != SIOCETHTOOL)
2968 return -EOPNOTSUPP;
2969 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof(ecmd)))
2970 return -EFAULT;
2971 switch (ecmd.cmd) {
2972 case ETHTOOL_GSET:
2973 ecmd.supported =
2974 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2975 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2976 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
2977 SUPPORTED_Autoneg | SUPPORTED_FIBRE);
2978
2979 ecmd.port = PORT_FIBRE;
2980 ecmd.transceiver = XCVR_INTERNAL;
2981 ecmd.phy_address = 0;
2982
2983 link = readl(&regs->GigLnkState);
2984 if (link & LNK_1000MB)
2985 ecmd.speed = SPEED_1000;
2986 else {
2987 link = readl(&regs->FastLnkState);
2988 if (link & LNK_100MB)
2989 ecmd.speed = SPEED_100;
2990 else if (link & LNK_10MB)
2991 ecmd.speed = SPEED_10;
2992 else
2993 ecmd.speed = 0;
2994 }
2995 if (link & LNK_FULL_DUPLEX)
2996 ecmd.duplex = DUPLEX_FULL;
2997 else
2998 ecmd.duplex = DUPLEX_HALF;
2999
3000 if (link & LNK_NEGOTIATE)
3001 ecmd.autoneg = AUTONEG_ENABLE;
3002 else
3003 ecmd.autoneg = AUTONEG_DISABLE;
3004
3005 #if 0
3006 /*
3007 * Current struct ethtool_cmd is insufficient
3008 */
3009 ecmd.trace = readl(&regs->TuneTrace);
3010 
3011 ecmd.txcoal = readl(&regs->TuneTxCoalTicks);
3012 ecmd.rxcoal = readl(&regs->TuneRxCoalTicks);
3013 #endif
3014 ecmd.maxtxpkt = readl(&regs->TuneMaxTxDesc);
3015 ecmd.maxrxpkt = readl(&regs->TuneMaxRxDesc);
3016 
3017 if (copy_to_user(ifr->ifr_data, &ecmd, sizeof(ecmd)))
3018 return -EFAULT;
3019 return 0;
3020
3021 case ETHTOOL_SSET:
3022 link = readl(&regs->GigLnkState);
3023 if (link & LNK_1000MB)
3024 speed = SPEED_1000;
3025 else {
3026 link = readl(&regs->FastLnkState);
3027 if (link & LNK_100MB)
3028 speed = SPEED_100;
3029 else if (link & LNK_10MB)
3030 speed = SPEED_10;
3031 else
3032 speed = SPEED_100;
3033 }
3034
3035 link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
3036 LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
3037 if (!ACE_IS_TIGON_I(ap))
3038 link |= LNK_TX_FLOW_CTL_Y;
3039 if (ecmd.autoneg == AUTONEG_ENABLE)
3040 link |= LNK_NEGOTIATE;
3041 if (ecmd.speed != speed) {
3042 link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
3043 switch (speed) {
3044 case SPEED_1000:
3045 link |= LNK_1000MB;
3046 break;
3047 case SPEED_100:
3048 link |= LNK_100MB;
3049 break;
3050 case SPEED_10:
3051 link |= LNK_10MB;
3052 break;
3053 }
3054 }
3055 if (ecmd.duplex == DUPLEX_FULL)
3056 link |= LNK_FULL_DUPLEX;
3057
3058 if (link != ap->link) {
3059 struct cmd cmd;
3060 printk(KERN_INFO "%s: Renegotiating link state\n",
3061 dev->name);
3062
3063 ap->link = link;
3064 writel(link, &regs->TuneLink);
3065 if (!ACE_IS_TIGON_I(ap))
3066 writel(link, &regs->TuneFastLink);
3067 wmb();
3068
3069 cmd.evt = C_LNK_NEGOTIATION;
3070 cmd.code = 0;
3071 cmd.idx = 0;
3072 ace_issue_cmd(regs, &cmd);
3073 }
3074 return 0;
3075
3076 case ETHTOOL_GDRVINFO: {
3077 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
3078 strncpy(info.driver, "acenic", sizeof(info.driver) - 1);
3079 sprintf(info.fw_version, "%i.%i.%i",
3080 tigonFwReleaseMajor, tigonFwReleaseMinor,
3081 tigonFwReleaseFix);
3082 strncpy(info.version, version, sizeof(info.version) - 1);
3083 if (ap && ap->pdev)
3084 strcpy(info.bus_info, ap->pdev->slot_name);
3085 if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
3086 return -EFAULT;
3087 return 0;
3088 }
3089 default:
3090 break;
3091 }
3092
3093 #endif
3094
3095 return -EOPNOTSUPP;
3096 }
3097
3098
3099 /*
3100 * Set the hardware MAC address.
3101 */
3102 static int ace_set_mac_addr(struct net_device *dev, void *p)
3103 {
3104 struct sockaddr *addr = p;
3105 struct ace_regs *regs;
3106 u8 *da;
3107 struct cmd cmd;
3108 
3109 if (netif_running(dev))
3110 return -EBUSY;
3111 
3112 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3113 
3114 da = (u8 *)dev->dev_addr;
3115 
3116 regs = ((struct ace_private *)dev->priv)->regs;
3117 writel(da[0] << 8 | da[1], &regs->MacAddrHi);
3118 writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
3119 &regs->MacAddrLo);
3120
3121 cmd.evt = C_SET_MAC_ADDR;
3122 cmd.code = 0;
3123 cmd.idx = 0;
3124 ace_issue_cmd(regs, &cmd);
3125
3126 return 0;
3127 }
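/*
 * Illustrative sketch (not driver code): the two register writes above
 * split the 6-byte MAC across MacAddrHi (bytes 0-1) and MacAddrLo
 * (bytes 2-5), big-endian within each word. A self-contained
 * pack/unpack demo of that layout with an example address.
 */
#if 0	/* example only -- never compiled into the driver */
#include <assert.h>

static void example_mac_pack(void)
{
	unsigned char da[6] = { 0x00, 0x60, 0xcf, 0x12, 0x34, 0x56 };
	unsigned hi = (da[0] << 8) | da[1];
	unsigned lo = ((unsigned)da[2] << 24) | (da[3] << 16) |
		      (da[4] << 8) | da[5];

	assert(((hi >> 8) & 0xff) == da[0]);	/* byte 0 recovered */
	assert((lo & 0xff) == da[5]);		/* byte 5 recovered */
}
#endif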
3128
3129
3130 static void ace_set_multicast_list(struct net_device *dev)
3131 {
3132 struct ace_private *ap = dev->priv;
3133 struct ace_regs *regs = ap->regs;
3134 struct cmd cmd;
3135
3136 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
3137 cmd.evt = C_SET_MULTICAST_MODE;
3138 cmd.code = C_C_MCAST_ENABLE;
3139 cmd.idx = 0;
3140 ace_issue_cmd(regs, &cmd);
3141 ap->mcast_all = 1;
3142 } else if (ap->mcast_all) {
3143 cmd.evt = C_SET_MULTICAST_MODE;
3144 cmd.code = C_C_MCAST_DISABLE;
3145 cmd.idx = 0;
3146 ace_issue_cmd(regs, &cmd);
3147 ap->mcast_all = 0;
3148 }
3149
3150 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
3151 cmd.evt = C_SET_PROMISC_MODE;
3152 cmd.code = C_C_PROMISC_ENABLE;
3153 cmd.idx = 0;
3154 ace_issue_cmd(regs, &cmd);
3155 ap->promisc = 1;
3156 } else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
3157 cmd.evt = C_SET_PROMISC_MODE;
3158 cmd.code = C_C_PROMISC_DISABLE;
3159 cmd.idx = 0;
3160 ace_issue_cmd(regs, &cmd);
3161 ap->promisc = 0;
3162 }
3163
3164 /*
3165 * For the time being multicast relies on the upper layers
3166 * filtering it properly. The Firmware does not allow one to
3167 * set the entire multicast list at a time and keeping track of
3168 * it here is going to be messy.
3169 */
3170 if ((dev->mc_count) && !(ap->mcast_all)) {
3171 cmd.evt = C_SET_MULTICAST_MODE;
3172 cmd.code = C_C_MCAST_ENABLE;
3173 cmd.idx = 0;
3174 ace_issue_cmd(regs, &cmd);
3175 } else if (!ap->mcast_all) {
3176 cmd.evt = C_SET_MULTICAST_MODE;
3177 cmd.code = C_C_MCAST_DISABLE;
3178 cmd.idx = 0;
3179 ace_issue_cmd(regs, &cmd);
3180 }
3181 }
3182
3183
3184 static struct net_device_stats *ace_get_stats(struct net_device *dev)
3185 {
3186 struct ace_private *ap = dev->priv;
3187 struct ace_mac_stats *mac_stats =
3188 (struct ace_mac_stats *)ap->regs->Stats;
3189
3190 ap->stats.rx_missed_errors = readl(&mac_stats->drop_space);
3191 ap->stats.multicast = readl(&mac_stats->kept_mc);
3192 ap->stats.collisions = readl(&mac_stats->coll);
3193
3194 return &ap->stats;
3195 }
3196
3197
3198 static void __init ace_copy(struct ace_regs *regs, void *src,
3199 u32 dest, int size)
3200 {
3201 unsigned long tdest;
3202 u32 *wsrc;
3203 short tsize, i;
3204
3205 if (size <= 0)
3206 return;
3207
3208 while (size > 0) {
3209 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
3210 min_t(u32, size, ACE_WINDOW_SIZE));
3211 tdest = (unsigned long)&regs->Window +
3212 (dest & (ACE_WINDOW_SIZE - 1));
3213 writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
3214 /*
3215 * This requires byte swapping on big endian, however
3216 * writel does that for us
3217 */
3218 wsrc = src;
3219 for (i = 0; i < (tsize / 4); i++) {
3220 writel(wsrc[i], tdest + i*4);
3221 }
3222 dest += tsize;
3223 src += tsize;
3224 size -= tsize;
3225 }
3226
3227 return;
3228 }
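/*
 * Illustrative sketch (not driver code): the chunking expression above
 * caps each pass at whichever is smaller -- the bytes left, the window
 * size, or the distance to the next window boundary -- so no single
 * copy ever crosses a window. A self-contained check of that claim;
 * WINDOW_SIZE is hypothetical, the real one is ACE_WINDOW_SIZE.
 */
#if 0	/* example only -- never compiled into the driver */
#include <assert.h>

#define WINDOW_SIZE 0x800	/* hypothetical window size */

static void example_windowed_walk(unsigned dest, int size)
{
	while (size > 0) {
		int tsize = (~dest & (WINDOW_SIZE - 1)) + 1;

		if (tsize > size)
			tsize = size;
		if (tsize > WINDOW_SIZE)
			tsize = WINDOW_SIZE;
		/* first and last byte of the chunk share one window */
		assert((dest & ~(WINDOW_SIZE - 1)) ==
		       ((dest + tsize - 1) & ~(WINDOW_SIZE - 1)));
		dest += tsize;
		size -= tsize;
	}
}
#endif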
3229
3230
3231 static void __init ace_clear(struct ace_regs *regs, u32 dest, int size)
3232 {
3233 unsigned long tdest;
3234 short tsize = 0, i;
3235
3236 if (size <= 0)
3237 return;
3238
3239 while (size > 0) {
3240 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
3241 min_t(u32, size, ACE_WINDOW_SIZE));
3242 tdest = (unsigned long)&regs->Window +
3243 (dest & (ACE_WINDOW_SIZE - 1));
3244 writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
3245
3246 for (i = 0; i < (tsize / 4); i++) {
3247 writel(0, tdest + i*4);
3248 }
3249
3250 dest += tsize;
3251 size -= tsize;
3252 }
3253
3254 return;
3255 }
3256
3257
3258 /*
3259 * Download the firmware into the SRAM on the NIC
3260 *
3261 * This operation requires the NIC to be halted and is performed with
3262 * interrupts disabled and with the spinlock held.
3263 */
3264 int __init ace_load_firmware(struct net_device *dev)
3265 {
3266 struct ace_private *ap;
3267 struct ace_regs *regs;
3268
3269 ap = dev->priv;
3270 regs = ap->regs;
3271
3272 if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
3273 printk(KERN_ERR "%s: trying to download firmware while the "
3274 "CPU is running!\n", dev->name);
3275 return -EFAULT;
3276 }
3277
3278 /*
3279 * Do not try to clear more than 512KB or we end up seeing
3280 * funny things on NICs with only 512KB SRAM
3281 */
3282 ace_clear(regs, 0x2000, 0x80000-0x2000);
3283 if (ACE_IS_TIGON_I(ap)) {
3284 ace_copy(regs, tigonFwText, tigonFwTextAddr, tigonFwTextLen);
3285 ace_copy(regs, tigonFwData, tigonFwDataAddr, tigonFwDataLen);
3286 ace_copy(regs, tigonFwRodata, tigonFwRodataAddr,
3287 tigonFwRodataLen);
3288 ace_clear(regs, tigonFwBssAddr, tigonFwBssLen);
3289 ace_clear(regs, tigonFwSbssAddr, tigonFwSbssLen);
3290 } else if (ap->version == 2) {
3291 ace_clear(regs, tigon2FwBssAddr, tigon2FwBssLen);
3292 ace_clear(regs, tigon2FwSbssAddr, tigon2FwSbssLen);
3293 ace_copy(regs, tigon2FwText, tigon2FwTextAddr, tigon2FwTextLen);
3294 ace_copy(regs, tigon2FwRodata, tigon2FwRodataAddr,
3295 tigon2FwRodataLen);
3296 ace_copy(regs, tigon2FwData, tigon2FwDataAddr, tigon2FwDataLen);
3297 }
3298
3299 return 0;
3300 }
3301
3302
3303 /*
3304 * The eeprom on the AceNIC is an Atmel i2c EEPROM.
3305 *
3306 * Accessing the EEPROM is `interesting' to say the least - don't read
3307 * this code right after dinner.
3308 *
3309 * This is all about black magic and bit-banging the device .... I
3310 * wonder in what hospital they have put the guy who designed the i2c
3311 * specs.
3312 *
3313 * Oh yes, this is only the beginning!
3314 *
3315 * Thanks to Stevarino Webinski for helping track down the bugs in
3316 * the i2c readout code by beta testing all my hacks.
3317 */
3318 static void __init eeprom_start(struct ace_regs *regs)
3319 {
3320 u32 local;
3321
3322 readl(&regs->LocalCtrl);
3323 udelay(ACE_SHORT_DELAY);
3324 local = readl(&regs->LocalCtrl);
3325 local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
3326 writel(local, &regs->LocalCtrl);
3327 readl(&regs->LocalCtrl);
3328 mb();
3329 udelay(ACE_SHORT_DELAY);
3330 local |= EEPROM_CLK_OUT;
3331 writel(local, &regs->LocalCtrl);
3332 readl(&regs->LocalCtrl);
3333 mb();
3334 udelay(ACE_SHORT_DELAY);
3335 local &= ~EEPROM_DATA_OUT;
3336 writel(local, &regs->LocalCtrl);
3337 readl(&regs->LocalCtrl);
3338 mb();
3339 udelay(ACE_SHORT_DELAY);
3340 local &= ~EEPROM_CLK_OUT;
3341 writel(local, &regs->LocalCtrl);
3342 readl(&regs->LocalCtrl);
3343 mb();
3344 }
3345
3346
3347 static void __init eeprom_prep(struct ace_regs *regs, u8 magic)
3348 {
3349 short i;
3350 u32 local;
3351
3352 udelay(ACE_SHORT_DELAY);
3353 local = readl(&regs->LocalCtrl);
3354 local &= ~EEPROM_DATA_OUT;
3355 local |= EEPROM_WRITE_ENABLE;
3356 writel(local, &regs->LocalCtrl);
3357 readl(&regs->LocalCtrl);
3358 mb();
3359 
3360 for (i = 0; i < 8; i++, magic <<= 1) {
3361 udelay(ACE_SHORT_DELAY);
3362 if (magic & 0x80)
3363 local |= EEPROM_DATA_OUT;
3364 else
3365 local &= ~EEPROM_DATA_OUT;
3366 writel(local, &regs->LocalCtrl);
3367 readl(&regs->LocalCtrl);
3368 mb();
3369 
3370 udelay(ACE_SHORT_DELAY);
3371 local |= EEPROM_CLK_OUT;
3372 writel(local, &regs->LocalCtrl);
3373 readl(&regs->LocalCtrl);
3374 mb();
3375 udelay(ACE_SHORT_DELAY);
3376 local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
3377 writel(local, &regs->LocalCtrl);
3378 readl(&regs->LocalCtrl);
3379 mb();
3380 }
3381 }
3382
3383
3384 static int __init eeprom_check_ack(struct ace_regs *regs)
3385 {
3386 int state;
3387 u32 local;
3388
3389 local = readl(&regs->LocalCtrl);
3390 local &= ~EEPROM_WRITE_ENABLE;
3391 writel(local, &regs->LocalCtrl);
3392 readl(&regs->LocalCtrl);
3393 mb();
3394 udelay(ACE_LONG_DELAY);
3395 local |= EEPROM_CLK_OUT;
3396 writel(local, &regs->LocalCtrl);
3397 readl(&regs->LocalCtrl);
3398 mb();
3399 udelay(ACE_SHORT_DELAY);
3400 /* sample data in middle of high clk */
3401 state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
3402 udelay(ACE_SHORT_DELAY);
3403 mb();
3404 writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3405 readl(&regs->LocalCtrl);
3406 mb();
3407
3408 return state;
3409 }
3410
3411
3412 static void __init eeprom_stop(struct ace_regs *regs)
3413 {
3414 u32 local;
3415
3416 udelay(ACE_SHORT_DELAY);
3417 local = readl(&regs->LocalCtrl);
3418 local |= EEPROM_WRITE_ENABLE;
3419 writel(local, &regs->LocalCtrl);
3420 readl(&regs->LocalCtrl);
3421 mb();
3422 udelay(ACE_SHORT_DELAY);
3423 local &= ~EEPROM_DATA_OUT;
3424 writel(local, &regs->LocalCtrl);
3425 readl(&regs->LocalCtrl);
3426 mb();
3427 udelay(ACE_SHORT_DELAY);
3428 local |= EEPROM_CLK_OUT;
3429 writel(local, &regs->LocalCtrl);
3430 readl(&regs->LocalCtrl);
3431 mb();
3432 udelay(ACE_SHORT_DELAY);
3433 local |= EEPROM_DATA_OUT;
3434 writel(local, &regs->LocalCtrl);
3435 readl(&regs->LocalCtrl);
3436 mb();
3437 udelay(ACE_LONG_DELAY);
3438 local &= ~EEPROM_CLK_OUT;
3439 writel(local, &regs->LocalCtrl);
3440 mb();
3441 }
3442
3443
3444 /*
3445 * Read a whole byte from the EEPROM.
3446 */
3447 static int __init read_eeprom_byte(struct net_device *dev,
3448 unsigned long offset)
3449 {
3450 struct ace_regs *regs;
3451 unsigned long flags;
3452 u32 local;
3453 int result = 0;
3454 short i;
3455
3456 if (!dev) {
3457 printk(KERN_ERR "No device!\n");
3458 result = -ENODEV;
3459 goto eeprom_read_error;
3460 }
3461
3462 regs = ((struct ace_private *)dev->priv)->regs;
3463
3464 /*
3465 * Don't take interrupts on this CPU while bit banging
3466 * the %#%#@$ I2C device
3467 */
3468 local_irq_save(flags);
3469
3470 eeprom_start(regs);
3471
3472 eeprom_prep(regs, EEPROM_WRITE_SELECT);
3473 if (eeprom_check_ack(regs)) {
3474 local_irq_restore(flags);
3475 printk(KERN_ERR "%s: Unable to sync eeprom\n", dev->name);
3476 result = -EIO;
3477 goto eeprom_read_error;
3478 }
3479
3480 eeprom_prep(regs, (offset >> 8) & 0xff);
3481 if (eeprom_check_ack(regs)) {
3482 local_irq_restore(flags);
3483 printk(KERN_ERR "%s: Unable to set address byte 0\n",
3484 dev->name);
3485 result = -EIO;
3486 goto eeprom_read_error;
3487 }
3488
3489 eeprom_prep(regs, offset & 0xff);
3490 if (eeprom_check_ack(regs)) {
3491 local_irq_restore(flags);
3492 printk(KERN_ERR "%s: Unable to set address byte 1\n",
3493 dev->name);
3494 result = -EIO;
3495 goto eeprom_read_error;
3496 }
3497
3498 eeprom_start(regs);
3499 eeprom_prep(regs, EEPROM_READ_SELECT);
3500 if (eeprom_check_ack(regs)) {
3501 local_irq_restore(flags);
3502 printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
3503 dev->name);
3504 result = -EIO;
3505 goto eeprom_read_error;
3506 }
3507
3508 for (i = 0; i < 8; i++) {
3509 local = readl(&regs->LocalCtrl);
3510 local &= ~EEPROM_WRITE_ENABLE;
3511 writel(local, &regs->LocalCtrl);
3512 readl(&regs->LocalCtrl);
3513 udelay(ACE_LONG_DELAY);
3514 mb();
3515 local |= EEPROM_CLK_OUT;
3516 writel(local, &regs->LocalCtrl);
3517 readl(&regs->LocalCtrl);
3518 mb();
3519 udelay(ACE_SHORT_DELAY);
3520 /* sample data mid high clk */
3521 result = (result << 1) |
3522 ((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
3523 udelay(ACE_SHORT_DELAY);
3524 mb();
3525 local = readl(&regs->LocalCtrl);
3526 local &= ~EEPROM_CLK_OUT;
3527 writel(local, &regs->LocalCtrl);
3528 readl(&regs->LocalCtrl);
3529 udelay(ACE_SHORT_DELAY);
3530 mb();
3531 if (i == 7) {
3532 local |= EEPROM_WRITE_ENABLE;
3533 writel(local, &regs->LocalCtrl);
3534 readl(&regs->LocalCtrl);
3535 mb();
3536 udelay(ACE_SHORT_DELAY);
3537 }
3538 }
3539 
3540 local |= EEPROM_DATA_OUT;
3541 writel(local, &regs->LocalCtrl);
3542 readl(&regs->LocalCtrl);
3543 mb();
3544 udelay(ACE_SHORT_DELAY);
3545 writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
3546 readl(&regs->LocalCtrl);
3547 udelay(ACE_LONG_DELAY);
3548 writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3549 readl(&regs->LocalCtrl);
3550 mb();
3551 udelay(ACE_SHORT_DELAY);
3552 eeprom_stop(regs);
3553
3554 local_irq_restore(flags);
3555 out:
3556 return result;
3557
3558 eeprom_read_error:
3559 printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
3560 dev->name, offset);
3561 goto out;
3562 }
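/*
 * Illustrative sketch (not driver code): the read loop above clocks
 * the EEPROM eight times and shifts one sampled data bit in per clock,
 * MSB first. The same assembly step, isolated; bits[] stands in for
 * successive samples of EEPROM_DATA_IN.
 */
#if 0	/* example only -- never compiled into the driver */
#include <assert.h>

static void example_bit_assembly(void)
{
	int bits[8] = { 1, 0, 1, 0, 0, 1, 0, 1 };	/* sampled MSB first */
	int result = 0, i;

	for (i = 0; i < 8; i++)
		result = (result << 1) | bits[i];

	assert(result == 0xa5);	/* 10100101 reassembled */
}
#endif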
3563
3564
3565 /*
3566 * Local variables:
3567 * compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c"
3568 * End:
3569 */
3570