/*
 * drivers/net/titan_ge.c - Driver for Titan ethernet ports
 *
 * Copyright (C) 2003 PMC-Sierra Inc.
 * Author : Manish Lachwani (lachwani@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * The MAC unit of the Titan consists of the following:
 *
 * -> XDMA Engine to move data between memory and the MAC packet FIFO
 * -> FIFO is where the incoming and outgoing data is placed
 * -> TRTG is the unit that pulls the data from the FIFO for Tx and pushes
 *    the data into the FIFO for Rx
 * -> TMAC is the outgoing MAC interface and RMAC is the incoming.
 * -> AFX is the address filtering block
 * -> GMII block to communicate with the PHY
 *
 * Rx will look like the following:
 * GMII --> RMAC --> AFX --> TRTG --> Rx FIFO --> XDMA --> CPU memory
 *
 * Tx will look like the following:
 * CPU memory --> XDMA --> Tx FIFO --> TRTG --> TMAC --> GMII
 *
 * The Titan driver has support for the following performance features:
 * -> Rx side checksumming
 * -> Jumbo Frames
 * -> Interrupt Coalescing
 * -> Rx NAPI
 * -> SKB Recycling
 * -> Transmit/Receive descriptors in SRAM
 * -> Fast routing for IP forwarding
 */
48
49 #include <linux/config.h>
50 #include <linux/version.h>
51 #include <linux/module.h>
52 #include <linux/kernel.h>
53 #include <linux/config.h>
54 #include <linux/sched.h>
55 #include <linux/ptrace.h>
56 #include <linux/fcntl.h>
57 #include <linux/ioport.h>
58 #include <linux/interrupt.h>
59 #include <linux/slab.h>
60 #include <linux/string.h>
61 #include <linux/errno.h>
62 #include <linux/ip.h>
63 #include <linux/init.h>
64 #include <linux/in.h>
65 #include <linux/pci.h>
66
67 #include <linux/netdevice.h>
68 #include <linux/etherdevice.h>
69 #include <linux/skbuff.h>
70 #include <linux/mii.h>
71 #include <linux/delay.h>
72 #include <linux/skbuff.h>
73 #include <linux/prefetch.h>
74
75 /* For MII specifc registers, titan_mdio.h should be included */
76 #include <net/ip.h>
77
78 #include <asm/bitops.h>
79 #include <asm/io.h>
80 #include <asm/types.h>
81 #include <asm/pgtable.h>
82 #include <asm/system.h>
83
84 #include "titan_ge.h"
85 #include "titan_mdio.h"
86

/* Static Function Declarations */
static int titan_ge_eth_open(struct net_device *);
static int titan_ge_eth_stop(struct net_device *);
static int titan_ge_change_mtu(struct net_device *, int);
static struct net_device_stats *titan_ge_get_stats(struct net_device *);
static int titan_ge_init_rx_desc_ring(titan_ge_port_info *, int, int,
                                      unsigned long, unsigned long,
                                      unsigned long);
static int titan_ge_init_tx_desc_ring(titan_ge_port_info *, int,
                                      unsigned long, unsigned long);

static int titan_ge_open(struct net_device *);
static int titan_ge_start_xmit(struct sk_buff *, struct net_device *);
static int titan_ge_stop(struct net_device *);
static int titan_ge_set_mac_address(struct net_device *, void *);

static unsigned long titan_ge_tx_coal(unsigned long, int);
static unsigned long titan_ge_rx_coal(unsigned long, int);

static void titan_ge_port_reset(unsigned int);
static int titan_ge_free_tx_queue(titan_ge_port_info *);
static int titan_ge_rx_task(struct net_device *, titan_ge_port_info *);
static int titan_ge_port_start(struct net_device *, titan_ge_port_info *);

static int titan_ge_init(int);
static int titan_ge_return_tx_desc(titan_ge_port_info *, int);

/*
 * Some configuration for the FIFO and the XDMA channel needs
 * to be done only once for all the ports. This flag controls
 * that
 */
unsigned long config_done = 0;

/*
 * One time out of memory flag
 */
unsigned int oom_flag = 0;

#ifdef TITAN_RX_NAPI
static int titan_ge_poll(struct net_device *netdev, int *budget);
#endif

int titan_ge_receive_queue(struct net_device *, unsigned int);

/* MAC Address */
extern unsigned char titan_ge_mac_addr_base[6];

/* Support for Rx NAPI */
#ifdef TITAN_RX_NAPI
static inline void __netif_rx_complete(struct net_device *dev)
{
        if (!test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                BUG();
        list_del(&dev->poll_list);
        mb();
        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
#endif

/*
 * The Titan GE has two alignment requirements:
 * -> skb->data to be cacheline aligned (32 byte)
 * -> IP header alignment to 16 bytes
 *
 * The latter is not implemented. So, that results in an extra copy on
 * the Rx. This is a big performance hog. For the former case, the
 * dev_alloc_skb() has been replaced with titan_ge_alloc_skb(). The size
 * requested is calculated:
 *
 * Ethernet Frame Size : 1518
 * Ethernet Header : 14
 * Future Titan change for IP header alignment : 2
 *
 * Hence, we allocate (1518 + 14 + 2 + 64) = 1598 bytes. For the future
 * revisions of the chip that do support IP header alignment, we will use
 * skb_reserve().
 */

#define ALIGNED_RX_SKB_ADDR(addr) \
        ((((unsigned long)(addr) + (64UL - 1UL)) \
          & ~(64UL - 1UL)) - (unsigned long)(addr))
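
/*
 * Worked example of the arithmetic above, for a hypothetical buffer
 * address: with addr = 0x1234, addr + 63 = 0x1273, and masking with
 * ~63 rounds down to 0x1240, so the macro yields 0x1240 - 0x1234 = 12,
 * i.e. the padding needed to reach the next 64-byte boundary. For an
 * already aligned address the result is 0.
 */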

#define titan_ge_alloc_skb(__length, __gfp_flags) \
({      struct sk_buff *__skb; \
        __skb = alloc_skb((__length) + 64, (__gfp_flags)); \
        if (__skb) { \
                int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
                if (__offset) \
                        skb_reserve(__skb, __offset); \
        } \
        __skb; \
})
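
/*
 * Typical call site, as used on the Rx refill path below:
 *
 *      skb = titan_ge_alloc_skb(TITAN_GE_STD_BUFSIZE, GFP_ATOMIC);
 *
 * The extra 64 bytes requested by the macro guarantee that the
 * cacheline alignment above can always be satisfied by skb_reserve().
 */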

/*
 * Configure the GMII block of the Titan based
 * on what the PHY tells us
 */
static void titan_ge_gmii_config(int port_num)
{
        volatile unsigned int reg_data = 0, phy_reg;
        int err;

        err = titan_ge_mdio_read(port_num,
                        TITAN_GE_MDIO_PHY_STATUS, &phy_reg);

        if (err == TITAN_GE_MDIO_ERROR) {
                printk(KERN_ERR
                        "Could not read PHY control register 0x11\n");
                printk(KERN_ERR
                        "Setting speed to 1000 Mbps and Duplex to Full\n");

                return;
        }

        err = titan_ge_mdio_write(port_num,
                        TITAN_GE_MDIO_PHY_IE, 0);

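        /*
         * Decode of the PHY status word, as implied by the branches
         * below: bit 15 (0x8000) reports 1000 Mbps, bit 14 (0x4000)
         * reports 100 Mbps and bit 13 (0x2000) reports full duplex.
         * This mapping is inferred from the existing comments, not
         * from a datasheet.
         */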
        if (phy_reg & 0x8000) {
                if (phy_reg & 0x2000) {
                        /* Full Duplex and 1000 Mbps */
                        TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
                                        (port_num << 12)), 0x201);
                } else {
                        /* Half Duplex and 1000 Mbps */
                        TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
                                        (port_num << 12)), 0x2201);
                }
        }
        if (phy_reg & 0x4000) {
                if (phy_reg & 0x2000) {
                        /* Full Duplex and 100 Mbps */
                        TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
                                        (port_num << 12)), 0x100);
                } else {
                        /* Half Duplex and 100 Mbps */
                        TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_MODE +
                                        (port_num << 12)), 0x2100);
                }
        }
        reg_data = TITAN_GE_READ(TITAN_GE_GMII_CONFIG_GENERAL +
                                (port_num << 12));
        reg_data |= 0x3;
        TITAN_GE_WRITE((TITAN_GE_GMII_CONFIG_GENERAL +
                        (port_num << 12)), reg_data);
}

/*
 * Enable the TMAC if it is not
 */
static void titan_ge_enable_tx(unsigned int port_num)
{
        unsigned long reg_data;

        reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
                                (port_num << 12));
        if (!(reg_data & 0x8000)) {
                printk("TMAC disabled for port %d!!\n", port_num);

                reg_data |= 0x0001;     /* Enable TMAC */
                reg_data |= 0x4000;     /* CRC Check Enable */
                reg_data |= 0x2000;     /* Padding enable */
                reg_data |= 0x0800;     /* CRC Add enable */
                reg_data |= 0x0080;     /* PAUSE frame */

                TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
                                (port_num << 12)), reg_data);
        }
}

/*
 * Tx Timeout function
 */
static void titan_ge_tx_timeout(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth = netdev->priv;

        printk(KERN_INFO "%s: TX timeout ", netdev->name);
        printk(KERN_INFO "Resetting card\n");

        /* Do the reset outside of interrupt context */
        schedule_task(&titan_ge_eth->tx_timeout_task);
}

/*
 * Update the AFX tables for UC and MC for slice 0 only
 */
static void titan_ge_update_afx(titan_ge_port_info *titan_ge_eth)
{
        unsigned int i;
        volatile unsigned long reg_data = 0;
        u8 p_addr[6];
        int port = titan_ge_eth->port_num;

        memcpy(p_addr, titan_ge_eth->port_mac_addr, 6);

        /* Set the MAC address here for TMAC and RMAC */
        TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port << 12)),
                        ((p_addr[5] << 8) | p_addr[4]));
        TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port << 12)),
                        ((p_addr[3] << 8) | p_addr[2]));
        TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port << 12)),
                        ((p_addr[1] << 8) | p_addr[0]));

        TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port << 12)),
                        ((p_addr[5] << 8) | p_addr[4]));
        TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port << 12)),
                        ((p_addr[3] << 8) | p_addr[2]));
        TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port << 12)),
                        ((p_addr[1] << 8) | p_addr[0]));

        TITAN_GE_WRITE((0x112c | (port << 12)), 0x1);
        /* Configure the eight address filters */
        for (i = 0; i < 8; i++) {
                /* Select each of the eight filters */
                TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_2 +
                                (port << 12)), i);

                /* Configure the match */
                reg_data = 0x9; /* Forward Enable Bit */
                TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_0 +
                                (port << 12)), reg_data);

                /* Finally, AFX Exact Match Address Registers */
                TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_LOW + (port << 12)),
                                ((p_addr[1] << 8) | p_addr[0]));
                TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_MID + (port << 12)),
                                ((p_addr[3] << 8) | p_addr[2]));
                TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_HIGH + (port << 12)),
                                ((p_addr[5] << 8) | p_addr[4]));

                /* VLAN id set to 0 */
                TITAN_GE_WRITE((TITAN_GE_AFX_EXACT_MATCH_VID +
                                (port << 12)), 0);
        }
}

/*
 * Actual routine to reset the adapter when the timeout occurred
 */
static void titan_ge_tx_timeout_task(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth = netdev->priv;
        int port = titan_ge_eth->port_num;

        printk("Titan GE: Transmit timed out. Resetting ...\n");

        /* Dump debug info */
        printk(KERN_ERR "TRTG cause : %lx\n",
                (unsigned long)TITAN_GE_READ(0x100c + (port << 12)));

        /* Fix this for the other ports */
        printk(KERN_ERR "FIFO cause : %lx\n",
                (unsigned long)TITAN_GE_READ(0x482c));
        printk(KERN_ERR "IE cause : %lx\n",
                (unsigned long)TITAN_GE_READ(0x0040));
        printk(KERN_ERR "XDMA GDI ERROR : %lx\n",
                (unsigned long)TITAN_GE_READ(0x5008 + (port << 8)));
        printk(KERN_ERR "CHANNEL ERROR: %lx\n",
                (unsigned long)TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT
                                        + (port << 8)));

        netif_device_detach(netdev);
        titan_ge_port_reset(titan_ge_eth->port_num);
        titan_ge_port_start(netdev, titan_ge_eth);
        netif_device_attach(netdev);
}

/*
 * Change the MTU of the Ethernet Device
 */
static int titan_ge_change_mtu(struct net_device *netdev, int new_mtu)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num;
        unsigned long flags;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        spin_lock_irqsave(&titan_ge_eth->lock, flags);

        if ((new_mtu > 9500) || (new_mtu < 64)) {
                spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;

        /* Now we have to reopen the interface so that SKBs with the new
         * size will be allocated */

        if (netif_running(netdev)) {
                if (titan_ge_eth_stop(netdev) != TITAN_OK) {
                        printk(KERN_ERR
                                "%s: Fatal error on stopping device\n",
                                netdev->name);
                        spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
                        return -1;
                }

                if (titan_ge_eth_open(netdev) != TITAN_OK) {
                        printk(KERN_ERR
                                "%s: Fatal error on opening device\n",
                                netdev->name);
                        spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
                        return -1;
                }
        }

        spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
        return 0;
}

/*
 * Reset the XDMA unit due to errors
 */
static void titan_ge_xdma_reset(void)
{
        unsigned long reg_data;

        reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG);
        reg_data |= 0x80000000;
        TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data);

        mdelay(2);

        reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG);
        reg_data &= ~(0x80000000);
        TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data);
}

/*
 * Titan GbE Interrupt Handler. All the three ports send interrupt to one line
 * only. Once an interrupt is triggered, figure out the port and then check
 * the channel.
 */
static irqreturn_t titan_ge_int_handler(int irq, void *dev_id,
                                        struct pt_regs *regs)
{
        struct net_device *netdev = (struct net_device *) dev_id;
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num, reg_data;
        unsigned long eth_int_cause_error = 0, is;
        unsigned long eth_int_cause1;
        int err = 0;
#ifdef CONFIG_SMP
        unsigned long eth_int_cause2;
#endif

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        /* Ack the CPU interrupt */
        if (port_num == 1) {
#ifdef CONFIG_MIPS64
                is = *(volatile u_int32_t *)(0xfffffffffb001b00);
                *(volatile u_int32_t *)(0xfffffffffb001b0c) = is;
#else
                is = *(volatile u_int32_t *)(0xfb001b00);
                *(volatile u_int32_t *)(0xfb001b0c) = is;
#endif

#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS64
                is = *(volatile u_int32_t *)(0xfffffffffb002b00);
                *(volatile u_int32_t *)(0xfffffffffb002b0c) = is;
#else
                is = *(volatile u_int32_t *)(0xfb002b00);
                *(volatile u_int32_t *)(0xfb002b0c) = is;
#endif
#endif
        }

        if (port_num == 0) {
#ifdef CONFIG_MIPS64
                is = *(volatile u_int32_t *)(0xfffffffffb001b10);
                *(volatile u_int32_t *)(0xfffffffffb001b1c) = is;
#else
                is = *(volatile u_int32_t *)(0xfb001b10);
                *(volatile u_int32_t *)(0xfb001b1c) = is;
#endif

#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS64
                is = *(volatile u_int32_t *)(0xfffffffffb002b10);
                *(volatile u_int32_t *)(0xfffffffffb002b1c) = is;
#else
                is = *(volatile u_int32_t *)(0xfb002b10);
                *(volatile u_int32_t *)(0xfb002b1c) = is;
#endif
#endif
        }

        eth_int_cause1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
#ifdef CONFIG_SMP
        eth_int_cause2 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_B);
#endif

        /* Spurious interrupt */
#ifdef CONFIG_SMP
        if ((eth_int_cause1 == 0) && (eth_int_cause2 == 0)) {
#else
        if (eth_int_cause1 == 0) {
#endif
                eth_int_cause_error = TITAN_GE_READ(TITAN_GE_CHANNEL0_INTERRUPT +
                                                (port_num << 8));

                if (eth_int_cause_error == 0)
                        return IRQ_NONE;
        }

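        /*
         * Cause-bit layout, inferred from the masks used below and from
         * the interrupt-enable writes in titan_ge_port_start(): each
         * port owns one byte of the XDMA cause word, with bit 0 of the
         * byte flagging Rx and bit 1 flagging Tx. Hence 0x20202 tests
         * Tx on ports 0/1/2 and 0x10101 tests Rx on ports 0/1/2.
         */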
        /* Handle Tx first. No need to ack interrupts */
#ifdef CONFIG_SMP
        if ((eth_int_cause1 & 0x20202) ||
                (eth_int_cause2 & 0x20202))
#else
        if (eth_int_cause1 & 0x20202)
#endif
                titan_ge_free_tx_queue(titan_ge_eth);

#ifdef TITAN_RX_NAPI
        /* Handle the Rx next */
#ifdef CONFIG_SMP
        if ((eth_int_cause1 & 0x10101) ||
                (eth_int_cause2 & 0x10101)) {
#else
        if (eth_int_cause1 & 0x10101) {
#endif
                if (netif_rx_schedule_prep(netdev)) {
                        unsigned int ack;

                        ack = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);
                        /* Disable Tx and Rx both */
                        if (port_num == 0)
                                ack &= ~(0x3);
                        if (port_num == 1)
                                ack &= ~(0x300);

                        if (port_num == 2)
                                ack &= ~(0x30000);

                        /* Interrupts have been disabled */
                        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, ack);

                        __netif_rx_schedule(netdev);
                }
        }
#else
        titan_ge_free_tx_queue(titan_ge_eth);
        titan_ge_receive_queue(netdev, 0);

#endif
        /* Handle error interrupts */
        if (eth_int_cause_error &&
                (eth_int_cause_error != 0x2)) {
                printk(KERN_ERR
                        "XDMA Channel Error : %lx on port %d\n",
                        eth_int_cause_error, port_num);

                printk(KERN_ERR
                        "XDMA GDI Hardware error : %x on port %d\n",
                        TITAN_GE_READ(0x5008 + (port_num << 8)), port_num);

                printk(KERN_ERR
                        "XDMA currently has %d Rx descriptors\n",
                        TITAN_GE_READ(0x5048 + (port_num << 8)));

                printk(KERN_ERR
                        "XDMA currently has prefetched %d Rx descriptors\n",
                        TITAN_GE_READ(0x505c + (port_num << 8)));

                TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT +
                                (port_num << 8)), eth_int_cause_error);
        }

        /*
         * PHY interrupt to inform about the changes. Reading the
         * PHY Status register will clear the interrupt
         */
        if ((!(eth_int_cause1 & 0x30303)) &&
                (eth_int_cause_error == 0)) {
                err =
                        titan_ge_mdio_read(port_num,
                                TITAN_GE_MDIO_PHY_IS, &reg_data);

                if (reg_data & 0x0400) {
                        /* Link status change */
                        titan_ge_mdio_read(port_num,
                                TITAN_GE_MDIO_PHY_STATUS, &reg_data);
                        if (!(reg_data & 0x0400)) {
                                /* Link is down */
                                netif_carrier_off(netdev);
                                netif_stop_queue(netdev);
                        } else {
                                /* Link is up */
                                netif_carrier_on(netdev);
                                netif_wake_queue(netdev);

                                /* Enable the queue */
                                titan_ge_enable_tx(port_num);
                        }
                }
        }

        return IRQ_HANDLED;
}

/*
 * Multicast and Promiscuous mode set. The
 * set_multi entry point is called whenever the
 * multicast address list or the network interface
 * flags are updated.
 */
static void titan_ge_set_multi(struct net_device *netdev)
{
        unsigned long reg_data;
        unsigned int port_num;
        titan_ge_port_info *titan_ge_eth;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        reg_data = TITAN_GE_READ(TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
                                (port_num << 12));

        if (netdev->flags & IFF_PROMISC) {
                reg_data |= 0x2;
        } else if (netdev->flags & IFF_ALLMULTI) {
                reg_data |= 0x01;
                reg_data |= 0x400;      /* Use the 64-bit Multicast Hash bin */
        } else {
                reg_data = 0x2;
        }

        TITAN_GE_WRITE((TITAN_GE_AFX_ADDRS_FILTER_CTRL_1 +
                        (port_num << 12)), reg_data);
        if (reg_data & 0x01) {
                TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_LOW +
                                (port_num << 12)), 0xffff);
                TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDLOW +
                                (port_num << 12)), 0xffff);
                TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_MIDHI +
                                (port_num << 12)), 0xffff);
                TITAN_GE_WRITE((TITAN_GE_AFX_MULTICAST_HASH_HI +
                                (port_num << 12)), 0xffff);
        }
}

/*
 * Open the network device
 */
static int titan_ge_open(struct net_device *netdev)
{
        int retval;
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        retval = request_irq(TITAN_ETH_PORT_IRQ - port_num, titan_ge_int_handler,
                        SA_INTERRUPT | SA_SAMPLE_RANDOM, netdev->name, netdev);

        if (retval != 0) {
                printk(KERN_ERR "Cannot assign IRQ number to TITAN GE\n");
                return -1;
        } else {
                netdev->irq = TITAN_ETH_PORT_IRQ - port_num;
                printk(KERN_INFO "Assigned IRQ %d to port %d\n",
                        netdev->irq, port_num);
        }

        spin_lock_irq(&(titan_ge_eth->lock));

        if (titan_ge_eth_open(netdev) != TITAN_OK) {
                printk("%s: Error opening interface\n", netdev->name);
                spin_unlock_irq(&(titan_ge_eth->lock));
                free_irq(netdev->irq, netdev);
                return -EBUSY;
        }

        SET_MODULE_OWNER(netdev);
        spin_unlock_irq(&(titan_ge_eth->lock));
        return 0;
}

/*
 * Return the Rx buffer back to the Rx ring
 */
static int titan_ge_rx_return_buff(titan_ge_port_info *titan_ge_port,
                                struct sk_buff *skb)
{
        int rx_used_desc;
        volatile titan_ge_rx_desc *rx_desc;

        rx_used_desc = titan_ge_port->rx_used_desc_q;
        rx_desc = &(titan_ge_port->rx_desc_area[rx_used_desc]);

#ifdef TITAN_GE_JUMBO_FRAMES
        rx_desc->buffer_addr =
                pci_map_single(0, skb->data, TITAN_GE_JUMBO_BUFSIZE - 2,
                        PCI_DMA_FROMDEVICE);
#else
        rx_desc->buffer_addr =
                pci_map_single(0, skb->data, TITAN_GE_STD_BUFSIZE - 2,
                        PCI_DMA_FROMDEVICE);
#endif

        titan_ge_port->rx_skb[rx_used_desc] = skb;
        rx_desc->cmd_sts = TITAN_GE_RX_BUFFER_OWNED;

        titan_ge_port->rx_used_desc_q =
                (rx_used_desc + 1) % TITAN_GE_RX_QUEUE;

        return TITAN_OK;
}

/*
 * Allocate the SKBs for the Rx ring. Also used
 * for refilling the queue
 */
static int titan_ge_rx_task(struct net_device *netdev,
                        titan_ge_port_info *titan_ge_eth)
{
        struct sk_buff *skb;
        int count = 0;

        while (titan_ge_eth->rx_ring_skbs < titan_ge_eth->rx_ring_size) {

                /* First try to get the skb from the recycler */
#ifdef TITAN_GE_JUMBO_FRAMES
                skb = titan_ge_alloc_skb(TITAN_GE_JUMBO_BUFSIZE, GFP_ATOMIC);
#else
                skb = titan_ge_alloc_skb(TITAN_GE_STD_BUFSIZE, GFP_ATOMIC);
#endif
                if (!skb) {
                        /* OOM, set the flag */
                        printk("OOM\n");
                        oom_flag = 1;
                        break;
                }
                count++;
                skb->dev = netdev;

                titan_ge_eth->rx_ring_skbs++;

                if (titan_ge_rx_return_buff(titan_ge_eth, skb) !=
                                TITAN_OK) {
                        printk(KERN_ERR "%s: Error allocating RX Ring\n",
                                netdev->name);
                        break;
                }
        }

        return count;
}

/*
 * Actual init of the Titan GE port. There is one register for
 * the channel configuration
 */
static void titan_port_init(struct net_device *netdev,
                        titan_ge_port_info *titan_ge_eth)
{
        unsigned long reg_data;

        titan_ge_port_reset(titan_ge_eth->port_num);

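        /*
         * The reset sequences below are inferred from the masks used:
         * bits 31:30 of the channel config register appear to hold the
         * TMAC reset controls (0x80000000 set, 0xc0000000 cleared) and
         * bits 19:18 the RMAC reset controls. This is read off the
         * code, not a datasheet.
         */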
        /* First reset the TMAC */
        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
        reg_data |= 0x80000000;
        TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

        udelay(30);

        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
        reg_data &= ~(0xc0000000);
        TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

        /* Now reset the RMAC */
        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
        reg_data |= 0x00080000;
        TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);

        udelay(30);

        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG);
        reg_data &= ~(0x000c0000);
        TITAN_GE_WRITE(TITAN_GE_CHANNEL0_CONFIG, reg_data);
}

/*
 * Start the port. All the hardware specific configuration
 * for the XDMA, Tx FIFO, Rx FIFO, TMAC, RMAC, TRTG and AFX
 * go here
 */
static int titan_ge_port_start(struct net_device *netdev,
                        titan_ge_port_info *titan_port)
{
        volatile unsigned long reg_data, reg_data1;
        int count = 0;
        int port_num = titan_port->port_num;
        unsigned long reg_data_1;

        if (config_done == 0) {
                reg_data = TITAN_GE_READ(0x0004);
                reg_data |= 0x100;
                TITAN_GE_WRITE(0x0004, reg_data);

                reg_data &= ~(0x100);
                TITAN_GE_WRITE(0x0004, reg_data);

                /* Turn on GMII/MII mode and turn off TBI mode */
                reg_data = TITAN_GE_READ(TITAN_GE_TSB_CTRL_1);
                reg_data |= 0x00000700;
                reg_data &= ~(0x00800000);      /* Fencing */
#ifdef TITAN_RX_NAPI
                TITAN_GE_WRITE(0x000c, 0x00001100);
#else
                TITAN_GE_WRITE(0x000c, 0x00000100);     /* No WCIMODE */
#endif
                TITAN_GE_WRITE(TITAN_GE_TSB_CTRL_1, reg_data);

                /* Set the CPU Resource Limit register */
                TITAN_GE_WRITE(0x00f8, 0x8);

                /* Be conservative when using the BIU buffers */
                TITAN_GE_WRITE(0x0068, 0x4);
        }

#ifdef TITAN_RX_NAPI
        titan_port->tx_threshold = 0;
        titan_port->rx_threshold = 0;
#endif

        /* We need to write the descriptors for Tx and Rx */
        TITAN_GE_WRITE((TITAN_GE_CHANNEL0_TX_DESC + (port_num << 8)),
                        (unsigned long) titan_port->tx_dma);
        TITAN_GE_WRITE((TITAN_GE_CHANNEL0_RX_DESC + (port_num << 8)),
                        (unsigned long) titan_port->rx_dma);

        if (config_done == 0) {
                /* Step 1: XDMA config */
                reg_data = TITAN_GE_READ(TITAN_GE_XDMA_CONFIG);
                reg_data &= ~(0x80000000);      /* clear reset */
                reg_data |= 0x1 << 29;  /* sparse tx descriptor spacing */
                reg_data |= 0x1 << 28;  /* sparse rx descriptor spacing */
                reg_data |= (0x1 << 23) | (0x1 << 24);  /* Descriptor Coherency */
                reg_data |= (0x1 << 21) | (0x1 << 22);  /* Data Coherency */
                TITAN_GE_WRITE(TITAN_GE_XDMA_CONFIG, reg_data);
        }

        /* IR register for the XDMA */
        reg_data = TITAN_GE_READ(TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8));
        reg_data |= 0x80068000; /* No Rx_OOD */
        TITAN_GE_WRITE((TITAN_GE_GDI_INTERRUPT_ENABLE + (port_num << 8)), reg_data);

        /* Start the Tx and Rx XDMA controller */
        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG + (port_num << 8));
        reg_data &= 0x4fffffff; /* Clear tx reset */
        reg_data &= 0xfff4ffff; /* Clear rx reset */

#ifdef TITAN_GE_JUMBO_FRAMES
        reg_data |= 0xa0 | 0x30030000;
#else
        reg_data |= 0x40 | 0x20030000;
#endif

#ifndef CONFIG_SMP
        reg_data &= ~(0x10);
        reg_data |= 0x0f;       /* All of the packet */
#endif

        TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG + (port_num << 8)), reg_data);

        /* Rx desc count */
        count = titan_ge_rx_task(netdev, titan_port);
        TITAN_GE_WRITE((0x5048 + (port_num << 8)), count);
        count = TITAN_GE_READ(0x5048 + (port_num << 8));

        udelay(30);

        /*
         * Step 2: Configure the SDQPF, i.e. FIFO
         */
        if (config_done == 0) {
                reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
                reg_data = 0x1;
                TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
                reg_data &= ~(0x1);
                TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);
                reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_CTL);
                TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_CTL, reg_data);

                reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
                reg_data = 0x1;
                TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
                reg_data &= ~(0x1);
                TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
                reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_CTL);
                TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_CTL, reg_data);
        }
        /*
         * Enable RX FIFO 0, 4 and 8
         */
        if (port_num == 0) {
                reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_RXFIFO_0);

                reg_data |= 0x100000;
                reg_data |= (0xff << 10);

                TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);
                /*
                 * BAV2, BAV and DAV settings for the Rx FIFO
                 */
                reg_data1 = TITAN_GE_READ(0x4844);
                reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
                TITAN_GE_WRITE(0x4844, reg_data1);

                reg_data &= ~(0x00100000);
                reg_data |= 0x200000;

                TITAN_GE_WRITE(TITAN_GE_SDQPF_RXFIFO_0, reg_data);

                reg_data = TITAN_GE_READ(TITAN_GE_SDQPF_TXFIFO_0);
                reg_data |= 0x100000;

                TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);

                reg_data |= (0xff << 10);

                TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);

                /*
                 * BAV2, BAV and DAV settings for the Tx FIFO
                 */
                reg_data1 = TITAN_GE_READ(0x4944);
                reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

                TITAN_GE_WRITE(0x4944, reg_data1);

                reg_data &= ~(0x00100000);
                reg_data |= 0x200000;

                TITAN_GE_WRITE(TITAN_GE_SDQPF_TXFIFO_0, reg_data);
        }

        if (port_num == 1) {
                reg_data = TITAN_GE_READ(0x4870);

                reg_data |= 0x100000;
                reg_data |= (0xff << 10) | (0xff + 1);

                TITAN_GE_WRITE(0x4870, reg_data);
                /*
                 * BAV2, BAV and DAV settings for the Rx FIFO
                 */
                reg_data1 = TITAN_GE_READ(0x4874);
                reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
                TITAN_GE_WRITE(0x4874, reg_data1);

                reg_data &= ~(0x00100000);
                reg_data |= 0x200000;

                TITAN_GE_WRITE(0x4870, reg_data);

                reg_data = TITAN_GE_READ(0x494c);
                reg_data |= 0x100000;

                TITAN_GE_WRITE(0x494c, reg_data);
                reg_data |= (0xff << 10) | (0xff + 1);
                TITAN_GE_WRITE(0x494c, reg_data);

                /*
                 * BAV2, BAV and DAV settings for the Tx FIFO
                 */
                reg_data1 = TITAN_GE_READ(0x4950);
                reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

                TITAN_GE_WRITE(0x4950, reg_data1);

                reg_data &= ~(0x00100000);
                reg_data |= 0x200000;

                TITAN_GE_WRITE(0x494c, reg_data);
        }

        /*
         * Titan 1.2 revision does support port #2
         */
        if (port_num == 2) {
                /*
                 * Put the descriptors in the SRAM
                 */
                reg_data = TITAN_GE_READ(0x48a0);

                reg_data |= 0x100000;
                reg_data |= (0xff << 10) | (2 * (0xff + 1));

                TITAN_GE_WRITE(0x48a0, reg_data);
                /*
                 * BAV2, BAV and DAV settings for the Rx FIFO
                 */
                reg_data1 = TITAN_GE_READ(0x48a4);
                reg_data1 |= ((0x10 << 20) | (0x10 << 10) | 0x1);
                TITAN_GE_WRITE(0x48a4, reg_data1);

                reg_data &= ~(0x00100000);
                reg_data |= 0x200000;

                TITAN_GE_WRITE(0x48a0, reg_data);

                reg_data = TITAN_GE_READ(0x4958);
                reg_data |= 0x100000;

                TITAN_GE_WRITE(0x4958, reg_data);
                reg_data |= (0xff << 10) | (2 * (0xff + 1));
                TITAN_GE_WRITE(0x4958, reg_data);

                /*
                 * BAV2, BAV and DAV settings for the Tx FIFO
                 */
                reg_data1 = TITAN_GE_READ(0x495c);
                reg_data1 = ((0x1 << 20) | (0x1 << 10) | 0x10);

                TITAN_GE_WRITE(0x495c, reg_data1);

                reg_data &= ~(0x00100000);
                reg_data |= 0x200000;

                TITAN_GE_WRITE(0x4958, reg_data);
        }

        /*
         * Step 3: TRTG block enable
         */
        reg_data = TITAN_GE_READ(TITAN_GE_TRTG_CONFIG + (port_num << 12));
        /*
         * This is the 1.2 revision of the chip. It has a fix for the
         * IP header alignment. Now, the IP header begins at an
         * aligned address and this won't need an extra copy in the
         * driver. This performance drawback existed in the previous
         * versions of the silicon
         */
        reg_data_1 = TITAN_GE_READ(0x103c + (port_num << 12));
        reg_data_1 |= 0x40000000;
        TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

        reg_data_1 |= 0x04000000;
        TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

        mdelay(5);

        reg_data_1 &= ~(0x04000000);
        TITAN_GE_WRITE((0x103c + (port_num << 12)), reg_data_1);

        mdelay(5);

        reg_data |= 0x0001;
        TITAN_GE_WRITE((TITAN_GE_TRTG_CONFIG + (port_num << 12)), reg_data);

        /*
         * Step 4: Start the Tx activity
         */
        TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_2 + (port_num << 12)), 0xe197);
#ifdef TITAN_GE_JUMBO_FRAMES
        TITAN_GE_WRITE((0x1258 + (port_num << 12)), 0x4000);
#endif
        reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 + (port_num << 12));
        reg_data |= 0x0001;     /* Enable TMAC */
        reg_data |= 0x6c70;     /* PAUSE also set */

        TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 + (port_num << 12)), reg_data);

        udelay(30);

        /* Destination Address drop bit */
        reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_2 + (port_num << 12));
        reg_data |= 0x218;      /* DA_DROP bit and pause */
        TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_2 + (port_num << 12)), reg_data);

        TITAN_GE_WRITE((0x1218 + (port_num << 12)), 0x3);

#ifdef TITAN_GE_JUMBO_FRAMES
        TITAN_GE_WRITE((0x1208 + (port_num << 12)), 0x4000);
#endif
        /* Start the Rx activity */
        reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 + (port_num << 12));
        reg_data |= 0x0001;     /* RMAC Enable */
        reg_data |= 0x0010;     /* CRC Check enable */
        reg_data |= 0x0040;     /* Min Frame check enable */
        reg_data |= 0x4400;     /* Max Frame check enable */

        TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 + (port_num << 12)), reg_data);

        udelay(30);

        /*
         * Enable the Interrupts for Tx and Rx
         */
        reg_data1 = TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);

        if (port_num == 0) {
                reg_data1 |= 0x3;
#ifdef CONFIG_SMP
                TITAN_GE_WRITE(0x0038, 0x003);
#else
                TITAN_GE_WRITE(0x0038, 0x303);
#endif
        }

        if (port_num == 1)
                reg_data1 |= 0x300;

        if (port_num == 2)
                reg_data1 |= 0x30000;

        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data1);
        TITAN_GE_WRITE(0x003c, 0x300);

        if (config_done == 0) {
                TITAN_GE_WRITE(0x0024, 0x04000024);     /* IRQ vector */
                TITAN_GE_WRITE(0x0020, 0x000fb000);     /* INTMSG base */
        }

        /* Priority */
        reg_data = TITAN_GE_READ(0x1038 + (port_num << 12));
        reg_data &= ~(0x00f00000);
        TITAN_GE_WRITE((0x1038 + (port_num << 12)), reg_data);

        /* Step 5: GMII config */
        titan_ge_gmii_config(port_num);

        if (config_done == 0) {
                TITAN_GE_WRITE(0x1a80, 0);
                config_done = 1;
        }

        return TITAN_OK;
}

/*
 * Function to queue the packet for the Ethernet device
 */
static void titan_ge_tx_queue(titan_ge_port_info *titan_ge_eth,
                        struct sk_buff *skb)
{
        volatile titan_ge_tx_desc *tx_curr;
        int port_num = titan_ge_eth->port_num;
        unsigned int curr_desc =
                titan_ge_eth->tx_curr_desc_q;

        tx_curr = &(titan_ge_eth->tx_desc_area[curr_desc]);
        tx_curr->buffer_addr =
                pci_map_single(0, skb->data, skb->len - skb->data_len,
                        PCI_DMA_TODEVICE);

        titan_ge_eth->tx_skb[curr_desc] = (struct sk_buff *) skb;
        tx_curr->buffer_len = skb->len - skb->data_len;

        /* Last descriptor enables interrupt and changes ownership */
        tx_curr->cmd_sts = 0x1 | (1 << 15) | (1 << 5);
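        /*
         * Bit decode of cmd_sts, as suggested by the comment above:
         * bit 0 hands the descriptor to the XDMA, bit 5 marks the last
         * buffer of the frame and bit 15 requests a completion
         * interrupt. This mapping is read off the code, not a
         * datasheet.
         */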

        /* Kick the XDMA to start the transfer from memory to the FIFO */
        TITAN_GE_WRITE((0x5044 + (port_num << 8)), 0x1);

        /* Current descriptor updated */
        titan_ge_eth->tx_curr_desc_q = (curr_desc + 1) % TITAN_GE_TX_QUEUE;

        /* Prefetch the next descriptor */
#ifdef CONFIG_CPU_HAS_PREFETCH
        rm9000_prefetch(&(titan_ge_eth->tx_desc_area[
                        titan_ge_eth->tx_curr_desc_q]));
#endif
}

/*
 * Actually does the open of the Ethernet device
 */
static int titan_ge_eth_open(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num, size, phy_reg;
        unsigned long reg_data;
        int err = 0;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        /* Stop the Rx activity */
        reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
                                (port_num << 12));
        reg_data &= ~(0x00000001);
        TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
                        (port_num << 12)), reg_data);

        /* Clear the port interrupts */
        TITAN_GE_WRITE((TITAN_GE_CHANNEL0_INTERRUPT +
                        (port_num << 8)), 0x0);

        if (config_done == 0) {
                TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0);
                TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_B, 0);
        }

        /* Set the MAC Address */
        memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);

        if (config_done == 0)
                titan_port_init(netdev, titan_ge_eth);

        titan_ge_update_afx(titan_ge_eth);

        /* Allocate the Tx ring now */
        titan_ge_eth->tx_ring_skbs = 0;
        titan_ge_eth->tx_ring_size = TITAN_GE_TX_QUEUE;
        size = titan_ge_eth->tx_ring_size * sizeof(titan_ge_tx_desc);

        /* Allocate space in the SRAM for the descriptors */
        if (port_num == 0) {
                titan_ge_eth->tx_desc_area =
                        (titan_ge_tx_desc *) (TITAN_GE_SRAM_BASE_VIRTUAL);

                titan_ge_eth->tx_dma = (TITAN_GE_SRAM_BASE_PHYSICAL);
        }

        if (port_num == 1) {
                titan_ge_eth->tx_desc_area =
                        (titan_ge_tx_desc *) (TITAN_GE_SRAM_BASE_VIRTUAL + 0x100);

                titan_ge_eth->tx_dma = (TITAN_GE_SRAM_BASE_PHYSICAL + 0x100);
        }

        if (!titan_ge_eth->tx_desc_area) {
                printk(KERN_ERR
                        "%s: Cannot allocate Tx Ring (size %d bytes) for port %d\n",
                        netdev->name, size, port_num);
                return -ENOMEM;
        }

        titan_ge_eth->tx_desc_area_size = size;
        memset((void *) titan_ge_eth->tx_desc_area, 0, size);

        /* Now initialize the Tx descriptor ring */
        titan_ge_init_tx_desc_ring(titan_ge_eth,
                                titan_ge_eth->tx_ring_size,
                                (unsigned long) titan_ge_eth->tx_desc_area,
                                (unsigned long) titan_ge_eth->tx_dma);

        /* Allocate the Rx ring now */
        titan_ge_eth->rx_ring_size = TITAN_GE_RX_QUEUE;
        titan_ge_eth->rx_ring_skbs = 0;
        size = titan_ge_eth->rx_ring_size * sizeof(titan_ge_rx_desc);

        if (port_num == 0) {
                titan_ge_eth->rx_desc_area =
                        (titan_ge_rx_desc *)(TITAN_GE_SRAM_BASE_VIRTUAL + 0x1000);

                titan_ge_eth->rx_dma = (TITAN_GE_SRAM_BASE_PHYSICAL + 0x1000);
        }

        if (port_num == 1) {
                titan_ge_eth->rx_desc_area =
                        (titan_ge_rx_desc *)(TITAN_GE_SRAM_BASE_VIRTUAL + 0x1100);
                titan_ge_eth->rx_dma = (TITAN_GE_SRAM_BASE_PHYSICAL + 0x1100);
        }

        if (!titan_ge_eth->rx_desc_area) {
                printk(KERN_ERR
                        "%s: Cannot allocate Rx Ring (size %d bytes)\n",
                        netdev->name, size);

                printk(KERN_ERR
                        "%s: Freeing previously allocated TX queues...",
                        netdev->name);

                pci_free_consistent(0, titan_ge_eth->tx_desc_area_size,
                                (void *) titan_ge_eth->tx_desc_area,
                                titan_ge_eth->tx_dma);

                return -ENOMEM;
        }

        titan_ge_eth->rx_desc_area_size = size;
        memset((void *) titan_ge_eth->rx_desc_area, 0, size);

        /* Now initialize the Rx ring */
#ifdef TITAN_GE_JUMBO_FRAMES
        if ((titan_ge_init_rx_desc_ring
                (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_JUMBO_BUFSIZE,
                (unsigned long) titan_ge_eth->rx_desc_area, 0,
                (unsigned long) titan_ge_eth->rx_dma)) == 0)
#else
        if ((titan_ge_init_rx_desc_ring
                (titan_ge_eth, titan_ge_eth->rx_ring_size, TITAN_GE_STD_BUFSIZE,
                (unsigned long) titan_ge_eth->rx_desc_area, 0,
                (unsigned long) titan_ge_eth->rx_dma)) == 0)
#endif
                panic("%s: Error initializing RX Ring\n", netdev->name);

        /* Fill the Rx ring with the SKBs */
        titan_ge_port_start(netdev, titan_ge_eth);

        /*
         * Check if Interrupt Coalescing needs to be turned on. The
         * values specified in the register are multiplied by
         * (8 x 64 nanoseconds) to determine when an interrupt should
         * be sent to the CPU.
         */
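        /*
         * Worked example of that arithmetic: a (hypothetical) register
         * value of 1000 gives 1000 * 8 * 64 ns = 512 us between
         * coalesced interrupts.
         */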
#ifndef TITAN_RX_NAPI
        /*
         * If NAPI is turned on, we disable Rx interrupts
         * completely. So, we don't need coalescing then. Tx side
         * coalescing set to very high value. Maybe, disable
         * Tx side interrupts completely
         */
        if (TITAN_GE_RX_COAL) {
                titan_ge_eth->rx_int_coal =
                        titan_ge_rx_coal(TITAN_GE_RX_COAL, port_num);
        }

#endif
        if (TITAN_GE_TX_COAL) {
                titan_ge_eth->tx_int_coal =
                        titan_ge_tx_coal(TITAN_GE_TX_COAL, port_num);
        }

        err =
                titan_ge_mdio_read(port_num,
                        TITAN_GE_MDIO_PHY_STATUS, &phy_reg);
        if (err == TITAN_GE_MDIO_ERROR) {
                printk(KERN_ERR
                        "Could not read PHY control register 0x11\n");
                return TITAN_ERROR;
        }
        if (!(phy_reg & 0x0400)) {
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
                return TITAN_ERROR;
        } else {
                netif_carrier_on(netdev);
                netif_start_queue(netdev);
        }

        return TITAN_OK;
}

/*
 * Queue the packet for Tx. Currently no support for zero copy,
 * checksum offload and Scatter Gather. The chip does support
 * Scatter Gather only. But, that won't help here since zero copy
 * requires support for Tx checksumming also.
 */
int titan_ge_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned long flags;
        struct net_device_stats *stats;

        titan_ge_eth = netdev->priv;
        stats = &titan_ge_eth->stats;
#ifdef CONFIG_SMP
        spin_lock_irqsave(&titan_ge_eth->lock, flags);
#else
        local_irq_save(flags);
#endif

        if ((TITAN_GE_TX_QUEUE - titan_ge_eth->tx_ring_skbs) <=
                        (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(netdev);
#ifdef CONFIG_SMP
                spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
#else
                local_irq_restore(flags);
#endif
                printk(KERN_ERR "Tx OOD\n");
                return 1;
        }

        titan_ge_tx_queue(titan_ge_eth, skb);
        titan_ge_eth->tx_ring_skbs++;

        if (TITAN_GE_TX_QUEUE <= (titan_ge_eth->tx_ring_skbs + 4)) {
#ifdef CONFIG_SMP
                spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
#else
                local_irq_restore(flags);
#endif
                titan_ge_free_tx_queue(titan_ge_eth);
#ifdef CONFIG_SMP
                spin_lock_irqsave(&titan_ge_eth->lock, flags);
#else
                local_irq_save(flags);
#endif
        }

        stats->tx_bytes += skb->len;
        stats->tx_packets++;

#ifdef CONFIG_SMP
        spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
#else
        local_irq_restore(flags);
#endif

        netdev->trans_start = jiffies;

        return 0;
}

#ifdef CONFIG_NET_FASTROUTE
/*
 * Fast forward function for the fast routing. Helps
 * in IP forwarding. No semi fast forward since we
 * have to do that extra copy on the Rx for the IP
 * header alignment
 */
static int titan_ge_fast_forward(struct net_device *dev,
                                struct sk_buff *skb, int len)
{
        titan_ge_port_info *titan_ge_eth =
                (titan_ge_port_info *)dev->priv;
        struct ethhdr *eth = (void *)skb->data;

        if (eth->h_proto == __constant_htons(ETH_P_IP)) {
                struct rtable *rt;
                struct iphdr *iph;
                unsigned h;

                iph = (struct iphdr *)(skb->data + ETH_HLEN);
                h = (*(u8 *)&iph->daddr ^ *(u8 *)&iph->saddr) & NETDEV_FASTROUTE_HMASK;
                rt = (struct rtable *)(dev->fastpath[h]);
                if (rt &&
                        ((u16 *)&iph->daddr)[0] == ((u16 *)&rt->key.dst)[0] &&
                        ((u16 *)&iph->daddr)[1] == ((u16 *)&rt->key.dst)[1] &&
                        ((u16 *)&iph->saddr)[0] == ((u16 *)&rt->key.src)[0] &&
                        ((u16 *)&iph->saddr)[1] == ((u16 *)&rt->key.src)[1] &&
                        rt->u.dst.obsolete == 0) {
                        struct net_device *odev = rt->u.dst.dev;

                        if (*(u8 *)iph != 0x45 ||
                                (eth->h_dest[0] & 1) ||
                                !neigh_is_valid(rt->u.dst.neighbour) ||
                                iph->ttl <= 1) {
                                return 1;
                        }
                        ip_decrease_ttl(iph);
                        skb_put(skb, len);

                        memcpy(eth->h_source, odev->dev_addr, 6);
                        memcpy(eth->h_dest, rt->u.dst.neighbour->ha, 6);
                        skb->dev = odev;
                        skb->pkt_type = PACKET_FASTROUTE;

                        if (netif_running(odev) &&
                                (spin_trylock(&odev->xmit_lock))) {
                                if (odev->xmit_lock_owner != 0)
                                        odev->xmit_lock_owner = 0;
                                if (odev->hard_start_xmit(skb, odev) == 0) {
                                        odev->xmit_lock_owner = -1;
                                        spin_unlock(&odev->xmit_lock);
                                        return 0;
                                }
                        }
                        skb->nh.raw = skb->data + ETH_HLEN;
                        skb->protocol = __constant_htons(ETH_P_IP);
                        return 1;
                }
        }
        return 1;
}

#endif

/*
 * Actually does the Rx. Rx side checksumming supported.
 */
static int titan_ge_rx(struct net_device *netdev, int port_num,
                        titan_ge_port_info *titan_ge_port,
                        titan_ge_packet *packet)
{
        int rx_curr_desc, rx_used_desc;
        volatile titan_ge_rx_desc *rx_desc;

        rx_curr_desc = titan_ge_port->rx_curr_desc_q;
        rx_used_desc = titan_ge_port->rx_used_desc_q;

        if (((rx_curr_desc + 1) % TITAN_GE_RX_QUEUE) == rx_used_desc)
                return TITAN_ERROR;

        rx_desc = &(titan_ge_port->rx_desc_area[rx_curr_desc]);

        if (rx_desc->cmd_sts & TITAN_GE_RX_BUFFER_OWNED)
                return TITAN_ERROR;

        packet->skb = titan_ge_port->rx_skb[rx_curr_desc];
        packet->len = (rx_desc->cmd_sts & 0x7fff);

        /*
         * At this point, we don't know if the checksumming
         * actually helps relieve CPU. So, keep it for
         * port 0 only
         */
        packet->checksum = ntohs((rx_desc->buffer & 0xffff0000) >> 16);

        titan_ge_port->rx_curr_desc_q =
                (rx_curr_desc + 1) % TITAN_GE_RX_QUEUE;

        /* Prefetch the next descriptor */
#ifdef CONFIG_CPU_HAS_PREFETCH
        rm9000_prefetch(&(titan_ge_port->rx_desc_area[
                        titan_ge_port->rx_curr_desc_q + 1]));
#endif

        return TITAN_OK;
}

/*
 * Free the Tx queue of the used SKBs
 */
static int titan_ge_free_tx_queue(titan_ge_port_info *titan_ge_eth)
{
        unsigned long flags;

        /* Take the lock */
#ifdef CONFIG_SMP
        spin_lock_irqsave(&(titan_ge_eth->lock), flags);
#else
        local_irq_save(flags);
#endif

        while (titan_ge_return_tx_desc(titan_ge_eth, titan_ge_eth->port_num) == 0)
                if (titan_ge_eth->tx_ring_skbs != 1)
                        titan_ge_eth->tx_ring_skbs--;

#ifdef CONFIG_SMP
        spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
#else
        local_irq_restore(flags);
#endif

        return TITAN_OK;
}

/*
 * Threshold beyond which we do the cleaning of
 * Tx queue and new allocation for the Rx
 * queue
 */
#define TX_THRESHOLD    4
#define RX_THRESHOLD    10

/*
 * Receive the packets and send them to the kernel.
 */
int titan_ge_receive_queue(struct net_device *netdev, unsigned int max)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num;
        titan_ge_packet packet;
        struct net_device_stats *stats;
        struct sk_buff *skb;
        unsigned long received_packets = 0;
        unsigned int ack;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;
        stats = &titan_ge_eth->stats;

        while ((--max)
                && (titan_ge_rx(netdev, port_num, titan_ge_eth, &packet) == TITAN_OK)) {

                titan_ge_eth->rx_ring_skbs--;

#ifdef TITAN_RX_NAPI
                if (--titan_ge_eth->rx_work_limit < 0)
                        break;
                received_packets++;
#endif
                stats->rx_packets++;
                stats->rx_bytes += packet.len;

                if ((packet.cmd_sts & TITAN_GE_RX_PERR) ||
                        (packet.cmd_sts & TITAN_GE_RX_OVERFLOW_ERROR) ||
                        (packet.cmd_sts & TITAN_GE_RX_TRUNC) ||
                        (packet.cmd_sts & TITAN_GE_RX_CRC_ERROR)) {
                        stats->rx_dropped++;
                        dev_kfree_skb_any((struct sk_buff *) packet.skb);

                        continue;
                }
                /*
                 * Either support fast path or slow path. Decision
                 * making can really slow down the performance. The
                 * idea is to cut down the number of checks and improve
                 * the fastpath.
                 */
#ifdef CONFIG_NET_FASTROUTE
                switch (titan_ge_fast_forward(netdev,
                                packet.skb, packet.len)) {
                case 0:
                        goto gone;
                case 1:
                        break;
                }
#endif
                skb = (struct sk_buff *) packet.skb;
                /*
                 * This chip is weird. It does not have a byte level offset
                 * to fix the IP header alignment issue. Now, do an extra
                 * copy only if the custom pattern is not present
                 */
                skb_put(skb, packet.len - 2);

                /*
                 * Increment the data pointer by two since that is where
                 * the MAC starts
                 */
                skb_reserve(skb, 2);
                skb->protocol = eth_type_trans(skb, netdev);
                netif_receive_skb(skb);

#ifdef CONFIG_NET_FASTROUTE
gone:
#endif

#ifdef TITAN_RX_NAPI
                if (titan_ge_eth->rx_threshold > RX_THRESHOLD) {
                        ack = titan_ge_rx_task(netdev, titan_ge_eth);
                        TITAN_GE_WRITE((0x5048 + (port_num << 8)), ack);
                        titan_ge_eth->rx_threshold = 0;
                } else
                        titan_ge_eth->rx_threshold++;
#else
                ack = titan_ge_rx_task(netdev, titan_ge_eth);
                TITAN_GE_WRITE((0x5048 + (port_num << 8)), ack);
#endif

#ifdef TITAN_RX_NAPI
                if (titan_ge_eth->tx_threshold > TX_THRESHOLD) {
                        titan_ge_eth->tx_threshold = 0;
                        titan_ge_free_tx_queue(titan_ge_eth);
                } else
                        titan_ge_eth->tx_threshold++;
#endif

        }
        return received_packets;
}


#ifdef TITAN_RX_NAPI

/*
 * Enable the Rx side interrupts
 */
static void titan_ge_enable_int(unsigned int port_num,
                        titan_ge_port_info *titan_ge_eth,
                        struct net_device *netdev)
{
        unsigned long reg_data =
                TITAN_GE_READ(TITAN_GE_INTR_XDMA_IE);

        if (port_num == 0)
                reg_data |= 0x3;
        if (port_num == 1)
                reg_data |= 0x300;
        if (port_num == 2)
                reg_data |= 0x30000;

        /* Re-enable interrupts */
        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, reg_data);
}

/*
 * Main function to handle the polling for Rx side NAPI.
 * Receive interrupts have been disabled at this point.
 * The poll schedules the transmit followed by receive.
 */
static int titan_ge_poll(struct net_device *netdev, int *budget)
{
        titan_ge_port_info *titan_ge_eth = netdev->priv;
        int port_num = titan_ge_eth->port_num;
        int work_done = 0;
        unsigned long flags, status;

        titan_ge_eth->rx_work_limit = *budget;
        if (titan_ge_eth->rx_work_limit > netdev->quota)
                titan_ge_eth->rx_work_limit = netdev->quota;

        do {
                /* Do the transmit cleaning work here */
                titan_ge_free_tx_queue(titan_ge_eth);

                /* Ack the Rx interrupts */
                if (port_num == 0)
                        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x3);
                if (port_num == 1)
                        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x300);
                if (port_num == 2)
                        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_CORE_A, 0x30000);

                work_done += titan_ge_receive_queue(netdev, 0);

                /* Out of quota and there is work to be done */
                if (titan_ge_eth->rx_work_limit < 0)
                        goto not_done;

                /* Receive alloc_skb could lead to OOM */
                if (oom_flag == 1) {
                        oom_flag = 0;
                        goto oom;
                }

                status = TITAN_GE_READ(TITAN_GE_INTR_XDMA_CORE_A);
        } while (status & 0x30300);

        /* If we are here, then no more interrupts to process */
        goto done;

not_done:
        *budget -= work_done;
        netdev->quota -= work_done;
        return 1;

oom:
        printk(KERN_ERR "OOM\n");
        netif_rx_complete(netdev);
        return 0;

done:
        /*
         * No more packets on the poll list. Turn the interrupts
         * back on and we should be able to catch the new
         * packets in the interrupt handler
         */
        if (!work_done)
                work_done = 1;

        *budget -= work_done;
        netdev->quota -= work_done;

#ifdef CONFIG_SMP
        spin_lock_irqsave(&titan_ge_eth->lock, flags);
#else
        local_irq_save(flags);
#endif

        /* Remove us from the poll list */
        netif_rx_complete(netdev);

        /* Re-enable interrupts */
        titan_ge_enable_int(port_num, titan_ge_eth, netdev);

#ifdef CONFIG_SMP
        spin_unlock_irqrestore(&titan_ge_eth->lock, flags);
#else
        local_irq_restore(flags);
#endif

        return 0;
}
#endif

/*
 * Close the network device
 */
int titan_ge_stop(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        spin_lock_irq(&(titan_ge_eth->lock));
        titan_ge_eth_stop(netdev);
        free_irq(netdev->irq, netdev);
        MOD_DEC_USE_COUNT;
        spin_unlock_irq(&titan_ge_eth->lock);

        return TITAN_OK;
}

/*
 * Free the Tx ring
 */
static void titan_ge_free_tx_rings(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num, curr;
        unsigned long reg_data;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        /* Stop the Tx DMA */
        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
                                (port_num << 8));
        reg_data |= 0xc0000000;
        TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
                        (port_num << 8)), reg_data);

        /* Disable the TMAC */
        reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
                                (port_num << 12));
        reg_data &= ~(0x00000001);
        TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
                        (port_num << 12)), reg_data);

        for (curr = 0;
                (titan_ge_eth->tx_ring_skbs) && (curr < TITAN_GE_TX_QUEUE);
                curr++) {
                if (titan_ge_eth->tx_skb[curr]) {
                        dev_kfree_skb(titan_ge_eth->tx_skb[curr]);
                        titan_ge_eth->tx_ring_skbs--;
                }
        }

        if (titan_ge_eth->tx_ring_skbs != 0)
                printk
                        ("%s: Error on Tx descriptor free - could not free %d"
                        " descriptors\n", netdev->name,
                        titan_ge_eth->tx_ring_skbs);

        pci_free_consistent(0, titan_ge_eth->tx_desc_area_size,
                        (void *) titan_ge_eth->tx_desc_area,
                        titan_ge_eth->tx_dma);
}

/*
 * Free the Rx ring
 */
static void titan_ge_free_rx_rings(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num, curr;
        unsigned long reg_data;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        /* Stop the Rx DMA */
        reg_data = TITAN_GE_READ(TITAN_GE_CHANNEL0_CONFIG +
                                (port_num << 8));
        reg_data |= 0x000c0000;
        TITAN_GE_WRITE((TITAN_GE_CHANNEL0_CONFIG +
                        (port_num << 8)), reg_data);

        /* Disable the RMAC */
        reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
                                (port_num << 12));
        reg_data &= ~(0x00000001);
        TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
                        (port_num << 12)), reg_data);

        for (curr = 0;
                titan_ge_eth->rx_ring_skbs && (curr < TITAN_GE_RX_QUEUE);
                curr++) {
                if (titan_ge_eth->rx_skb[curr]) {
                        dev_kfree_skb(titan_ge_eth->rx_skb[curr]);
                        titan_ge_eth->rx_ring_skbs--;
                }
        }

        if (titan_ge_eth->rx_ring_skbs != 0)
                printk(KERN_ERR
                        "%s: Error in freeing Rx Ring. %d skb's still"
                        " stuck in RX Ring - ignoring them\n", netdev->name,
                        titan_ge_eth->rx_ring_skbs);

        pci_free_consistent(0, titan_ge_eth->rx_desc_area_size,
                        (void *) titan_ge_eth->rx_desc_area,
                        titan_ge_eth->rx_dma);
}

/*
 * Actually does the stop of the Ethernet device
 */
static int titan_ge_eth_stop(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth;
        unsigned int port_num;

        titan_ge_eth = netdev->priv;
        port_num = titan_ge_eth->port_num;

        netif_stop_queue(netdev);

        titan_ge_port_reset(titan_ge_eth->port_num);

        titan_ge_free_tx_rings(netdev);
        titan_ge_free_rx_rings(netdev);

        /* Disable the Tx and Rx Interrupts for all channels */
        TITAN_GE_WRITE(TITAN_GE_INTR_XDMA_IE, 0x0);

        return TITAN_OK;
}

/*
 * Update the MAC address. Note that we have to write the
 * address in three station registers, 16 bits each. And this
 * has to be done for TMAC and RMAC
 */
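/*
 * For example (hypothetical address), 00:11:22:33:44:55 stored in
 * dev_addr[0..5] is written as LOW = 0x1100, MID = 0x3322 and
 * HI = 0x5544, following the (p_addr[n+1] << 8) | p_addr[n] packing
 * used below.
 */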
static void titan_ge_update_mac_address(struct net_device *netdev)
{
        titan_ge_port_info *titan_ge_eth = netdev->priv;
        unsigned int port_num = titan_ge_eth->port_num;
        u8 p_addr[6];

        memcpy(titan_ge_eth->port_mac_addr, netdev->dev_addr, 6);
        memcpy(p_addr, netdev->dev_addr, 6);

        /* Update the Address Filtering Match tables */
        titan_ge_update_afx(titan_ge_eth);

        printk("Station MAC : %02x:%02x:%02x:%02x:%02x:%02x\n",
                p_addr[5], p_addr[4], p_addr[3],
                p_addr[2], p_addr[1], p_addr[0]);

        /* Set the MAC address here for TMAC and RMAC */
        TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_HI + (port_num << 12)),
                        ((p_addr[5] << 8) | p_addr[4]));
        TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_MID + (port_num << 12)),
                        ((p_addr[3] << 8) | p_addr[2]));
        TITAN_GE_WRITE((TITAN_GE_TMAC_STATION_LOW + (port_num << 12)),
                        ((p_addr[1] << 8) | p_addr[0]));

        TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_HI + (port_num << 12)),
                        ((p_addr[5] << 8) | p_addr[4]));
        TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_MID + (port_num << 12)),
                        ((p_addr[3] << 8) | p_addr[2]));
        TITAN_GE_WRITE((TITAN_GE_RMAC_STATION_LOW + (port_num << 12)),
                        ((p_addr[1] << 8) | p_addr[0]));

        return;
}
1973
/*
 * Set the MAC address of the Ethernet device
 */
int titan_ge_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	/* sa_data starts after the two-byte sa_family field */
	memcpy(netdev->dev_addr, sa->sa_data, 6);

	titan_ge_update_mac_address(netdev);
	return 0;
}

/*
 * Get the Ethernet device stats
 */
static struct net_device_stats *titan_ge_get_stats(struct net_device *netdev)
{
	titan_ge_port_info *titan_ge_eth = netdev->priv;

	return &titan_ge_eth->stats;
}

/*
 * Register the Titan GE with the kernel
 */
static int __init titan_ge_init_module(void)
{
	unsigned long version, device;

	printk(KERN_NOTICE
	       "PMC-Sierra TITAN 10/100/1000 Ethernet Driver\n");
	device = TITAN_GE_READ(TITAN_GE_DEVICE_ID);
	version = (device & 0x000f0000) >> 16;
	device &= 0x0000ffff;

	printk(KERN_NOTICE "Device Id : %lx, Version : %lx\n", device,
	       version);

	/* Register the ports */
	if (titan_ge_init(0))
		printk(KERN_ERR
		       "Error registering the TITAN Ethernet driver"
		       " for port 0\n");

	if (titan_ge_init(1))
		printk(KERN_ERR "Error registering the TITAN Ethernet"
		       " driver for port 1\n");

	/* Titan 1.2 also supports port #2 */
	if (titan_ge_init(2))
		printk(KERN_ERR "Error registering the TITAN Ethernet"
		       " driver for port 2\n");

	return 0;
}

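/*
 * Field layout inferred from the decode above: TITAN_GE_DEVICE_ID
 * carries the device id in bits 15:0 and the silicon version in bits
 * 19:16, so a (hypothetical) raw readout of 0x00010123 would print
 * "Device Id : 123, Version : 1".
 */
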
/*
 * Unregister the Titan GE from the kernel
 */
static void __exit titan_ge_cleanup_module(void)
{
	/*
	 * Nothing to do here yet - the netdevs registered by
	 * titan_ge_init() are not tracked, so they cannot be
	 * unregistered and freed on unload.
	 */
}

module_init(titan_ge_init_module);
module_exit(titan_ge_cleanup_module);
MODULE_AUTHOR("Manish Lachwani");
MODULE_DESCRIPTION("Titan ethernet ports driver");
MODULE_LICENSE("GPL");

/*
 * Initialize the Rx descriptor ring for the Titan GE
 */
static int titan_ge_init_rx_desc_ring(titan_ge_port_info * titan_eth_port,
				      int rx_desc_num,
				      int rx_buff_size,
				      unsigned long rx_desc_base_addr,
				      unsigned long rx_buff_base_addr,
				      unsigned long rx_dma)
{
	volatile titan_ge_rx_desc *rx_desc;
	unsigned long buffer_addr;
	int index;
	unsigned long titan_ge_rx_desc_bus = rx_dma;

	buffer_addr = rx_buff_base_addr;
	rx_desc = (titan_ge_rx_desc *) rx_desc_base_addr;

	/* Check alignment */
	if (rx_buff_base_addr & 0xF)
		return 0;

	/* Check Rx buffer size */
	if ((rx_buff_size < 8) || (rx_buff_size > TITAN_GE_MAX_RX_BUFFER))
		return 0;

	/*
	 * 64-bit alignment
	 * if ((rx_buff_base_addr + rx_buff_size) & 0x7)
	 *	return 0;
	 */

	/* Initialize the Rx desc ring */
	for (index = 0; index < rx_desc_num; index++) {
		titan_ge_rx_desc_bus += sizeof(titan_ge_rx_desc);
		rx_desc[index].cmd_sts = 0;
		rx_desc[index].buffer_addr = buffer_addr;
		titan_eth_port->rx_skb[index] = NULL;
		buffer_addr += rx_buff_size;
	}

	titan_eth_port->rx_curr_desc_q = 0;
	titan_eth_port->rx_used_desc_q = 0;

	titan_eth_port->rx_desc_area = (titan_ge_rx_desc *) rx_desc_base_addr;
	titan_eth_port->rx_desc_area_size =
	    rx_desc_num * sizeof(titan_ge_rx_desc);

	titan_eth_port->rx_dma = rx_dma;

	return TITAN_OK;
}

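/*
 * Layout example: with, say, rx_desc_num = 64 and rx_buff_size = 2048
 * (illustrative numbers), the loop above points descriptor i at the
 * buffer rx_buff_base_addr + i * 2048, while the descriptors
 * themselves sit contiguously at rx_desc_base_addr and occupy
 * rx_desc_area_size = 64 * sizeof(titan_ge_rx_desc) bytes.
 */
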
/*
 * Initialize the Tx descriptor ring. Descriptors live in the SRAM
 */
static int titan_ge_init_tx_desc_ring(titan_ge_port_info * titan_ge_port,
				      int tx_desc_num,
				      unsigned long tx_desc_base_addr,
				      unsigned long tx_dma)
{
	titan_ge_tx_desc *tx_desc;
	int index;
	unsigned long titan_ge_tx_desc_bus = tx_dma;

	/* Check alignment */
	if (tx_desc_base_addr & 0xF)
		return 0;

	tx_desc = (titan_ge_tx_desc *) tx_desc_base_addr;

	for (index = 0; index < tx_desc_num; index++) {
		/* Record the bus address of every descriptor */
		titan_ge_port->tx_dma_array[index] =
		    (dma_addr_t) titan_ge_tx_desc_bus;
		titan_ge_tx_desc_bus += sizeof(titan_ge_tx_desc);
		tx_desc[index].cmd_sts = 0x0000;
		tx_desc[index].buffer_len = 0;
		tx_desc[index].buffer_addr = 0x00000000;
		titan_ge_port->tx_skb[index] = NULL;
	}

	titan_ge_port->tx_curr_desc_q = 0;
	titan_ge_port->tx_used_desc_q = 0;

	titan_ge_port->tx_desc_area = (titan_ge_tx_desc *) tx_desc_base_addr;
	titan_ge_port->tx_desc_area_size =
	    tx_desc_num * sizeof(titan_ge_tx_desc);

	titan_ge_port->tx_dma = tx_dma;
	return TITAN_OK;
}

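/*
 * After the loop above, tx_dma_array[i] == tx_dma +
 * i * sizeof(titan_ge_tx_desc), i.e. the bus address of Tx descriptor
 * i - presumably so the XDMA engine can later be handed the address
 * of any individual descriptor without recomputing it.
 */
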
/*
 * Initialize the device as an Ethernet device
 */
static int titan_ge_init(int port)
{
	titan_ge_port_info *titan_ge_eth;
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev(sizeof(titan_ge_port_info));
	if (!netdev) {
		err = -ENOMEM;
		goto out;
	}

	netdev->open = titan_ge_open;
	netdev->stop = titan_ge_stop;
	netdev->hard_start_xmit = titan_ge_start_xmit;
	netdev->get_stats = titan_ge_get_stats;
	netdev->set_multicast_list = titan_ge_set_multi;
	netdev->set_mac_address = titan_ge_set_mac_address;

	/* Tx timeout */
	netdev->tx_timeout = titan_ge_tx_timeout;
	netdev->watchdog_timeo = 2 * HZ;

#ifdef TITAN_RX_NAPI
	/* NAPI poll callback and weight */
	netdev->poll = titan_ge_poll;
	netdev->weight = 64;
#endif
	netdev->tx_queue_len = TITAN_GE_TX_QUEUE;
	netif_carrier_off(netdev);
	netdev->base_addr = 0;

#ifdef CONFIG_NET_FASTROUTE
	netdev->accept_fastpath = titan_accept_fastpath;
#endif

	netdev->change_mtu = titan_ge_change_mtu;

	/* The private area was allocated along with the netdev
	 * by alloc_etherdev() above */
	titan_ge_eth = netdev->priv;
	titan_ge_eth->port_num = port;

	memset(&titan_ge_eth->stats, 0, sizeof(struct net_device_stats));

	/* Configure the Tx timeout handler */
	INIT_TQUEUE(&titan_ge_eth->tx_timeout_task,
		    (void (*)(void *)) titan_ge_tx_timeout_task, netdev);

	spin_lock_init(&titan_ge_eth->lock);

	/* Set the MAC address: base address plus the port number */
	memcpy(netdev->dev_addr, titan_ge_mac_addr_base, 6);
	netdev->dev_addr[5] += port;

	err = register_netdev(netdev);
	if (err)
		goto out_free_netdev;

	printk(KERN_NOTICE
	       "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       netdev->name, port, netdev->dev_addr[0],
	       netdev->dev_addr[1], netdev->dev_addr[2],
	       netdev->dev_addr[3], netdev->dev_addr[4],
	       netdev->dev_addr[5]);

#ifdef TITAN_RX_NAPI
	printk(KERN_NOTICE "Rx NAPI supported, Tx Coalescing ON\n");
#else
	printk(KERN_NOTICE "Rx and Tx Coalescing ON\n");
#endif

	return 0;

 out_free_netdev:
	kfree(netdev);

 out:
	return err;
}

/*
 * Reset the Ethernet port
 */
static void titan_ge_port_reset(unsigned int port_num)
{
	unsigned int reg_data;

	/* Stop the Tx port activity */
	reg_data = TITAN_GE_READ(TITAN_GE_TMAC_CONFIG_1 +
				 (port_num << 12));
	reg_data &= ~(0x0001);
	TITAN_GE_WRITE((TITAN_GE_TMAC_CONFIG_1 +
			(port_num << 12)), reg_data);

	/* Stop the Rx port activity */
	reg_data = TITAN_GE_READ(TITAN_GE_RMAC_CONFIG_1 +
				 (port_num << 12));
	reg_data &= ~(0x0001);
	TITAN_GE_WRITE((TITAN_GE_RMAC_CONFIG_1 +
			(port_num << 12)), reg_data);
}

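/*
 * The shifts used throughout imply fixed per-port register strides:
 * MAC-level registers (TMAC_/RMAC_CONFIG_1, STATION_*) repeat every
 * 0x1000 bytes (port_num << 12) and XDMA channel registers every
 * 0x100 bytes (port_num << 8). The helpers below are illustrative
 * only - the names are invented here and nothing in the driver uses
 * them.
 */
#define TITAN_GE_MAC_REG(reg, port)	((reg) + ((port) << 12))
#define TITAN_GE_XDMA_REG(reg, port)	((reg) + ((port) << 8))
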
/*
 * Return the Tx desc after use by the XDMA
 */
static int titan_ge_return_tx_desc(titan_ge_port_info * titan_ge_eth, int port)
{
	int tx_desc_used;
	struct sk_buff *skb;

	tx_desc_used = titan_ge_eth->tx_used_desc_q;

	/* Ring empty: nothing to reclaim, return right away */
	if (tx_desc_used == titan_ge_eth->tx_curr_desc_q)
		return TITAN_ERROR;

	/* Free the skb attached to the reclaimed descriptor */
	skb = titan_ge_eth->tx_skb[tx_desc_used];
	dev_kfree_skb_any(skb);

	titan_ge_eth->tx_skb[tx_desc_used] = NULL;
	titan_ge_eth->tx_used_desc_q =
	    (tx_desc_used + 1) % TITAN_GE_TX_QUEUE;

	return 0;
}

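/*
 * The reclaim index chases the producer index modulo the ring size:
 * tx_used_desc_q == tx_curr_desc_q means the ring is empty, and with
 * TITAN_GE_TX_QUEUE entries the index wraps from TITAN_GE_TX_QUEUE - 1
 * back to 0. Note that only one descriptor is reclaimed per call, so
 * the caller presumably loops until TITAN_ERROR is returned.
 */
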
#ifndef TITAN_RX_NAPI
/*
 * Coalescing for the Rx path
 */
static unsigned long titan_ge_rx_coal(unsigned long delay, int port)
{
	TITAN_GE_WRITE(TITAN_GE_INT_COALESCING, delay);
	TITAN_GE_WRITE(0x5038, delay);

	return delay;
}
#endif

/*
 * Coalescing for the Tx path
 */
static unsigned long titan_ge_tx_coal(unsigned long delay, int port)
{
	unsigned long rx_delay;

	/* Preserve only the current Rx delay in the low 16 bits */
	rx_delay = TITAN_GE_READ(TITAN_GE_INT_COALESCING) & 0xffff;
	delay = (delay << 16) | rx_delay;

	TITAN_GE_WRITE(TITAN_GE_INT_COALESCING, delay);
	TITAN_GE_WRITE(0x5038, delay);

	return delay;
}

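/*
 * Packing example: TITAN_GE_INT_COALESCING holds the Rx delay in bits
 * 15:0 and the Tx delay in bits 31:16, so with a current Rx delay of
 * 0x10 (illustrative value) titan_ge_tx_coal(0x20, port) writes
 * 0x00200010. The mirror write to register offset 0x5038 is kept from
 * the original code; its role is not documented here.
 */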