1 /*
2 * drivers/net/mv64340_eth.c - Driver for MV64340X ethernet ports
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
4 *
5 * Based on the 64360 driver from:
6 * Copyright (C) 2002 rabeeh@galileo.co.il
7 *
8 * Copyright (C) 2003 PMC-Sierra, Inc.,
9 * written by Manish Lachwani (lachwani@pmc-sierra.com)
10 *
11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27 #include <linux/config.h>
28 #include <linux/version.h>
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/config.h>
32 #include <linux/sched.h>
33 #include <linux/ptrace.h>
34 #include <linux/fcntl.h>
35 #include <linux/ioport.h>
36 #include <linux/interrupt.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/errno.h>
40 #include <linux/ip.h>
41 #include <linux/init.h>
42 #include <linux/in.h>
43 #include <linux/pci.h>
44 #include <asm/smp.h>
45 #include <linux/skbuff.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <net/ip.h>
49
50 #include <asm/bitops.h>
51 #include <asm/io.h>
52 #include <asm/types.h>
53 #include <asm/pgtable.h>
54 #include <asm/system.h>
55 #include "mv64340_eth.h"
56
57 /*************************************************************************
58 **************************************************************************
59 **************************************************************************
60 * The first part is the high level driver of the gigE ethernet ports. *
61 **************************************************************************
62 **************************************************************************
63 *************************************************************************/
64
65 /* Definition for configuring driver */
66 #undef MV64340_RX_QUEUE_FILL_ON_TASK
67
68 /* Constants */
69 #define EXTRA_BYTES 32
70 #define WRAP ETH_HLEN + 2 + 4 + 16
71 #define BUFFER_MTU dev->mtu + WRAP
72 #define INT_CAUSE_UNMASK_ALL 0x0007ffff
73 #define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff
74 #ifdef MV64340_RX_FILL_ON_TASK
75 #define INT_CAUSE_MASK_ALL 0x00000000
76 #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
77 #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
78 #endif
79
80 /* Static function declarations */
81 static int mv64340_eth_real_open(struct net_device *);
82 static int mv64340_eth_real_stop(struct net_device *);
83 static int mv64340_eth_change_mtu(struct net_device *, int);
84 static struct net_device_stats *mv64340_eth_get_stats(struct net_device *);
85 static void eth_port_init_mac_tables(ETH_PORT eth_port_num);
86 #ifdef MV64340_NAPI
87 static int mv64340_poll(struct net_device *dev, int *budget);
88 #endif
89
90 unsigned char prom_mac_addr_base[6];
91 unsigned long mv64340_sram_base;
92
93 /**************************************************
94 * Helper functions - used inside the driver only *
95 **************************************************/
96
mv64340_eth_malloc_ring(unsigned int size)97 static void *mv64340_eth_malloc_ring(unsigned int size)
98 {
99 dma_addr_t dma_handle;
100 void *result;
101 /* Assumes allocated buffer is cache line alligned */
102 result = pci_alloc_consistent(NULL, size, &dma_handle);
103 memset(result, 0, size);
104 return result;
105 }
106
107 /**********************************************************************
108 * mv64340_eth_change_mtu
109 *
110 * Changes MTU (maximum transfer unit) of the gigabit ethenret port
111 *
112 * Input : pointer to ethernet interface network device structure
113 * new mtu size
114 * Output : 0 upon success, -EINVAL upon failure
115 **********************************************************************/
mv64340_eth_change_mtu(struct net_device * dev,int new_mtu)116 static int mv64340_eth_change_mtu(struct net_device *dev, int new_mtu)
117 {
118 ETH_PORT_INFO *ethernet_private;
119 struct mv64340_eth_priv *port_private;
120 unsigned int port_num;
121 unsigned long flags;
122 ethernet_private = dev->priv;
123 port_private =
124 (struct mv64340_eth_priv *) ethernet_private->port_private;
125 port_num = port_private->port_num;
126
127 spin_lock_irqsave(&port_private->lock, flags);
128
129 if ((new_mtu > 9500) || (new_mtu < 64)) {
130 spin_unlock_irqrestore(&port_private->lock, flags);
131 return -EINVAL;
132 }
133
134 dev->mtu = new_mtu;
135 /*
136 * Stop then re-open the interface. This will allocate RX skb's with
137 * the new MTU.
138 * There is a possible danger that the open will not successed, due
139 * to memory is full, which might fail the open function.
140 */
141 if (netif_running(dev)) {
142 if (mv64340_eth_real_stop(dev))
143 printk(KERN_ERR
144 "%s: Fatal error on stopping device\n",
145 dev->name);
146 if (mv64340_eth_real_open(dev))
147 printk(KERN_ERR
148 "%s: Fatal error on opening device\n",
149 dev->name);
150 }
151
152 spin_unlock_irqrestore(&port_private->lock, flags);
153 return 0;
154 }
155 /**********************************************************************
156 * mv64340_eth_rx_task
157 *
158 * Fills / refills RX queue on a certain gigabit ethernet port
159 *
160 * Input : pointer to ethernet interface network device structure
161 * Output : N/A
162 **********************************************************************/
mv64340_eth_rx_task(void * data)163 static void mv64340_eth_rx_task(void *data)
164 {
165 struct net_device *dev = (struct net_device *) data;
166 ETH_PORT_INFO *ethernet_private;
167 struct mv64340_eth_priv *port_private;
168 unsigned int port_num;
169 PKT_INFO pkt_info;
170 struct sk_buff *skb;
171
172 ethernet_private = dev->priv;
173 port_private =
174 (struct mv64340_eth_priv *) ethernet_private->port_private;
175 port_num = port_private->port_num;
176
177 if (test_and_set_bit(0, &port_private->rx_task_busy)) {
178 panic("%s: Error in test_set_bit / clear_bit\n", dev->name);
179 }
180
181 while (port_private->rx_ring_skbs < (port_private->rx_ring_size - 5)) {
182 /* The +8 for buffer allignment and another 32 byte extra */
183
184 skb = dev_alloc_skb(BUFFER_MTU + 8 + EXTRA_BYTES);
185 if (!skb)
186 /* Better luck next time */
187 break;
188 port_private->rx_ring_skbs++;
189 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
190 pkt_info.byte_cnt = dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES;
191 /* Allign buffer to 8 bytes */
192 if (pkt_info.byte_cnt & ~0x7) {
193 pkt_info.byte_cnt &= ~0x7;
194 pkt_info.byte_cnt += 8;
195 }
196 pkt_info.buf_ptr =
197 pci_map_single(0, skb->data,
198 dev->mtu + ETH_HLEN + 4 + 2 + EXTRA_BYTES,
199 PCI_DMA_FROMDEVICE);
200 pkt_info.return_info = skb;
201 if (eth_rx_return_buff(ethernet_private, &pkt_info) != ETH_OK) {
202 printk(KERN_ERR
203 "%s: Error allocating RX Ring\n", dev->name);
204 break;
205 }
206 skb_reserve(skb, 2);
207 }
208 clear_bit(0, &port_private->rx_task_busy);
209 /*
210 * If RX ring is empty of SKB, set a timer to try allocating
211 * again in a later time .
212 */
213 if ((port_private->rx_ring_skbs == 0) &&
214 (port_private->rx_timer_flag == 0)) {
215 printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
216 /* After 100mSec */
217 port_private->timeout.expires = jiffies + (HZ / 10);
218 add_timer(&port_private->timeout);
219 port_private->rx_timer_flag = 1;
220 }
221 #if MV64340_RX_QUEUE_FILL_ON_TASK
222 else {
223 /* Return interrupts */
224 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
225 INT_CAUSE_UNMASK_ALL);
226 }
227 #endif
228 }
229
230 /**********************************************************************
231 * mv64340_eth_rx_task_timer_wrapper
232 *
233 * Timer routine to wake up RX queue filling task. This function is
234 * used only in case the RX queue is empty, and all alloc_skb has
235 * failed (due to out of memory event).
236 *
237 * Input : pointer to ethernet interface network device structure
238 * Output : N/A
239 **********************************************************************/
mv64340_eth_rx_task_timer_wrapper(unsigned long data)240 static void mv64340_eth_rx_task_timer_wrapper(unsigned long data)
241 {
242 struct net_device *dev = (struct net_device *) data;
243 ETH_PORT_INFO *ethernet_private;
244 struct mv64340_eth_priv *port_private;
245
246 ethernet_private = dev->priv;
247 port_private =
248 (struct mv64340_eth_priv *) ethernet_private->port_private;
249
250 port_private->rx_timer_flag = 0;
251 mv64340_eth_rx_task((void *) data);
252 }
253
254
255 /**********************************************************************
256 * mv64340_eth_update_mac_address
257 *
258 * Update the MAC address of the port in the address table
259 *
260 * Input : pointer to ethernet interface network device structure
261 * Output : N/A
262 **********************************************************************/
mv64340_eth_update_mac_address(struct net_device * dev)263 static void mv64340_eth_update_mac_address(struct net_device *dev)
264 {
265 ETH_PORT_INFO *ethernet_private = dev->priv;
266 struct mv64340_eth_priv *port_private;
267 unsigned int port_num;
268
269 port_private =
270 (struct mv64340_eth_priv *) ethernet_private->port_private;
271 port_num = port_private->port_num;
272
273 eth_port_init_mac_tables(port_num);
274 memcpy(ethernet_private->port_mac_addr, dev->dev_addr, 6);
275 eth_port_uc_addr_set(port_num, ethernet_private->port_mac_addr);
276 }
277
278 /**********************************************************************
279 * mv64340_eth_set_rx_mode
280 *
281 * Change from promiscuos to regular rx mode
282 *
283 * Input : pointer to ethernet interface network device structure
284 * Output : N/A
285 **********************************************************************/
mv64340_eth_set_rx_mode(struct net_device * dev)286 static void mv64340_eth_set_rx_mode(struct net_device *dev)
287 {
288 ETH_PORT_INFO *ethernet_private = (ETH_PORT_INFO *) dev->priv;
289
290 if (dev->flags & IFF_PROMISC) {
291 ethernet_set_config_reg
292 (ethernet_private->port_num,
293 ethernet_get_config_reg(ethernet_private->port_num) |
294 ETH_UNICAST_PROMISCUOUS_MODE);
295 } else {
296 ethernet_set_config_reg
297 (ethernet_private->port_num,
298 ethernet_get_config_reg(ethernet_private->port_num) &
299 ~(unsigned int) ETH_UNICAST_PROMISCUOUS_MODE);
300 }
301 }
302
303
304 /**********************************************************************
305 * mv64340_eth_set_mac_address
306 *
307 * Change the interface's mac address.
308 * No special hardware thing should be done because interface is always
309 * put in promiscuous mode.
310 *
311 * Input : pointer to ethernet interface network device structure and
312 * a pointer to the designated entry to be added to the cache.
313 * Output : zero upon success, negative upon failure
314 **********************************************************************/
mv64340_eth_set_mac_address(struct net_device * dev,void * addr)315 static int mv64340_eth_set_mac_address(struct net_device *dev, void *addr)
316 {
317 int i;
318
319 for (i = 0; i < 6; i++)
320 /* +2 is for the offset of the HW addr type */
321 dev->dev_addr[i] = ((unsigned char *) addr)[i + 2];
322 mv64340_eth_update_mac_address(dev);
323 return 0;
324 }
325
326 /**********************************************************************
327 * mv64340_eth_tx_timeout
328 *
329 * Called upon a timeout on transmitting a packet
330 *
331 * Input : pointer to ethernet interface network device structure.
332 * Output : N/A
333 **********************************************************************/
mv64340_eth_tx_timeout(struct net_device * dev)334 static void mv64340_eth_tx_timeout(struct net_device *dev)
335 {
336 ETH_PORT_INFO *ethernet_private = dev->priv;
337 printk(KERN_INFO "%s: TX timeout ", dev->name);
338 printk(KERN_INFO "Resetting card \n");
339
340 /* Do the reset outside of interrupt context */
341 schedule_task(ðernet_private->tx_timeout_task);
342 }
343
344 /**********************************************************************
345 * mv64340_eth_tx_timeout_task
346 *
347 * Actual routine to reset the adapter when a timeout on Tx has occurred
348 **********************************************************************/
mv64340_eth_tx_timeout_task(struct net_device * dev)349 static void mv64340_eth_tx_timeout_task(struct net_device *dev)
350 {
351 ETH_PORT_INFO *ethernet_private = dev->priv;
352
353 netif_device_detach(dev);
354 eth_port_reset(ethernet_private->port_num);
355 eth_port_start(ethernet_private);
356 netif_device_attach(dev);
357 }
358
359 /**********************************************************************
360 * mv64340_eth_free_tx_queue
361 *
362 * Input : dev - a pointer to the required interface
363 *
364 * Output : 0 if was able to release skb , nonzero otherwise
365 **********************************************************************/
366
mv64340_eth_free_tx_queue(struct net_device * dev,unsigned int eth_int_cause_ext)367 static int mv64340_eth_free_tx_queue(struct net_device *dev,
368 unsigned int eth_int_cause_ext)
369 {
370 ETH_PORT_INFO *ethernet_private;
371 struct mv64340_eth_priv *port_private;
372 unsigned int port_num;
373 PKT_INFO pkt_info;
374 int released = 1;
375 struct net_device_stats *stats;
376
377 spin_lock(&(port_private->lock));
378 ethernet_private = dev->priv;
379 port_private =
380 (struct mv64340_eth_priv *) ethernet_private->port_private;
381 port_num = port_private->port_num;
382 stats = &port_private->stats;
383
384 /* Check only queue 0 */
385 if (eth_int_cause_ext & (BIT0 | BIT8))
386 while (eth_tx_return_desc(ethernet_private, &pkt_info) ==
387 ETH_OK) {
388 if (pkt_info.cmd_sts & BIT0) {
389 printk("%s: Error in TX\n", dev->name);
390 stats->tx_errors++;
391 }
392 /*
393 * If return_info is different than 0, release the skb.
394 * The case where return_info is not 0 is only in case
395 * when transmitted a scatter/gather packet, where only
396 * last skb releases the whole chain.
397 */
398 if (pkt_info.return_info) {
399 dev_kfree_skb_irq((struct sk_buff *)
400 pkt_info.return_info);
401 released = 0;
402 if (skb_shinfo(pkt_info.return_info)->nr_frags)
403 pci_unmap_page(0, pkt_info.buf_ptr,
404 pkt_info.byte_cnt,
405 PCI_DMA_TODEVICE);
406
407 if (port_private->tx_ring_skbs != 1)
408 port_private->tx_ring_skbs--;
409 }
410 else
411 pci_unmap_page(0, pkt_info.buf_ptr,
412 pkt_info.byte_cnt,
413 PCI_DMA_TODEVICE);
414
415 /*
416 * Decrement the number of outstanding skbs counter on the
417 * TX queue.
418 */
419 if (port_private->tx_ring_skbs == 0)
420 panic
421 ("ERROR - TX outstanding SKBs counter is corrupted\n");
422
423 }
424
425 spin_unlock(&(port_private->lock));
426 return released;
427 }
428
429 /**********************************************************************
430 * mv64340_eth_receive
431 *
432 * This function is forward packets that are received from the port's
433 * queues toward kernel core or FastRoute them to another interface.
434 *
435 * Input : dev - a pointer to the required interface
436 * max - maximum number to receive (0 means unlimted)
437 *
438 * Output : number of served packets
439 **********************************************************************/
440 #ifdef MV64340_NAPI
mv64340_eth_receive_queue(struct net_device * dev,unsigned int max,int budget)441 static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max,
442 int budget)
443 #else
444 static int mv64340_eth_receive_queue(struct net_device *dev, unsigned int max)
445 #endif
446 {
447 ETH_PORT_INFO *ethernet_private;
448 struct mv64340_eth_priv *port_private;
449 unsigned int port_num;
450 PKT_INFO pkt_info;
451 struct sk_buff *skb;
452 unsigned int received_packets = 0;
453 struct net_device_stats *stats;
454
455 ethernet_private = dev->priv;
456 port_private =
457 (struct mv64340_eth_priv *) ethernet_private->port_private;
458 port_num = port_private->port_num;
459 stats = &port_private->stats;
460
461 #ifdef MV64340_NAPI
462 while ((eth_port_receive(ethernet_private, &pkt_info) == ETH_OK)
463 &&
464 budget > 0) {
465 #else
466 while ((--max)
467 && (eth_port_receive(ethernet_private, &pkt_info) ==
468 ETH_OK)) {
469 #endif
470 port_private->rx_ring_skbs--;
471 received_packets++;
472 #ifdef MV64340_NAPI
473 budget--;
474 #endif
475 /* Update statistics. Note byte count includes 4 byte CRC count */
476 stats->rx_packets++;
477 stats->rx_bytes += pkt_info.byte_cnt;
478 skb = (struct sk_buff *) pkt_info.return_info;
479 /*
480 * In case received a packet without first / last bits on OR the error
481 * summary bit is on, the packets needs to be dropeed.
482 */
483 if (
484 ((pkt_info.cmd_sts
485 & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
486 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
487 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
488 stats->rx_dropped++;
489 if (
490 (pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
491 ETH_RX_LAST_DESC)) !=
492 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
493 if (net_ratelimit())
494 printk(KERN_ERR
495 "%s: Received packet spread on multiple"
496 " descriptors\n",
497 dev->name);
498 }
499 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) {
500 stats->rx_errors++;
501 }
502 dev_kfree_skb_irq(skb);
503 } else {
504 struct ethhdr *eth_h;
505 struct iphdr *ip_h;
506
507 /*
508 * The -4 is for the CRC in the trailer of the
509 * received packet
510 */
511 skb_put(skb, pkt_info.byte_cnt - 4);
512 skb->dev = dev;
513
514 eth_h = (struct ethhdr *) skb->data;
515 ip_h = (struct iphdr *) (skb->data + ETH_HLEN);
516 if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
517 skb->ip_summed = CHECKSUM_UNNECESSARY;
518 skb->csum = htons((pkt_info.cmd_sts
519 & 0x0007fff8) >> 3);
520 }
521 else
522 skb->ip_summed = CHECKSUM_NONE;
523 skb->protocol = eth_type_trans(skb, dev);
524 #ifdef MV64340_NAPI
525 netif_receive_skb(skb);
526 #else
527 netif_rx(skb);
528 #endif
529 }
530 }
531
532 return received_packets;
533 }
534
535 /**********************************************************************
536 * mv64340_eth_int_handler
537 *
538 * Main interrupt handler for the gigbit ethernet ports
539 *
540 * Input : irq - irq number (not used)
541 * dev_id - a pointer to the required interface's data structure
542 * regs - not used
543 * Output : N/A
544 **********************************************************************/
545
546 static void mv64340_eth_int_handler(int irq, void *dev_id, struct pt_regs *regs)
547 {
548 struct net_device *dev = (struct net_device *) dev_id;
549 u32 eth_int_cause = 0, eth_int_cause_ext = 0;
550 ETH_PORT_INFO *ethernet_private;
551 struct mv64340_eth_priv *port_private;
552 unsigned int port_num;
553 ethernet_private = dev->priv;
554 port_private =
555 (struct mv64340_eth_priv *) ethernet_private->port_private;
556 port_num = port_private->port_num;
557
558 /* Read interrupt cause registers */
559 eth_int_cause =
560 (MV_READ_DATA(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num)) &
561 INT_CAUSE_UNMASK_ALL);
562
563 if (eth_int_cause & BIT1)
564 eth_int_cause_ext =
565 (MV_READ_DATA(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
566 INT_CAUSE_UNMASK_ALL_EXT);
567 else
568 eth_int_cause_ext = 0;
569
570 #ifdef MV64340_NAPI
571 if (!(eth_int_cause & 0x0007fffd)) {
572 /* Dont ack the Rx interrupt */
573 #endif
574 /*
575 * Clear specific ethernet port intrerrupt registers by acknowleding
576 * relevant bits.
577 */
578 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),
579 ~eth_int_cause);
580 if (eth_int_cause_ext != 0x0)
581 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
582 ~eth_int_cause_ext);
583
584 /* UDP change : We may need this */
585 if (eth_int_cause_ext & 0x0000ffff) {
586 if (mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) {
587 if (netif_queue_stopped(dev) &&
588 (dev->flags & IFF_RUNNING) &&
589 (MV64340_TX_QUEUE_SIZE > port_private->tx_ring_skbs + 1)) {
590 netif_wake_queue(dev);
591 }
592 }
593 }
594 #ifdef MV64340_NAPI
595 }
596 else {
597 if (netif_rx_schedule_prep(dev)) {
598 /* Mask all the interrupts */
599 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),0);
600 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
601 __netif_rx_schedule(dev);
602 }
603 #else
604 unsigned int total_received = 0;
605
606 if (eth_int_cause & (BIT2 | BIT11)) {
607 total_received +=
608 mv64340_eth_receive_queue(dev, 0);
609 }
610 /*
611 * After forwarded received packets to upper layer, add a task in an
612 * interrupts enabled context that refills the RX ring with skb's.
613 */
614 #if MV64340_RX_QUEUE_FILL_ON_TASK
615 /* Unmask all interrupts on ethernet port */
616 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
617 INT_CAUSE_MASK_ALL);
618 queue_task(&port_private->rx_task, &tq_immediate);
619 mark_bh(IMMEDIATE_BH);
620 #else
621 port_private->rx_task.routine(dev);
622 #endif
623 #endif
624 }
625 /* PHY status changed */
626 if (eth_int_cause_ext & (BIT16 | BIT20)) {
627 unsigned int phy_reg_data;
628
629 /* Check Link status on ethernet port */
630 eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
631 if (!(phy_reg_data & 0x20)) {
632 netif_stop_queue(dev);
633 dev->flags &= ~(IFF_RUNNING);
634 } else {
635 netif_wake_queue(dev);
636 dev->flags |= (IFF_RUNNING);
637
638 /*
639 * Start all TX queues on ethernet port. This is good in case of
640 * previous packets where not transmitted, due to link down and
641 * this command re-enables all TX queues.
642 * Note that it is possible to get a TX resource error interrupt
643 * after issuing this, since not all TX queues are enabled,
644 * or has anything to send.
645 */
646 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 1);
647 }
648 }
649
650 /*
651 * If no real interrupt occured, exit.
652 * This can happen when using gigE interrupt coalescing mechanism.
653 */
654 if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0)) {
655 return;
656 }
657
658 return;
659 }
660
661 /**********************************************************************
662 * mv64340_eth_open
663 *
664 * This function is called when openning the network device. The function
665 * should initialize all the hardware, initialize cyclic Rx/Tx
666 * descriptors chain and buffers and allocate an IRQ to the network
667 * device.
668 *
669 * Input : a pointer to the network device structure
670 *
671 * Output : zero of success , nonzero if fails.
672 **********************************************************************/
673
674 static int mv64340_eth_open(struct net_device *dev)
675 {
676 int retval;
677 ETH_PORT_INFO *ethernet_private;
678 struct mv64340_eth_priv *port_private;
679 unsigned int port_num;
680
681 ethernet_private = dev->priv;
682 port_private =
683 (struct mv64340_eth_priv *) ethernet_private->port_private;
684 port_num = port_private->port_num;
685
686 spin_lock_irq(&(port_private->lock));
687
688 retval =
689 request_irq(ETH_PORT0_IRQ_NUM + port_num, mv64340_eth_int_handler,
690 (SA_INTERRUPT | SA_SAMPLE_RANDOM), dev->name, dev);
691
692 if (retval != 0) {
693 printk(KERN_ERR "Can not assign IRQ number to MV64340_eth%d\n",
694 port_num);
695 return -1;
696 } else {
697 dev->irq = ETH_PORT0_IRQ_NUM + port_num;
698 }
699
700 if (mv64340_eth_real_open(dev)) {
701 printk("%s: Error opening interface\n", dev->name);
702 free_irq(dev->irq, dev);
703 spin_unlock_irq(&port_private->lock);
704 return -EBUSY;
705 }
706 MOD_INC_USE_COUNT;
707 spin_unlock_irq(&port_private->lock);
708 return 0;
709 }
710
711 /* Helper function for mv64340_eth_open */
712 static int mv64340_eth_real_open(struct net_device *dev)
713 {
714 ETH_PORT_INFO *ethernet_private;
715 struct mv64340_eth_priv *port_private;
716 unsigned int port_num;
717 u32 phy_reg_data;
718 unsigned int size;
719
720 ethernet_private = dev->priv;
721 port_private =
722 (struct mv64340_eth_priv *) ethernet_private->port_private;
723 port_num = port_private->port_num;
724
725 /* Stop RX Queues */
726 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
727 0x0000ff00);
728
729 /* Clear the ethernet port interrupts */
730 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
731 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
732
733 /* Unmask RX buffer and TX end interrupt */
734 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
735 INT_CAUSE_UNMASK_ALL);
736
737 /* Unmask phy and link status changes interrupts */
738 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
739 INT_CAUSE_UNMASK_ALL_EXT);
740
741 /* Set the MAC Address */
742 memcpy(ethernet_private->port_mac_addr, dev->dev_addr, 6);
743
744 eth_port_init(ethernet_private);
745
746 /* Set rx_task pointers */
747 port_private->rx_task.sync = 0;
748 port_private->rx_task.routine = mv64340_eth_rx_task;
749 port_private->rx_task.data = dev;
750
751 memset(&port_private->timeout, 0, sizeof(struct timer_list));
752 port_private->timeout.function = mv64340_eth_rx_task_timer_wrapper;
753 port_private->timeout.data = (unsigned long) dev;
754
755 port_private->rx_task_busy = 0;
756 port_private->rx_timer_flag = 0;
757
758 /* Allocate TX ring */
759 port_private->tx_ring_skbs = 0;
760 port_private->tx_ring_size = MV64340_TX_QUEUE_SIZE;
761 size = port_private->tx_ring_size * sizeof(ETH_TX_DESC);
762 ethernet_private->tx_desc_area_size = size;
763
764 /* Assumes allocated ring is 16 bytes alligned */
765 ethernet_private->p_tx_desc_area =
766 (ETH_TX_DESC *) mv64340_sram_base;
767 if (!ethernet_private->p_tx_desc_area) {
768 printk(KERN_ERR
769 "%s: Cannot allocate Tx Ring (size %d bytes)\n",
770 dev->name, size);
771 return -ENOMEM;
772 }
773 memset((void *) ethernet_private->p_tx_desc_area, 0,
774 ethernet_private->tx_desc_area_size);
775
776 /* Dummy will be replaced upon real tx */
777 ether_init_tx_desc_ring(ethernet_private,
778 port_private->tx_ring_size,
779 (unsigned long)ethernet_private->p_tx_desc_area);
780
781 /* Allocate RX ring */
782 /* Meantime RX Ring are fixed - but must be configurable by user */
783 port_private->rx_ring_size = MV64340_RX_QUEUE_SIZE;
784 port_private->rx_ring_skbs = 0;
785 size = port_private->rx_ring_size * sizeof(ETH_RX_DESC);
786 ethernet_private->rx_desc_area_size = size;
787
788 /* Assumes allocated ring is 16 bytes alligned */
789 ethernet_private->p_rx_desc_area =
790 (ETH_RX_DESC *) mv64340_eth_malloc_ring(size);
791 if (!ethernet_private->p_rx_desc_area) {
792 printk(KERN_ERR
793 "%s: Cannot allocate Rx ring (size %d bytes)\n",
794 dev->name, size);
795 printk(KERN_ERR
796 "%s: Freeing previously allocated TX queues...",
797 dev->name);
798 pci_free_consistent(0, ethernet_private->tx_desc_area_size,
799 (void *)
800 ethernet_private->p_tx_desc_area,
801 virt_to_bus(ethernet_private->
802 p_tx_desc_area));
803 return -ENOMEM;
804 }
805 memset((void *) ethernet_private->p_rx_desc_area, 0,
806 ethernet_private->rx_desc_area_size);
807 if (
808 (ether_init_rx_desc_ring(ethernet_private, port_private->rx_ring_size, 1536, /* Dummy, will be replaced later */
809 (unsigned long) ethernet_private->p_rx_desc_area,
810 0)) == false)
811 panic("%s: Error initializing RX Ring\n", dev->name);
812
813 /* Fill RX ring with skb's */
814 mv64340_eth_rx_task(dev);
815
816 eth_port_start(ethernet_private);
817
818 /* Interrupt Coalescing */
819
820 #ifdef MV64340_COAL
821 port_private->rx_int_coal =
822 eth_port_set_rx_coal (port_num, 133000000, MV64340_RX_COAL);
823 #endif
824
825 port_private->tx_int_coal =
826 eth_port_set_tx_coal (port_num, 133000000, MV64340_TX_COAL);
827
828 /* Increase the Rx side buffer size */
829
830 MV_WRITE (MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num), (0x5 << 17) |
831 (MV_READ_DATA (MV64340_ETH_PORT_SERIAL_CONTROL_REG(port_num))
832 & 0xfff1ffff));
833
834 /* Check Link status on phy */
835 eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
836 if (!(phy_reg_data & 0x20)) {
837 netif_stop_queue(dev);
838 dev->flags &= ~(IFF_RUNNING);
839 } else {
840 netif_start_queue(dev);
841 dev->flags |= (IFF_RUNNING);
842 }
843
844 return 0;
845 }
846
847 static void mv64340_eth_free_tx_rings(struct net_device *dev)
848 {
849 ETH_PORT_INFO *ethernet_private;
850 struct mv64340_eth_priv *port_private;
851 unsigned int port_num, curr;
852
853 ethernet_private = dev->priv;
854 port_private =
855 (struct mv64340_eth_priv *) ethernet_private->port_private;
856 port_num = port_private->port_num;
857
858 /* Stop Tx Queues */
859 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
860 0x0000ff00);
861
862 /* Free TX rings */
863 /* Free outstanding skb's on TX rings */
864 for (curr = 0;
865 (port_private->tx_ring_skbs) && (curr < MV64340_TX_QUEUE_SIZE);
866 curr++) {
867 if (ethernet_private->tx_skb[curr]) {
868 dev_kfree_skb(ethernet_private->tx_skb[curr]);
869 port_private->tx_ring_skbs--;
870 }
871 }
872 if (port_private->tx_ring_skbs != 0)
873 printk
874 ("%s: Error on Tx descriptor free - could not free %d"
875 " descriptors\n", dev->name,
876 port_private->tx_ring_skbs);
877 pci_free_consistent(0, ethernet_private->tx_desc_area_size,
878 (void *) ethernet_private->p_tx_desc_area,
879 virt_to_bus(ethernet_private->p_tx_desc_area));
880 }
881
882 static void mv64340_eth_free_rx_rings(struct net_device *dev)
883 {
884 ETH_PORT_INFO *ethernet_private;
885 struct mv64340_eth_priv *port_private;
886 unsigned int port_num;
887 int curr;
888
889 ethernet_private = dev->priv;
890 port_private =
891 (struct mv64340_eth_priv *) ethernet_private->port_private;
892 port_num = port_private->port_num;
893
894
895 /* Stop RX Queues */
896 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
897 0x0000ff00);
898
899 /* Free RX rings */
900 /* Free preallocated skb's on RX rings */
901 for (curr = 0;
902 port_private->rx_ring_skbs && (curr < MV64340_RX_QUEUE_SIZE);
903 curr++) {
904 if (ethernet_private->rx_skb[curr]) {
905 dev_kfree_skb(ethernet_private->rx_skb[curr]);
906 port_private->rx_ring_skbs--;
907 }
908 }
909
910 if (port_private->rx_ring_skbs != 0)
911 printk(KERN_ERR
912 "%s: Error in freeing Rx Ring. %d skb's still"
913 " stuck in RX Ring - ignoring them\n", dev->name,
914 port_private->rx_ring_skbs);
915 pci_free_consistent(0, ethernet_private->rx_desc_area_size,
916 (void *) ethernet_private->p_rx_desc_area,
917 virt_to_bus(ethernet_private->p_rx_desc_area));
918 }
919
920 /**********************************************************************
921 * mv64340_eth_stop
922 *
923 * This function is used when closing the network device.
924 * It updates the hardware,
925 * release all memory that holds buffers and descriptors and release the IRQ.
926 * Input : a pointer to the device structure
927 * Output : zero if success , nonzero if fails
928 *********************************************************************/
929
930 static int mv64340_eth_stop(struct net_device *dev)
931 {
932 ETH_PORT_INFO *ethernet_private;
933 struct mv64340_eth_priv *port_private;
934 unsigned int port_num;
935
936 ethernet_private = dev->priv;
937 port_private =
938 (struct mv64340_eth_priv *) ethernet_private->port_private;
939 port_num = port_private->port_num;
940
941 spin_lock_irq(&(port_private->lock));
942
943 mv64340_eth_real_stop(dev);
944
945 free_irq(dev->irq, dev);
946 MOD_DEC_USE_COUNT;
947 spin_unlock_irq(&port_private->lock);
948 return 0;
949 };
950
951 /* Helper function for mv64340_eth_stop */
952
953 static int mv64340_eth_real_stop(struct net_device *dev)
954 {
955 ETH_PORT_INFO *ethernet_private;
956 struct mv64340_eth_priv *port_private;
957 unsigned int port_num;
958 ethernet_private = dev->priv;
959 port_private =
960 (struct mv64340_eth_priv *) ethernet_private->port_private;
961 port_num = port_private->port_num;
962
963 netif_stop_queue(dev);
964
965 mv64340_eth_free_tx_rings(dev);
966 mv64340_eth_free_rx_rings(dev);
967
968 eth_port_reset(ethernet_private->port_num);
969
970 /* Disable ethernet port interrupts */
971 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
972 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
973
974 /* Mask RX buffer and TX end interrupt */
975 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0);
976
977 /* Mask phy and link status changes interrupts */
978 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
979
980 return 0;
981 }
982
983 #ifdef MV64340_NAPI
984 static void mv64340_tx(struct net_device *dev)
985 {
986 ETH_PORT_INFO *ethernet_private;
987 struct mv64340_eth_priv *port_private;
988 unsigned int port_num;
989 PKT_INFO pkt_info;
990
991 ethernet_private = dev->priv;
992 port_private =
993 (struct mv64340_eth_priv *) ethernet_private->port_private;
994
995 port_num = port_private->port_num;
996
997 while (eth_tx_return_desc(ethernet_private, &pkt_info) == ETH_OK) {
998 if (pkt_info.return_info) {
999 dev_kfree_skb_irq((struct sk_buff *)
1000 pkt_info.return_info);
1001 if (skb_shinfo(pkt_info.return_info)->nr_frags)
1002 pci_unmap_page(0, pkt_info.buf_ptr,
1003 pkt_info.byte_cnt,
1004 PCI_DMA_TODEVICE);
1005
1006 if (port_private->tx_ring_skbs != 1)
1007 port_private->tx_ring_skbs--;
1008 } else
1009 pci_unmap_page(0, pkt_info.buf_ptr,
1010 pkt_info.byte_cnt,
1011 PCI_DMA_TODEVICE);
1012 }
1013
1014 if (netif_queue_stopped(dev) &&
1015 (dev->flags & IFF_RUNNING) &&
1016 (MV64340_TX_QUEUE_SIZE > port_private->tx_ring_skbs + 1)) {
1017 netif_wake_queue(dev);
1018 }
1019 }
1020
1021 /**********************************************************************
1022 * mv64340_poll
1023 *
1024 * This function is used in case of NAPI
1025 ***********************************************************************/
1026 static int mv64340_poll(struct net_device *netdev, int *budget)
1027 {
1028 ETH_PORT_INFO *ethernet_private = netdev->priv;
1029 struct mv64340_eth_priv *port_private =
1030 (struct mv64340_eth_priv *) ethernet_private->port_private;
1031 int done = 1, orig_budget, work_done;
1032 unsigned long flags;
1033 unsigned int port_num = port_private->port_num;
1034
1035 spin_lock_irqsave(&port_private->lock, flags);
1036
1037 #ifdef MV64340_TX_FAST_REFILL
1038 if (++ethernet_private->tx_clean_threshold > 5) {
1039 mv64340_tx(netdev);
1040 ethernet_private->tx_clean_threshold = 0;
1041 }
1042 #endif
1043 if ((u32)(MV_READ_DATA(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) != (u32)ethernet_private->rx_used_desc_q) {
1044 orig_budget = *budget;
1045 if (orig_budget > netdev->quota)
1046 orig_budget = netdev->quota;
1047 work_done = mv64340_eth_receive_queue(netdev, 0, orig_budget);
1048 port_private->rx_task.routine(netdev);
1049 *budget -= work_done;
1050 netdev->quota -= work_done;
1051 if (work_done >= orig_budget)
1052 done = 0;
1053 }
1054
1055 if (done) {
1056 __netif_rx_complete(netdev);
1057 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),0);
1058 MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),0);
1059 MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
1060 INT_CAUSE_UNMASK_ALL);
1061 MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
1062 INT_CAUSE_UNMASK_ALL_EXT);
1063 }
1064
1065 spin_unlock_irqrestore(&port_private->lock, flags);
1066 return (done ? 0 : 1);
1067 }
1068 #endif
1069
1070 /**********************************************************************
1071 * mv64340_eth_start_xmit
1072 *
1073 * This function is queues a packet in the Tx descriptor for
1074 * required port.
1075 *
1076 * Input : skb - a pointer to socket buffer
1077 * dev - a pointer to the required port
1078 *
1079 * Output : zero upon success
1080 **********************************************************************/
1081
1082 static int mv64340_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1083 {
1084 ETH_PORT_INFO *ethernet_private;
1085 struct mv64340_eth_priv *port_private;
1086 unsigned int port_num;
1087 PKT_INFO pkt_info;
1088 unsigned long flags;
1089 ETH_FUNC_RET_STATUS status;
1090 struct net_device_stats *stats;
1091
1092 ethernet_private = dev->priv;
1093 port_private =
1094 (struct mv64340_eth_priv *) ethernet_private->port_private;
1095 port_num = port_private->port_num;
1096 stats = &port_private->stats;
1097
1098 if (netif_queue_stopped(dev)) {
1099 printk(KERN_ERR
1100 "%s: Tried sending packet when interface is stopped\n",
1101 dev->name);
1102 return 1;
1103 }
1104
1105 /* This is a hard error, log it. */
1106 if ((MV64340_TX_QUEUE_SIZE - port_private->tx_ring_skbs) <=
1107 (skb_shinfo(skb)->nr_frags + 1)) {
1108 netif_stop_queue(dev);
1109 printk(KERN_ERR
1110 "%s: Bug in mv64340_eth - Trying to transmit when"
1111 " queue full !\n", dev->name);
1112 return 1;
1113 }
1114
1115 /* Paranoid check - this shouldn't happen */
1116 if (skb == NULL) {
1117 stats->tx_dropped++;
1118 return 1;
1119 }
1120
1121 spin_lock_irqsave(&port_private->lock, flags);
1122
1123 /* Update packet info data structure -- DMA owned, first last */
1124 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
1125 if (!skb_shinfo(skb)->nr_frags || (skb_shinfo(skb)->nr_frags > 3)) {
1126 #endif
1127 pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
1128 ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
1129
1130 pkt_info.byte_cnt = skb->len;
1131 pkt_info.buf_ptr = pci_map_single
1132 (0, skb->data, skb->len, PCI_DMA_TODEVICE);
1133
1134 pkt_info.return_info = skb;
1135 status = eth_port_send(ethernet_private, &pkt_info);
1136 if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL))
1137 printk(KERN_ERR "%s: Error on transmitting packet\n",
1138 dev->name);
1139 port_private->tx_ring_skbs++;
1140 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
1141 }
1142 else {
1143 unsigned int frag;
1144 u32 ipheader;
1145
1146 /* first frag which is skb header */
1147 pkt_info.byte_cnt = skb_headlen(skb);
1148 pkt_info.buf_ptr = pci_map_single(0, skb->data,
1149 skb_headlen(skb), PCI_DMA_TODEVICE);
1150 pkt_info.return_info = 0;
1151 ipheader = (skb->nh.iph->ihl << 11);
1152 pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
1153 ETH_GEN_TCP_UDP_CHECKSUM |
1154 ETH_GEN_IP_V_4_CHECKSUM |
1155 ipheader;
1156 /* CPU already calculated pseudo header checksum. So, use it */
1157 pkt_info.l4i_chk = skb->h.th->check;
1158 status = eth_port_send(ethernet_private, &pkt_info);
1159 if (status != ETH_OK) {
1160 if ((status == ETH_ERROR))
1161 printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
1162 if (status == ETH_QUEUE_FULL)
1163 printk("Error on Queue Full \n");
1164 if (status == ETH_QUEUE_LAST_RESOURCE)
1165 printk("Tx resource error \n");
1166 }
1167
1168 /* Check for the remaining frags */
1169 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1170 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1171 pkt_info.l4i_chk = 0x0000;
1172 pkt_info.cmd_sts = 0x00000000;
1173
1174 /* Last Frag enables interrupt and frees the skb */
1175 if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
1176 pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT |
1177 ETH_TX_LAST_DESC;
1178 pkt_info.return_info = skb;
1179 port_private->tx_ring_skbs++;
1180 }
1181 else {
1182 pkt_info.return_info = 0;
1183 }
1184 pkt_info.byte_cnt = this_frag->size;
1185 if (this_frag->size < 8)
1186 printk("%d : \n", skb_shinfo(skb)->nr_frags);
1187
1188 pkt_info.buf_ptr = pci_map_page(0,
1189 this_frag->page,
1190 this_frag->page_offset,
1191 this_frag->size,
1192 PCI_DMA_TODEVICE);
1193
1194 status = eth_port_send(ethernet_private, &pkt_info);
1195
1196 if (status != ETH_OK) {
1197 if ((status == ETH_ERROR))
1198 printk(KERN_ERR "%s: Error on transmitting packet\n", dev->name);
1199
1200 if (status == ETH_QUEUE_LAST_RESOURCE)
1201 printk("Tx resource error \n");
1202
1203 if (status == ETH_QUEUE_FULL)
1204 printk("Queue is full \n");
1205 }
1206 }
1207 }
1208 #endif
1209
1210 /* Check if TX queue can handle another skb. If not, then
1211 * signal higher layers to stop requesting TX
1212 */
1213 if (MV64340_TX_QUEUE_SIZE <= (port_private->tx_ring_skbs + 1))
1214 /*
1215 * Stop getting skb's from upper layers.
1216 * Getting skb's from upper layers will be enabled again after
1217 * packets are released.
1218 */
1219 netif_stop_queue(dev);
1220
1221
1222 /* Update statistics and start of transmittion time */
1223 stats->tx_bytes += skb->len;
1224 stats->tx_packets++;
1225 dev->trans_start = jiffies;
1226
1227 spin_unlock_irqrestore(&port_private->lock, flags);
1228 return 0; /* success */
1229 }
1230
1231 /**********************************************************************
1232 * mv64340_eth_get_stats
1233 *
1234 * Returns a pointer to the interface statistics.
1235 *
1236 * Input : dev - a pointer to the required interface
1237 *
1238 * Output : a pointer to the interface's statistics
1239 **********************************************************************/
1240
1241 static struct net_device_stats *mv64340_eth_get_stats(struct net_device
1242 *dev)
1243 {
1244 ETH_PORT_INFO *ethernet_private;
1245 struct mv64340_eth_priv *port_private;
1246 unsigned int port_num;
1247
1248 ethernet_private = dev->priv;
1249 port_private =
1250 (struct mv64340_eth_priv *) ethernet_private->port_private;
1251 port_num = port_private->port_num;
1252
1253 return &port_private->stats;
1254 }
1255
1256 /**********************************************************************
1257 * mv64340_eth_init
1258 *
1259 * First function called after registering the network device.
1260 * It's purpose is to initialize the device as an ethernet device,
1261 * fill the structure that was given in registration with pointers
1262 * to functions, and setting the MAC address of the interface
1263 *
1264 * Input : number of port to initialize
1265 * Output : -ENONMEM if failed , 0 if success
1266 **********************************************************************/
1267 static int mv64340_eth_init(int port_num)
1268 {
1269 ETH_PORT_INFO *ethernet_private;
1270 struct mv64340_eth_priv *port_private;
1271 struct net_device *dev;
1272 int err;
1273
1274 dev = alloc_etherdev(sizeof(ETH_PORT_INFO));
1275 if (!dev)
1276 return -ENOMEM;
1277
1278 dev->open = mv64340_eth_open;
1279 dev->stop = mv64340_eth_stop;
1280 dev->hard_start_xmit = mv64340_eth_start_xmit;
1281 dev->get_stats = mv64340_eth_get_stats;
1282 dev->set_mac_address = mv64340_eth_set_mac_address;
1283 dev->set_multicast_list = mv64340_eth_set_rx_mode;
1284
1285 /* No need to Tx Timeout */
1286 dev->tx_timeout = mv64340_eth_tx_timeout;
1287 #ifdef MV64340_NAPI
1288 dev->poll = mv64340_poll;
1289 dev->weight = 64;
1290 #endif
1291
1292 dev->watchdog_timeo = 2 * HZ;
1293 dev->tx_queue_len = MV64340_TX_QUEUE_SIZE;
1294 dev->flags &= ~(IFF_RUNNING);
1295 dev->base_addr = 0;
1296 dev->change_mtu = &mv64340_eth_change_mtu;
1297
1298 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
1299 #ifdef MAX_SKB_FRAGS
1300 #ifndef CONFIG_JAGUAR_DMALOW
1301 /*
1302 * Zero copy can only work if we use Discovery II memory. Else, we will
1303 * have to map the buffers to ISA memory which is only 16 MB
1304 */
1305 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
1306 #endif
1307 #endif
1308 #endif
1309 ethernet_private = dev->priv;
1310 /* Allocate memory for stats data structure and spinlock etc... */
1311 ethernet_private->port_private = (void *)
1312 kmalloc(sizeof(struct mv64340_eth_priv), GFP_KERNEL);
1313 if (!ethernet_private->port_private) {
1314 err = -ENOMEM;
1315 goto out_free_dev;
1316 }
1317 memset(ethernet_private->port_private, 0,
1318 sizeof(struct mv64340_eth_priv));
1319 if (port_num == 0)
1320 ethernet_private->port_num = ETH_0;
1321 else if (port_num == 1)
1322 ethernet_private->port_num = ETH_1;
1323 else if (port_num == 2)
1324 ethernet_private->port_num = ETH_2;
1325 else {
1326 printk(KERN_ERR "%s: Invalid port number\n", dev->name);
1327 kfree(ethernet_private->port_private);
1328 err = -ENODEV;
1329 goto out_free_dev;
1330 }
1331
1332 port_private =
1333 (struct mv64340_eth_priv *) ethernet_private->port_private;
1334 port_private->port_num = port_num;
1335
1336 memset(&port_private->stats, 0, sizeof(struct net_device_stats));
1337
1338 /* Configure the timeout task */
1339 INIT_TQUEUE(ðernet_private->tx_timeout_task,
1340 (void (*)(void *))mv64340_eth_tx_timeout_task, dev);
1341
1342 /* Init spinlock */
1343 spin_lock_init(&port_private->lock);
1344
1345 /* set MAC addresses */
1346 memcpy(dev->dev_addr, prom_mac_addr_base, 6);
1347 dev->dev_addr[5] += port_num;
1348
1349 err = register_netdev(dev);
1350 if (err)
1351 goto out_free_dev;
1352
1353 printk(KERN_NOTICE "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
1354 dev->name, port_num,
1355 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1356 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1357
1358 if (dev->features & NETIF_F_SG)
1359 printk("Scatter Gather Enabled ");
1360
1361 if (dev->features & NETIF_F_IP_CSUM)
1362 printk("TX TCP/IP Checksumming Supported \n");
1363
1364 printk("RX TCP/UDP Checksum Offload ON, \n");
1365 printk("TX and RX Interrupt Coalescing ON \n");
1366
1367 #ifdef MV64340_NAPI
1368 printk("RX NAPI Enabled \n");
1369 #endif
1370
1371 return 0;
1372
1373 out_free_dev:
1374 kfree(dev->priv); /* free_netdev(dev) in 2.6 */
1375 kfree(dev);
1376
1377 return err;
1378 }
1379
1380 /**********************************************************************
1381 * mv64340_init_module
1382 *
1383 * Registers the network drivers into the Linux kernel
1384 *
1385 * Input : N/A
1386 *
1387 * Output : N/A
1388 **********************************************************************/
1389 static int __init mv64340_init_module(void)
1390 {
1391 printk(KERN_NOTICE "MV-64340 10/100/1000 Ethernet Driver\n");
1392 #ifdef CONFIG_MV64340_ETH_0
1393 if (mv64340_eth_init(0)) {
1394 printk(KERN_ERR
1395 "Error registering MV-64360 ethernet port 0\n");
1396 }
1397 #endif
1398 #ifdef CONFIG_MV64340_ETH_1
1399 if (mv64340_eth_init(1)) {
1400 printk(KERN_ERR
1401 "Error registering MV-64360 ethernet port 1\n");
1402 }
1403 #endif
1404 #ifdef CONFIG_MV64340_ETH_2
1405 if (mv64340_eth_init(2)) {
1406 printk(KERN_ERR
1407 "Error registering MV-64360 ethernet port 2\n");
1408 }
1409 #endif
1410 return 0;
1411 }
1412
1413 /**********************************************************************
1414 * mv64340_cleanup_module
1415 *
1416 * Registers the network drivers into the Linux kernel
1417 *
1418 * Input : N/A
1419 *
1420 * Output : N/A
1421 **********************************************************************/
1422 static void __init mv64340_cleanup_module(void)
1423 {
1424 /* Nothing to do here ! it's not a removable module */
1425 }
1426
1427 module_init(mv64340_init_module);
1428 module_exit(mv64340_cleanup_module);
1429
1430 MODULE_LICENSE("GPL");
1431 MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm and Manish Lachwani");
1432 MODULE_DESCRIPTION("Ethernet driver for Marvell MV64340");
1433
1434 /*************************************************************************
1435 **************************************************************************
1436 **************************************************************************
1437 * The second part is the low level driver of the gigE ethernet ports. *
1438 **************************************************************************
1439 **************************************************************************
1440 *************************************************************************/
1441
1442
1443
1444
1445 /********************************************************************************
1446 * Marvell's Gigabit Ethernet controller low level driver
1447 *
1448 * DESCRIPTION:
1449 * This file introduce low level API to Marvell's Gigabit Ethernet
1450 * controller. This Gigabit Ethernet Controller driver API controls
1451 * 1) Operations (i.e. port init, start, reset etc').
1452 * 2) Data flow (i.e. port send, receive etc').
1453 * Each Gigabit Ethernet port is controlled via ETH_PORT_INFO
1454 * struct.
1455 * This struct includes user configuration information as well as
1456 * driver internal data needed for its operations.
1457 *
1458 * Supported Features:
1459 * - This low level driver is OS independent. Allocating memory for
1460 * the descriptor rings and buffers are not within the scope of
1461 * this driver.
1462 * - The user is free from Rx/Tx queue managing.
1463 * - This low level driver introduce functionality API that enable
1464 * the to operate Marvell's Gigabit Ethernet Controller in a
1465 * convenient way.
1466 * - Simple Gigabit Ethernet port operation API.
1467 * - Simple Gigabit Ethernet port data flow API.
1468 * - Data flow and operation API support per queue functionality.
1469 * - Support cached descriptors for better performance.
1470 * - Enable access to all four DRAM banks and internal SRAM memory
1471 * spaces.
1472 * - PHY access and control API.
1473 * - Port control register configuration API.
1474 * - Full control over Unicast and Multicast MAC configurations.
1475 *
1476 * Operation flow:
1477 *
1478 * Initialization phase
1479 * This phase complete the initialization of the ETH_PORT_INFO
1480 * struct.
1481 * User information regarding port configuration has to be set
1482 * prior to calling the port initialization routine.
1483 *
1484 * In this phase any port Tx/Rx activity is halted, MIB counters
1485 * are cleared, PHY address is set according to user parameter and
1486 * access to DRAM and internal SRAM memory spaces.
1487 *
1488 * Driver ring initialization
1489 * Allocating memory for the descriptor rings and buffers is not
1490 * within the scope of this driver. Thus, the user is required to
1491 * allocate memory for the descriptors ring and buffers. Those
1492 * memory parameters are used by the Rx and Tx ring initialization
1493 * routines in order to curve the descriptor linked list in a form
1494 * of a ring.
1495 * Note: Pay special attention to alignment issues when using
1496 * cached descriptors/buffers. In this phase the driver store
1497 * information in the ETH_PORT_INFO struct regarding each queue
1498 * ring.
1499 *
1500 * Driver start
1501 * This phase prepares the Ethernet port for Rx and Tx activity.
1502 * It uses the information stored in the ETH_PORT_INFO struct to
1503 * initialize the various port registers.
1504 *
1505 * Data flow:
1506 * All packet references to/from the driver are done using PKT_INFO
1507 * struct.
1508 * This struct is a unified struct used with Rx and Tx operations.
1509 * This way the user is not required to be familiar with neither
1510 * Tx nor Rx descriptors structures.
1511 * The driver's descriptors rings are management by indexes.
1512 * Those indexes controls the ring resources and used to indicate
1513 * a SW resource error:
1514 * 'current'
1515 * This index points to the current available resource for use. For
1516 * example in Rx process this index will point to the descriptor
1517 * that will be passed to the user upon calling the receive routine.
1518 * In Tx process, this index will point to the descriptor
1519 * that will be assigned with the user packet info and transmitted.
1520 * 'used'
1521 * This index points to the descriptor that need to restore its
1522 * resources. For example in Rx process, using the Rx buffer return
1523 * API will attach the buffer returned in packet info to the
1524 * descriptor pointed by 'used'. In Tx process, using the Tx
1525 * descriptor return will merely return the user packet info with
1526 * the command status of the transmitted buffer pointed by the
1527 * 'used' index. Nevertheless, it is essential to use this routine
1528 * to update the 'used' index.
1529 * 'first'
1530 * This index supports Tx Scatter-Gather. It points to the first
1531 * descriptor of a packet assembled of multiple buffers. For example
1532 * when in middle of Such packet we have a Tx resource error the
1533 * 'curr' index get the value of 'first' to indicate that the ring
1534 * returned to its state before trying to transmit this packet.
1535 *
1536 * Receive operation:
1537 * The eth_port_receive API set the packet information struct,
1538 * passed by the caller, with received information from the
1539 * 'current' SDMA descriptor.
1540 * It is the user responsibility to return this resource back
1541 * to the Rx descriptor ring to enable the reuse of this source.
1542 * Return Rx resource is done using the eth_rx_return_buff API.
1543 *
1544 * Transmit operation:
1545 * The eth_port_send API supports Scatter-Gather which enables to
1546 * send a packet spanned over multiple buffers. This means that
1547 * for each packet info structure given by the user and put into
1548 * the Tx descriptors ring, will be transmitted only if the 'LAST'
1549 * bit will be set in the packet info command status field. This
1550 * API also consider restriction regarding buffer alignments and
1551 * sizes.
1552 * The user must return a Tx resource after ensuring the buffer
1553 * has been transmitted to enable the Tx ring indexes to update.
1554 *
1555 * BOARD LAYOUT
1556 * This device is on-board. No jumper diagram is necessary.
1557 *
1558 * EXTERNAL INTERFACE
1559 *
1560 * Prior to calling the initialization routine eth_port_init() the user
1561 * must set the following fields under ETH_PORT_INFO struct:
1562 * port_num User Ethernet port number.
1563 * port_mac_addr[6] User defined port MAC address.
1564 * port_config User port configuration value.
1565 * port_config_extend User port config extend value.
1566 * port_sdma_config User port SDMA config value.
1567 * port_serial_control User port serial control value.
1568 * *port_private User scratch pad for user specific data structures.
1569 *
1570 * This driver introduce a set of default values:
1571 * PORT_CONFIG_VALUE Default port configuration value
1572 * PORT_CONFIG_EXTEND_VALUE Default port extend configuration value
1573 * PORT_SDMA_CONFIG_VALUE Default sdma control value
1574 * PORT_SERIAL_CONTROL_VALUE Default port serial control value
1575 *
1576 * This driver data flow is done using the PKT_INFO struct which is
1577 * a unified struct for Rx and Tx operations:
1578 * byte_cnt Tx/Rx descriptor buffer byte count.
1579 * l4i_chk CPU provided TCP Checksum. For Tx operation only.
1580 * cmd_sts Tx/Rx descriptor command status.
1581 * buf_ptr Tx/Rx descriptor buffer pointer.
1582 * return_info Tx/Rx user resource return information.
1583 *
1584 *******************************************************************************/
1585 /* includes */
1586
1587 /* defines */
1588 /* SDMA command macros */
1589 #define ETH_ENABLE_TX_QUEUE(eth_port) \
1590 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1)
1591
1592 #define ETH_DISABLE_TX_QUEUE(eth_port) \
1593 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), (1 << 8))
1594
1595 #define ETH_ENABLE_RX_QUEUE(rx_queue, eth_port) \
1596 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << rx_queue))
1597
1598 #define ETH_DISABLE_RX_QUEUE(rx_queue, eth_port) \
1599 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port), (1 << (8 + rx_queue)))
1600
1601 #define LINK_UP_TIMEOUT 100000
1602 #define PHY_BUSY_TIMEOUT 10000000
1603
1604 /* locals */
1605
1606 /* PHY routines */
1607 #ifdef MDD_CUT
1608 static void ethernet_phy_set(ETH_PORT eth_port_num, int phy_addr);
1609 #endif
1610 static int ethernet_phy_get(ETH_PORT eth_port_num);
1611
1612 /* Ethernet Port routines */
1613 static bool eth_port_uc_addr(ETH_PORT eth_port_num,
1614 unsigned char uc_nibble, int option);
1615
1616 #ifdef MDD_CUT
1617 static void eth_b_copy(unsigned int src_addr, unsigned int dst_addr,
1618 int byte_count);
1619 #endif
1620
1621 /*******************************************************************************
1622 * eth_port_init - Initialize the Ethernet port driver
1623 *
1624 * DESCRIPTION:
1625 * This function prepares the ethernet port to start its activity:
1626 * 1) Completes the ethernet port driver struct initialization toward port
1627 * start routine.
1628 * 2) Resets the device to a quiescent state in case of warm reboot.
1629 * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM.
1630 * 4) Clean MAC tables. The reset status of those tables is unknown.
1631 * 5) Set PHY address.
1632 * Note: Call this routine prior to eth_port_start routine and after setting
1633 * user values in the user fields of Ethernet port control struct.
1634 *
1635 * INPUT:
1636 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet port control struct
1637 *
1638 * OUTPUT:
1639 * See description.
1640 *
1641 * RETURN:
1642 * None.
1643 *
1644 *******************************************************************************/
1645 static void eth_port_init(ETH_PORT_INFO * p_eth_port_ctrl)
1646 {
1647 p_eth_port_ctrl->port_config = PORT_CONFIG_VALUE;
1648 p_eth_port_ctrl->port_config_extend = PORT_CONFIG_EXTEND_VALUE;
1649 #if defined(__BIG_ENDIAN)
1650 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE;
1651 #elif defined(__LITTLE_ENDIAN)
1652 p_eth_port_ctrl->port_sdma_config = PORT_SDMA_CONFIG_VALUE |
1653 ETH_BLM_RX_NO_SWAP | ETH_BLM_TX_NO_SWAP;
1654 #else
1655 #error One of __LITTLE_ENDIAN or __BIG_ENDIAN must be defined!
1656 #endif
1657 p_eth_port_ctrl->port_serial_control = PORT_SERIAL_CONTROL_VALUE;
1658
1659 p_eth_port_ctrl->port_rx_queue_command = 0;
1660 p_eth_port_ctrl->port_tx_queue_command = 0;
1661
1662 p_eth_port_ctrl->rx_resource_err = false;
1663 p_eth_port_ctrl->tx_resource_err = false;
1664
1665 eth_port_reset(p_eth_port_ctrl->port_num);
1666
1667 eth_port_init_mac_tables(p_eth_port_ctrl->port_num);
1668
1669 ethernet_phy_reset(p_eth_port_ctrl->port_num);
1670 }
1671
1672 /*******************************************************************************
1673 * eth_port_start - Start the Ethernet port activity.
1674 *
1675 * DESCRIPTION:
1676 * This routine prepares the Ethernet port for Rx and Tx activity:
1677 * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue that
1678 * has been initialized a descriptor's ring (using ether_init_tx_desc_ring
1679 * for Tx and ether_init_rx_desc_ring for Rx)
1680 * 2. Initialize and enable the Ethernet configuration port by writing to
1681 * the port's configuration and command registers.
1682 * 3. Initialize and enable the SDMA by writing to the SDMA's
1683 * configuration and command registers.
1684 * After completing these steps, the ethernet port SDMA can starts to
1685 * perform Rx and Tx activities.
1686 *
1687 * Note: Each Rx and Tx queue descriptor's list must be initialized prior
1688 * to calling this function (use ether_init_tx_desc_ring for Tx queues and
1689 * ether_init_rx_desc_ring for Rx queues).
1690 *
1691 * INPUT:
1692 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet port control struct
1693 *
1694 * OUTPUT:
1695 * Ethernet port is ready to receive and transmit.
1696 *
1697 * RETURN:
1698 * false if the port PHY is not up.
1699 * true otherwise.
1700 *
1701 *******************************************************************************/
1702 static bool eth_port_start(ETH_PORT_INFO * p_eth_port_ctrl)
1703 {
1704 int tx_curr_desc;
1705 int rx_curr_desc;
1706 unsigned int phy_reg_data;
1707 ETH_PORT eth_port_num = p_eth_port_ctrl->port_num;
1708
1709 /* Assignment of Tx CTRP of given queue */
1710 tx_curr_desc = p_eth_port_ctrl->tx_curr_desc_q;
1711 MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
1712 (u32)&(p_eth_port_ctrl->p_tx_desc_area[tx_curr_desc]));
1713
1714 /* Assignment of Rx CRDP of given queue */
1715 rx_curr_desc = p_eth_port_ctrl->rx_curr_desc_q;
1716 MV_WRITE(MV64340_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(eth_port_num),
1717 virt_to_bus(&(p_eth_port_ctrl->p_rx_desc_area[rx_curr_desc])));
1718
1719 /* Add the assigned Ethernet address to the port's address table */
1720 eth_port_uc_addr_set(p_eth_port_ctrl->port_num,
1721 p_eth_port_ctrl->port_mac_addr);
1722
1723 /* Assign port configuration and command. */
1724 MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
1725 p_eth_port_ctrl->port_config);
1726
1727 MV_WRITE(MV64340_ETH_PORT_CONFIG_EXTEND_REG(eth_port_num),
1728 p_eth_port_ctrl->port_config_extend);
1729
1730 MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
1731 p_eth_port_ctrl->port_serial_control);
1732
1733 MV_SET_REG_BITS(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num),
1734 ETH_SERIAL_PORT_ENABLE);
1735
1736 /* Assign port SDMA configuration */
1737 MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
1738 p_eth_port_ctrl->port_sdma_config);
1739
1740 /* Enable port Rx. */
1741 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(eth_port_num),
1742 p_eth_port_ctrl->port_rx_queue_command);
1743
1744 /* Check if link is up */
1745 eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
1746
1747 if (!(phy_reg_data & 0x20))
1748 return false;
1749
1750 return true;
1751 }
1752
1753 /*******************************************************************************
1754 * eth_port_uc_addr_set - This function Set the port Unicast address.
1755 *
1756 * DESCRIPTION:
1757 * This function Set the port Ethernet MAC address.
1758 *
1759 * INPUT:
1760 * ETH_PORT eth_port_num Port number.
1761 * char * p_addr Address to be set
1762 *
1763 * OUTPUT:
1764 * Set MAC address low and high registers. also calls eth_port_uc_addr()
1765 * To set the unicast table with the proper information.
1766 *
1767 * RETURN:
1768 * N/A.
1769 *
1770 *******************************************************************************/
1771 static void eth_port_uc_addr_set(ETH_PORT eth_port_num,
1772 unsigned char *p_addr)
1773 {
1774 unsigned int mac_h;
1775 unsigned int mac_l;
1776
1777 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1778 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) |
1779 (p_addr[2] << 8) | (p_addr[3] << 0);
1780
1781 MV_WRITE(MV64340_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
1782 MV_WRITE(MV64340_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
1783
1784 /* Accept frames of this address */
1785 eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR);
1786
1787 return;
1788 }
1789
1790 /*******************************************************************************
1791 * eth_port_uc_addr - This function Set the port unicast address table
1792 *
1793 * DESCRIPTION:
1794 * This function locates the proper entry in the Unicast table for the
1795 * specified MAC nibble and sets its properties according to function
1796 * parameters.
1797 *
1798 * INPUT:
1799 * ETH_PORT eth_port_num Port number.
1800 * unsigned char uc_nibble Unicast MAC Address last nibble.
1801 * int option 0 = Add, 1 = remove address.
1802 *
1803 * OUTPUT:
1804 * This function add/removes MAC addresses from the port unicast address
1805 * table.
1806 *
1807 * RETURN:
1808 * true is output succeeded.
1809 * false if option parameter is invalid.
1810 *
1811 *******************************************************************************/
1812 static bool eth_port_uc_addr(ETH_PORT eth_port_num,
1813 unsigned char uc_nibble, int option)
1814 {
1815 unsigned int unicast_reg;
1816 unsigned int tbl_offset;
1817 unsigned int reg_offset;
1818
1819 /* Locate the Unicast table entry */
1820 uc_nibble = (0xf & uc_nibble);
1821 tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */
1822 reg_offset = uc_nibble % 4; /* Entry offset within the above register */
1823
1824 switch (option) {
1825 case REJECT_MAC_ADDR:
1826 /* Clear accepts frame bit at specified unicast DA table entry */
1827 unicast_reg =
1828 MV_READ_DATA(
1829 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1830 (eth_port_num) + tbl_offset));
1831
1832 unicast_reg &= (0x0E << (8 * reg_offset));
1833
1834 MV_WRITE(
1835 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1836 (eth_port_num) + tbl_offset), unicast_reg);
1837 break;
1838
1839 case ACCEPT_MAC_ADDR:
1840 /* Set accepts frame bit at unicast DA filter table entry */
1841 unicast_reg =
1842 MV_READ_DATA(
1843 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1844 (eth_port_num) + tbl_offset));
1845
1846 unicast_reg |= (0x01 << (8 * reg_offset));
1847
1848 MV_WRITE(
1849 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1850 (eth_port_num) + tbl_offset), unicast_reg);
1851
1852 break;
1853
1854 default:
1855 return false;
1856 }
1857 return true;
1858 }
1859
1860 /*******************************************************************************
1861 * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
1862 *
1863 * DESCRIPTION:
1864 * Go through all the DA filter tables (Unicast, Special Multicast & Other
1865 * Multicast) and set each entry to 0.
1866 *
1867 * INPUT:
1868 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
1869 *
1870 * OUTPUT:
1871 * Multicast and Unicast packets are rejected.
1872 *
1873 * RETURN:
1874 * None.
1875 *
1876 *******************************************************************************/
1877 static void eth_port_init_mac_tables(ETH_PORT eth_port_num)
1878 {
1879 int table_index;
1880
1881 /* Clear DA filter unicast table (Ex_dFUT) */
1882 for (table_index = 0; table_index <= 0xC; table_index += 4)
1883 MV_WRITE(
1884 (MV64340_ETH_DA_FILTER_UNICAST_TABLE_BASE
1885 (eth_port_num) + table_index), 0);
1886
1887 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
1888 /* Clear DA filter special multicast table (Ex_dFSMT) */
1889 MV_WRITE(
1890 (MV64340_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
1891 (eth_port_num) + table_index), 0);
1892 /* Clear DA filter other multicast table (Ex_dFOMT) */
1893 MV_WRITE((MV64340_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
1894 (eth_port_num) + table_index), 0);
1895 }
1896 }
1897
1898 /*******************************************************************************
1899 * eth_clear_mib_counters - Clear all MIB counters
1900 *
1901 * DESCRIPTION:
1902 * This function clears all MIB counters of a specific ethernet port.
1903 * A read from the MIB counter will reset the counter.
1904 *
1905 * INPUT:
1906 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
1907 *
1908 * OUTPUT:
1909 * After reading all MIB counters, the counters resets.
1910 *
1911 * RETURN:
1912 * MIB counter value.
1913 *
1914 *******************************************************************************/
1915 static void eth_clear_mib_counters(ETH_PORT eth_port_num)
1916 {
1917 int i;
1918 unsigned int dummy;
1919
1920 /* Perform dummy reads from MIB counters */
1921 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW;
1922 i < ETH_MIB_LATE_COLLISION; i += 4)
1923 dummy =
1924 MV_READ_DATA(
1925 (MV64340_ETH_MIB_COUNTERS_BASE
1926 (eth_port_num) + i));
1927
1928 return;
1929 }
1930
1931
1932 #ifdef MDD_CUT
1933 /*******************************************************************************
1934 * ethernet_phy_set - Set the ethernet port PHY address.
1935 *
1936 * DESCRIPTION:
1937 * This routine set the ethernet port PHY address according to given
1938 * parameter.
1939 *
1940 * INPUT:
1941 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
1942 *
1943 * OUTPUT:
1944 * Set PHY Address Register with given PHY address parameter.
1945 *
1946 * RETURN:
1947 * None.
1948 *
1949 *******************************************************************************/
1950 static void ethernet_phy_set(ETH_PORT eth_port_num, int phy_addr)
1951 {
1952 unsigned int reg_data;
1953
1954 reg_data = MV_READ_DATA(MV64340_ETH_PHY_ADDR_REG);
1955
1956 reg_data &= ~(0x1F << (5 * eth_port_num));
1957 reg_data |= (phy_addr << (5 * eth_port_num));
1958
1959 MV_WRITE(MV64340_ETH_PHY_ADDR_REG, reg_data);
1960
1961 return;
1962 }
1963 #endif
1964
1965 /*******************************************************************************
1966 * ethernet_phy_get - Get the ethernet port PHY address.
1967 *
1968 * DESCRIPTION:
1969 * This routine returns the given ethernet port PHY address.
1970 *
1971 * INPUT:
1972 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
1973 *
1974 * OUTPUT:
1975 * None.
1976 *
1977 * RETURN:
1978 * PHY address.
1979 *
1980 *******************************************************************************/
1981 static int ethernet_phy_get(ETH_PORT eth_port_num)
1982 {
1983 unsigned int reg_data;
1984
1985 reg_data = MV_READ_DATA(MV64340_ETH_PHY_ADDR_REG);
1986
1987 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
1988 }
1989
1990 /*******************************************************************************
1991 * ethernet_phy_reset - Reset Ethernet port PHY.
1992 *
1993 * DESCRIPTION:
1994 * This routine utilize the SMI interface to reset the ethernet port PHY.
1995 * The routine waits until the link is up again or link up is timeout.
1996 *
1997 * INPUT:
1998 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
1999 *
2000 * OUTPUT:
2001 * The ethernet port PHY renew its link.
2002 *
2003 * RETURN:
2004 * None.
2005 *
2006 *******************************************************************************/
2007 static bool ethernet_phy_reset(ETH_PORT eth_port_num)
2008 {
2009 unsigned int time_out = 50;
2010 unsigned int phy_reg_data;
2011
2012 /* Reset the PHY */
2013 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
2014 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
2015 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
2016
2017 /* Poll on the PHY LINK */
2018 do {
2019 eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data);
2020
2021 if (time_out-- == 0)
2022 return false;
2023 }
2024 while (!(phy_reg_data & 0x20));
2025
2026 return true;
2027 }
2028
2029 /*******************************************************************************
2030 * eth_port_reset - Reset Ethernet port
2031 *
2032 * DESCRIPTION:
2033 * This routine resets the chip by aborting any SDMA engine activity and
2034 * clearing the MIB counters. The Receiver and the Transmit unit are in
2035 * idle state after this command is performed and the port is disabled.
2036 *
2037 * INPUT:
2038 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
2039 *
2040 * OUTPUT:
2041 * Channel activity is halted.
2042 *
2043 * RETURN:
2044 * None.
2045 *
2046 *******************************************************************************/
2047 static void eth_port_reset(ETH_PORT eth_port_num)
2048 {
2049 unsigned int reg_data;
2050
2051 /* Stop Tx port activity. Check port Tx activity. */
2052 reg_data =
2053 MV_READ_DATA(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port_num));
2054
2055 if (reg_data & 0xFF) {
2056 /* Issue stop command for active channels only */
2057 MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
2058 (eth_port_num), (reg_data << 8));
2059
2060 /* Wait for all Tx activity to terminate. */
2061 do {
2062 /* Check port cause register that all Tx queues are stopped */
2063 reg_data =
2064 MV_READ_DATA
2065 (MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG
2066 (eth_port_num));
2067 }
2068 while (reg_data & 0xFF);
2069 }
2070
2071 /* Stop Rx port activity. Check port Rx activity. */
2072 reg_data =
2073 MV_READ_DATA(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
2074 (eth_port_num));
2075
2076 if (reg_data & 0xFF) {
2077 /* Issue stop command for active channels only */
2078 MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
2079 (eth_port_num), (reg_data << 8));
2080
2081 /* Wait for all Rx activity to terminate. */
2082 do {
2083 /* Check port cause register that all Rx queues are stopped */
2084 reg_data =
2085 MV_READ_DATA
2086 (MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG
2087 (eth_port_num));
2088 }
2089 while (reg_data & 0xFF);
2090 }
2091
2092
2093 /* Clear all MIB counters */
2094 eth_clear_mib_counters(eth_port_num);
2095
2096 /* Reset the Enable bit in the Configuration Register */
2097 reg_data =
2098 MV_READ_DATA(MV64340_ETH_PORT_SERIAL_CONTROL_REG (eth_port_num));
2099 reg_data &= ~ETH_SERIAL_PORT_ENABLE;
2100 MV_WRITE(MV64340_ETH_PORT_SERIAL_CONTROL_REG(eth_port_num), reg_data);
2101
2102 return;
2103 }
2104
2105 /*******************************************************************************
2106 * ethernet_set_config_reg - Set specified bits in configuration register.
2107 *
2108 * DESCRIPTION:
2109 * This function sets specified bits in the given ethernet
2110 * configuration register.
2111 *
2112 * INPUT:
2113 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
2114 * unsigned int value 32 bit value.
2115 *
2116 * OUTPUT:
2117 * The set bits in the value parameter are set in the configuration
2118 * register.
2119 *
2120 * RETURN:
2121 * None.
2122 *
2123 *******************************************************************************/
2124 static void ethernet_set_config_reg(ETH_PORT eth_port_num,
2125 unsigned int value)
2126 {
2127 unsigned int eth_config_reg;
2128
2129 eth_config_reg =
2130 MV_READ_DATA(MV64340_ETH_PORT_CONFIG_REG(eth_port_num));
2131 eth_config_reg |= value;
2132 MV_WRITE(MV64340_ETH_PORT_CONFIG_REG(eth_port_num),
2133 eth_config_reg);
2134
2135 return;
2136 }
2137
2138 /*******************************************************************************
2139 * ethernet_get_config_reg - Get the port configuration register
2140 *
2141 * DESCRIPTION:
2142 * This function returns the configuration register value of the given
2143 * ethernet port.
2144 *
2145 * INPUT:
2146 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
2147 *
2148 * OUTPUT:
2149 * None.
2150 *
2151 * RETURN:
2152 * Port configuration register value.
2153 *
2154 *******************************************************************************/
2155 static unsigned int ethernet_get_config_reg(ETH_PORT eth_port_num)
2156 {
2157 unsigned int eth_config_reg;
2158
2159 eth_config_reg = MV_READ_DATA(MV64340_ETH_PORT_CONFIG_EXTEND_REG
2160 (eth_port_num));
2161 return eth_config_reg;
2162 }
2163
2164
2165 /*******************************************************************************
2166 * eth_port_read_smi_reg - Read PHY registers
2167 *
2168 * DESCRIPTION:
2169 * This routine utilize the SMI interface to interact with the PHY in
2170 * order to perform PHY register read.
2171 *
2172 * INPUT:
2173 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
2174 * unsigned int phy_reg PHY register address offset.
2175 * unsigned int *value Register value buffer.
2176 *
2177 * OUTPUT:
2178 * Write the value of a specified PHY register into given buffer.
2179 *
2180 * RETURN:
2181 * false if the PHY is busy or read data is not in valid state.
2182 * true otherwise.
2183 *
2184 *******************************************************************************/
2185 static bool eth_port_read_smi_reg(ETH_PORT eth_port_num,
2186 unsigned int phy_reg,
2187 unsigned int *value)
2188 {
2189 unsigned int reg_value;
2190 unsigned int time_out = PHY_BUSY_TIMEOUT;
2191 int phy_addr;
2192
2193 phy_addr = ethernet_phy_get(eth_port_num);
2194
2195 /* first check that it is not busy */
2196 do {
2197 reg_value = MV_READ_DATA(MV64340_ETH_SMI_REG);
2198 if (time_out-- == 0) {
2199 return false;
2200 }
2201 }
2202 while (reg_value & ETH_SMI_BUSY);
2203
2204 /* not busy */
2205
2206 MV_WRITE(MV64340_ETH_SMI_REG,
2207 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2208
2209 time_out = PHY_BUSY_TIMEOUT; /* initialize the time out var again */
2210
2211 do {
2212 reg_value = MV_READ_DATA(MV64340_ETH_SMI_REG);
2213 if (time_out-- == 0) {
2214 return false;
2215 }
2216 }
2217 while (reg_value & ETH_SMI_READ_VALID);
2218
2219 /* Wait for the data to update in the SMI register */
2220 for (time_out = 0; time_out < PHY_BUSY_TIMEOUT; time_out++);
2221
2222 reg_value = MV_READ_DATA(MV64340_ETH_SMI_REG);
2223
2224 *value = reg_value & 0xffff;
2225
2226 return true;
2227 }
2228
2229 /*******************************************************************************
2230 * eth_port_write_smi_reg - Write to PHY registers
2231 *
2232 * DESCRIPTION:
2233 * This routine utilize the SMI interface to interact with the PHY in
2234 * order to perform writes to PHY registers.
2235 *
2236 * INPUT:
2237 * ETH_PORT eth_port_num Ethernet Port number. See ETH_PORT enum.
2238 * unsigned int phy_reg PHY register address offset.
2239 * unsigned int value Register value.
2240 *
2241 * OUTPUT:
2242 * Write the given value to the specified PHY register.
2243 *
2244 * RETURN:
2245 * false if the PHY is busy.
2246 * true otherwise.
2247 *
2248 *******************************************************************************/
2249 static bool eth_port_write_smi_reg(ETH_PORT eth_port_num,
2250 unsigned int phy_reg,
2251 unsigned int value)
2252 {
2253 unsigned int reg_value;
2254 unsigned int time_out = PHY_BUSY_TIMEOUT;
2255 int phy_addr;
2256
2257 phy_addr = ethernet_phy_get(eth_port_num);
2258
2259 /* first check that it is not busy */
2260 do {
2261 reg_value = MV_READ_DATA(MV64340_ETH_SMI_REG);
2262 if (time_out-- == 0) {
2263 return false;
2264 }
2265 }
2266 while (reg_value & ETH_SMI_BUSY);
2267
2268 /* not busy */
2269 MV_WRITE(MV64340_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2270 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2271 return true;
2272 }
2273
2274 /*******************************************************************************
2275 * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
2276 *
2277 * DESCRIPTION:
2278 * This function prepares a Rx chained list of descriptors and packet
2279 * buffers in a form of a ring. The routine must be called after port
2280 * initialization routine and before port start routine.
2281 * The Ethernet SDMA engine uses CPU bus addresses to access the various
2282 * devices in the system (i.e. DRAM). This function uses the ethernet
2283 * struct 'virtual to physical' routine (set by the user) to set the ring
2284 * with physical addresses.
2285 *
2286 * INPUT:
2287 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet Port Control srtuct.
2288 * int rx_desc_num Number of Rx descriptors
2289 * int rx_buff_size Size of Rx buffer
2290 * unsigned int rx_desc_base_addr Rx descriptors memory area base addr.
2291 * unsigned int rx_buff_base_addr Rx buffer memory area base addr.
2292 *
2293 * OUTPUT:
2294 * The routine updates the Ethernet port control struct with information
2295 * regarding the Rx descriptors and buffers.
2296 *
2297 * RETURN:
2298 * false if the given descriptors memory area is not aligned according to
2299 * Ethernet SDMA specifications.
2300 * true otherwise.
2301 *
2302 *******************************************************************************/
2303 static bool ether_init_rx_desc_ring(ETH_PORT_INFO * p_eth_port_ctrl,
2304 int rx_desc_num,
2305 int rx_buff_size,
2306 unsigned long rx_desc_base_addr,
2307 unsigned long rx_buff_base_addr)
2308 {
2309 volatile ETH_RX_DESC* p_rx_desc;
2310 unsigned long buffer_addr;
2311 int ix; /* a counter */
2312
2313 buffer_addr = rx_buff_base_addr;
2314
2315 p_rx_desc = (ETH_RX_DESC *) rx_desc_base_addr;
2316
2317 /* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
2318 if (rx_buff_base_addr & 0xF)
2319 return false;
2320
2321 /* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes */
2322 if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))
2323 return false;
2324
2325 /* Rx buffers must be 64-bit aligned. */
2326 if ((rx_buff_base_addr + rx_buff_size) & 0x7)
2327 return false;
2328
2329 /* initialize the Rx descriptors ring */
2330 for (ix = 0; ix < rx_desc_num; ix++) {
2331 p_rx_desc[ix].buf_size = rx_buff_size;
2332 p_rx_desc[ix].byte_cnt = 0x0000;
2333 p_rx_desc[ix].cmd_sts =
2334 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2335 p_rx_desc[ix].next_desc_ptr = virt_to_bus(&(p_rx_desc[ix+1]));
2336 p_rx_desc[ix].buf_ptr = buffer_addr;
2337
2338 dma_cache_wback_inv((unsigned long)p_rx_desc, sizeof(ETH_RX_DESC));
2339 p_eth_port_ctrl->rx_skb[ix] = NULL;
2340 buffer_addr += rx_buff_size;
2341 }
2342
2343 /* Closing Rx descriptors ring */
2344 p_rx_desc[rx_desc_num-1].next_desc_ptr = virt_to_bus(&(p_rx_desc[0]));
2345 dma_cache_wback_inv((unsigned long)p_rx_desc, sizeof(ETH_RX_DESC));
2346
2347 /* Save Rx desc pointer to driver struct. */
2348 p_eth_port_ctrl->rx_curr_desc_q = 0;
2349 p_eth_port_ctrl->rx_used_desc_q = 0;
2350
2351 p_eth_port_ctrl->p_rx_desc_area = (ETH_RX_DESC *) rx_desc_base_addr;
2352 p_eth_port_ctrl->rx_desc_area_size = rx_desc_num * sizeof(ETH_RX_DESC);
2353
2354 p_eth_port_ctrl->port_rx_queue_command |= 1;
2355
2356 return true;
2357 }
2358
2359 /*******************************************************************************
2360 * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory.
2361 *
2362 * DESCRIPTION:
2363 * This function prepares a Tx chained list of descriptors and packet
2364 * buffers in a form of a ring. The routine must be called after port
2365 * initialization routine and before port start routine.
2366 * The Ethernet SDMA engine uses CPU bus addresses to access the various
2367 * devices in the system (i.e. DRAM). This function uses the ethernet
2368 * struct 'virtual to physical' routine (set by the user) to set the ring
2369 * with physical addresses.
2370 *
2371 * INPUT:
2372 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet Port Control srtuct.
2373 * int tx_desc_num Number of Tx descriptors
2374 * int tx_buff_size Size of Tx buffer
2375 * unsigned int tx_desc_base_addr Tx descriptors memory area base addr.
2376 *
2377 * OUTPUT:
2378 * The routine updates the Ethernet port control struct with information
2379 * regarding the Tx descriptors and buffers.
2380 *
2381 * RETURN:
2382 * false if the given descriptors memory area is not aligned according to
2383 * Ethernet SDMA specifications.
2384 * true otherwise.
2385 *
2386 *******************************************************************************/
2387 static bool ether_init_tx_desc_ring(ETH_PORT_INFO * p_eth_port_ctrl,
2388 int tx_desc_num,
2389 unsigned long tx_desc_base_addr)
2390 {
2391
2392 ETH_TX_DESC *p_tx_desc;
2393 int ix; /* a counter */
2394
2395 /* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */
2396 if (tx_desc_base_addr & 0xF)
2397 return false;
2398
2399 /* save the first desc pointer to link with the last descriptor */
2400 p_tx_desc = (ETH_TX_DESC *) tx_desc_base_addr;
2401
2402 /* Initialize the Tx descriptors ring */
2403 for (ix = 0; ix < tx_desc_num; ix++) {
2404 p_tx_desc[ix].byte_cnt = 0x0000;
2405 p_tx_desc[ix].l4i_chk = 0x0000;
2406 p_tx_desc[ix].cmd_sts = 0x00000000;
2407 p_tx_desc[ix].next_desc_ptr = (u32)&(p_tx_desc[ix+1]);
2408 p_tx_desc[ix].buf_ptr = 0x00000000;
2409 dma_cache_wback_inv((unsigned long)&(p_tx_desc[ix]), sizeof(ETH_TX_DESC));
2410 p_eth_port_ctrl->tx_skb[ix] = NULL;
2411 }
2412
2413 /* Closing Tx descriptors ring */
2414 p_tx_desc[tx_desc_num-1].next_desc_ptr = (u32)&(p_tx_desc[0]);
2415 dma_cache_wback_inv((unsigned long)&(p_tx_desc[tx_desc_num-1]),
2416 sizeof(ETH_TX_DESC));
2417
2418 /* Set Tx desc pointer in driver struct. */
2419 p_eth_port_ctrl->tx_curr_desc_q = 0;
2420 p_eth_port_ctrl->tx_used_desc_q = 0;
2421 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2422 p_eth_port_ctrl->tx_first_desc_q = 0;
2423 #endif
2424 /* Init Tx ring base and size parameters */
2425 p_eth_port_ctrl->p_tx_desc_area = (ETH_TX_DESC*) tx_desc_base_addr;
2426 p_eth_port_ctrl->tx_desc_area_size = tx_desc_num * sizeof(ETH_TX_DESC);
2427
2428 /* Add the queue to the list of Tx queues of this port */
2429 p_eth_port_ctrl->port_tx_queue_command |= 1;
2430
2431 return true;
2432 }
2433
2434 /*******************************************************************************
2435 * eth_port_send - Send an Ethernet packet
2436 *
2437 * DESCRIPTION:
2438 * This routine send a given packet described by p_pktinfo parameter. It
2439 * supports transmitting of a packet spaned over multiple buffers. The
2440 * routine updates 'curr' and 'first' indexes according to the packet
2441 * segment passed to the routine. In case the packet segment is first,
2442 * the 'first' index is update. In any case, the 'curr' index is updated.
2443 * If the routine get into Tx resource error it assigns 'curr' index as
2444 * 'first'. This way the function can abort Tx process of multiple
2445 * descriptors per packet.
2446 *
2447 * INPUT:
2448 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet Port Control srtuct.
2449 * PKT_INFO *p_pkt_info User packet buffer.
2450 *
2451 * OUTPUT:
2452 * Tx ring 'curr' and 'first' indexes are updated.
2453 *
2454 * RETURN:
2455 * ETH_QUEUE_FULL in case of Tx resource error.
2456 * ETH_ERROR in case the routine can not access Tx desc ring.
2457 * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource.
2458 * ETH_OK otherwise.
2459 *
2460 *******************************************************************************/
2461 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2462 /*
2463 * Modified to include the first descriptor pointer in case of SG
2464 */
2465 static ETH_FUNC_RET_STATUS eth_port_send(ETH_PORT_INFO * p_eth_port_ctrl,
2466 PKT_INFO * p_pkt_info)
2467 {
2468 int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc;
2469 volatile ETH_TX_DESC* current_descriptor;
2470 volatile ETH_TX_DESC* first_descriptor;
2471 u32 command_status, first_chip_ptr;
2472
2473 /* Do not process Tx ring in case of Tx ring resource error */
2474 if (p_eth_port_ctrl->tx_resource_err == true)
2475 return ETH_QUEUE_FULL;
2476
2477 /* Get the Tx Desc ring indexes */
2478 tx_desc_curr = p_eth_port_ctrl->tx_curr_desc_q;
2479 tx_desc_used = p_eth_port_ctrl->tx_used_desc_q;
2480
2481 current_descriptor = &(p_eth_port_ctrl->p_tx_desc_area[tx_desc_curr]);
2482 if (current_descriptor == NULL)
2483 return ETH_ERROR;
2484
2485 tx_next_desc = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
2486 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2487
2488 if (command_status & ETH_TX_FIRST_DESC) {
2489 tx_first_desc = tx_desc_curr;
2490 p_eth_port_ctrl->tx_first_desc_q = tx_first_desc;
2491
2492 /* fill first descriptor */
2493 first_descriptor = &(p_eth_port_ctrl->p_tx_desc_area[tx_desc_curr]);
2494 first_descriptor->l4i_chk = p_pkt_info->l4i_chk;
2495 first_descriptor->cmd_sts = command_status;
2496 first_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2497 first_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2498 first_descriptor->next_desc_ptr = (u32)&(p_eth_port_ctrl->p_tx_desc_area[tx_next_desc]);
2499 dma_cache_wback_inv((unsigned long)first_descriptor, sizeof(ETH_TX_DESC));
2500 wmb();
2501 }
2502 else {
2503 tx_first_desc = p_eth_port_ctrl->tx_first_desc_q;
2504 first_descriptor = &(p_eth_port_ctrl->p_tx_desc_area[tx_first_desc]);
2505 if (first_descriptor == NULL) {
2506 printk("First desc is NULL !!\n");
2507 return ETH_ERROR;
2508 }
2509 if (command_status & ETH_TX_LAST_DESC)
2510 current_descriptor->next_desc_ptr = 0x00000000;
2511 else {
2512 command_status |= ETH_BUFFER_OWNED_BY_DMA;
2513 current_descriptor->next_desc_ptr = (u32)&(p_eth_port_ctrl->p_tx_desc_area[tx_next_desc]);
2514 }
2515 }
2516
2517 if (p_pkt_info->byte_cnt < 8) {
2518 printk(" < 8 problem \n");
2519 return ETH_ERROR;
2520 }
2521
2522 current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2523 current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2524 current_descriptor->l4i_chk = p_pkt_info->l4i_chk;
2525 current_descriptor->cmd_sts = command_status;
2526 dma_cache_wback_inv((unsigned long)current_descriptor, sizeof(ETH_TX_DESC));
2527
2528 p_eth_port_ctrl->tx_skb[tx_desc_curr] =
2529 (struct sk_buff*)p_pkt_info->return_info;
2530
2531 dma_cache_wback_inv((unsigned long)p_eth_port_ctrl, sizeof(ETH_PORT_INFO));
2532 wmb();
2533
2534 /* Set last desc with DMA ownership and interrupt enable. */
2535 if (command_status & ETH_TX_LAST_DESC) {
2536 current_descriptor->cmd_sts = command_status |
2537 ETH_TX_ENABLE_INTERRUPT |
2538 ETH_BUFFER_OWNED_BY_DMA;
2539
2540 if (!(command_status & ETH_TX_FIRST_DESC) ) {
2541 first_descriptor->cmd_sts |= ETH_BUFFER_OWNED_BY_DMA;
2542 dma_cache_wback_inv((unsigned long)first_descriptor, sizeof(ETH_TX_DESC));
2543 }
2544 dma_cache_wback_inv((unsigned long)current_descriptor, sizeof(ETH_TX_DESC));
2545 wmb();
2546
2547 first_chip_ptr = MV_READ_DATA(MV64340_ETH_CURRENT_SERVED_TX_DESC_PTR(p_eth_port_ctrl->port_num));
2548
2549 /* Apply send command */
2550 if (first_chip_ptr == 0x00000000) {
2551 MV_WRITE(MV64340_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(p_eth_port_ctrl->port_num), (u32)&(p_eth_port_ctrl->p_tx_desc_area[tx_first_desc]));
2552 }
2553
2554 ETH_ENABLE_TX_QUEUE(p_eth_port_ctrl->port_num);
2555
2556 /* Finish Tx packet. Update first desc in case of Tx resource error */
2557 tx_first_desc = tx_next_desc;
2558 p_eth_port_ctrl->tx_first_desc_q = tx_first_desc;
2559 }
2560 else {
2561 if (! (command_status & ETH_TX_FIRST_DESC) ) {
2562 current_descriptor->cmd_sts = command_status;
2563 dma_cache_wback_inv((unsigned long)current_descriptor,
2564 sizeof(ETH_TX_DESC));
2565 wmb();
2566 }
2567 }
2568
2569 /* Check for ring index overlap in the Tx desc ring */
2570 if (tx_next_desc == tx_desc_used) {
2571 p_eth_port_ctrl->tx_resource_err = true;
2572 p_eth_port_ctrl->tx_curr_desc_q = tx_first_desc;
2573 return ETH_QUEUE_LAST_RESOURCE;
2574 }
2575
2576 p_eth_port_ctrl->tx_curr_desc_q = tx_next_desc;
2577 dma_cache_wback_inv((unsigned long) p_eth_port_ctrl, sizeof(ETH_PORT_INFO));
2578 wmb();
2579 return ETH_OK;
2580 }
2581 #else
2582 static ETH_FUNC_RET_STATUS eth_port_send(ETH_PORT_INFO * p_eth_port_ctrl,
2583 PKT_INFO * p_pkt_info)
2584 {
2585 int tx_desc_curr;
2586 int tx_desc_used;
2587 volatile ETH_TX_DESC* current_descriptor;
2588 unsigned int command_status;
2589
2590 /* Do not process Tx ring in case of Tx ring resource error */
2591 if (p_eth_port_ctrl->tx_resource_err == true)
2592 return ETH_QUEUE_FULL;
2593
2594 /* Get the Tx Desc ring indexes */
2595 tx_desc_curr = p_eth_port_ctrl->tx_curr_desc_q;
2596 tx_desc_used = p_eth_port_ctrl->tx_used_desc_q;
2597 current_descriptor = &(p_eth_port_ctrl->p_tx_desc_area[tx_desc_curr]);
2598
2599 if (current_descriptor == NULL)
2600 return ETH_ERROR;
2601
2602 command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC;
2603
2604 /* XXX Is this for real ?!?!? */
2605 /* Buffers with a payload smaller than 8 bytes must be aligned to a
2606 * 64-bit boundary. We use the memory allocated for Tx descriptor.
2607 * This memory is located in TX_BUF_OFFSET_IN_DESC offset within the
2608 * Tx descriptor. */
2609 if (p_pkt_info->byte_cnt <= 8) {
2610 printk(KERN_ERR
2611 "You have failed in the < 8 bytes errata - fixme\n");
2612 return ETH_ERROR;
2613 }
2614 current_descriptor->buf_ptr = p_pkt_info->buf_ptr;
2615 current_descriptor->byte_cnt = p_pkt_info->byte_cnt;
2616 p_eth_port_ctrl->tx_skb[tx_desc_curr] =
2617 (struct sk_buff*)p_pkt_info->return_info;
2618
2619 mb();
2620
2621 /* Set last desc with DMA ownership and interrupt enable. */
2622 current_descriptor->cmd_sts = command_status |
2623 ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT;
2624
2625 #if 0
2626 /* Flush CPU pipe */
2627 dma_cache_wback_inv(current_descriptor, sizeof(ETH_TX_DESC));
2628 mb();
2629 #endif
2630
2631 /* Apply send command */
2632 ETH_ENABLE_TX_QUEUE(p_eth_port_ctrl->port_num);
2633
2634 /* Finish Tx packet. Update first desc in case of Tx resource error */
2635 tx_desc_curr = (tx_desc_curr + 1) % MV64340_TX_QUEUE_SIZE;
2636
2637 /* Update the current descriptor */
2638 p_eth_port_ctrl->tx_curr_desc_q = tx_desc_curr;
2639
2640 /* Check for ring index overlap in the Tx desc ring */
2641 if (tx_desc_curr == tx_desc_used) {
2642 p_eth_port_ctrl->tx_resource_err = true;
2643 return ETH_QUEUE_LAST_RESOURCE;
2644 }
2645
2646 return ETH_OK;
2647 }
2648 #endif
2649
2650 /*******************************************************************************
2651 * eth_tx_return_desc - Free all used Tx descriptors
2652 *
2653 * DESCRIPTION:
2654 * This routine returns the transmitted packet information to the caller.
2655 * It uses the 'first' index to support Tx desc return in case a transmit
2656 * of a packet spanned over multiple buffer still in process.
2657 * In case the Tx queue was in "resource error" condition, where there are
2658 * no available Tx resources, the function resets the resource error flag.
2659 *
2660 * INPUT:
2661 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet Port Control srtuct.
2662 * PKT_INFO *p_pkt_info User packet buffer.
2663 *
2664 * OUTPUT:
2665 * Tx ring 'first' and 'used' indexes are updated.
2666 *
2667 * RETURN:
2668 * ETH_ERROR in case the routine can not access Tx desc ring.
2669 * ETH_RETRY in case there is transmission in process.
2670 * ETH_END_OF_JOB if the routine has nothing to release.
2671 * ETH_OK otherwise.
2672 *
2673 *******************************************************************************/
2674 static ETH_FUNC_RET_STATUS eth_tx_return_desc(ETH_PORT_INFO * p_eth_port_ctrl,
2675 PKT_INFO * p_pkt_info)
2676 {
2677 int tx_desc_used, tx_desc_curr;
2678 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2679 int tx_first_desc;
2680 #endif
2681 volatile ETH_TX_DESC *p_tx_desc_used;
2682 unsigned int command_status;
2683
2684 /* Get the Tx Desc ring indexes */
2685 tx_desc_curr = p_eth_port_ctrl->tx_curr_desc_q;
2686 tx_desc_used = p_eth_port_ctrl->tx_used_desc_q;
2687 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2688 tx_first_desc = p_eth_port_ctrl->tx_first_desc_q;
2689 #endif
2690 p_tx_desc_used = &(p_eth_port_ctrl->p_tx_desc_area[tx_desc_used]);
2691
2692 /* XXX Sanity check */
2693 if (p_tx_desc_used == NULL)
2694 return ETH_ERROR;
2695
2696 command_status = p_tx_desc_used->cmd_sts;
2697
2698 /* Still transmitting... */
2699 #ifndef MV64340_CHECKSUM_OFFLOAD_TX
2700 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2701 dma_cache_wback_inv((unsigned long)p_tx_desc_used,
2702 sizeof(ETH_TX_DESC));
2703 return ETH_RETRY;
2704 }
2705 #endif
2706 /* Stop release. About to overlap the current available Tx descriptor */
2707 #ifdef MV64340_CHECKSUM_OFFLOAD_TX
2708 if ((tx_desc_used == tx_first_desc) &&
2709 #else
2710 if ((tx_desc_used == tx_desc_curr) &&
2711 #endif
2712 (p_eth_port_ctrl->tx_resource_err == false)) {
2713 dma_cache_wback_inv((unsigned long)p_tx_desc_used,
2714 sizeof(ETH_TX_DESC));
2715 return ETH_END_OF_JOB;
2716 }
2717
2718 /* Pass the packet information to the caller */
2719 p_pkt_info->cmd_sts = command_status;
2720 p_pkt_info->return_info = p_eth_port_ctrl->tx_skb[tx_desc_used];
2721 p_eth_port_ctrl->tx_skb[tx_desc_used] = NULL;
2722
2723 /* Update the next descriptor to release. */
2724 p_eth_port_ctrl->tx_used_desc_q =
2725 (tx_desc_used + 1) % MV64340_TX_QUEUE_SIZE;
2726
2727 /* Any Tx return cancels the Tx resource error status */
2728 if (p_eth_port_ctrl->tx_resource_err == true)
2729 p_eth_port_ctrl->tx_resource_err = false;
2730
2731 dma_cache_wback_inv((unsigned long)p_tx_desc_used, sizeof(ETH_TX_DESC));
2732
2733 return ETH_OK;
2734 }
2735
2736 /*******************************************************************************
2737 * eth_port_receive - Get received information from Rx ring.
2738 *
2739 * DESCRIPTION:
2740 * This routine returns the received data to the caller. There is no
2741 * data copying during routine operation. All information is returned
2742 * using pointer to packet information struct passed from the caller.
2743 * If the routine exhausts Rx ring resources then the resource error flag
2744 * is set.
2745 *
2746 * INPUT:
2747 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet Port Control srtuct.
2748 * PKT_INFO *p_pkt_info User packet buffer.
2749 *
2750 * OUTPUT:
2751 * Rx ring current and used indexes are updated.
2752 *
2753 * RETURN:
2754 * ETH_ERROR in case the routine can not access Rx desc ring.
2755 * ETH_QUEUE_FULL if Rx ring resources are exhausted.
2756 * ETH_END_OF_JOB if there is no received data.
2757 * ETH_OK otherwise.
2758 *
2759 *******************************************************************************/
2760 static ETH_FUNC_RET_STATUS eth_port_receive(ETH_PORT_INFO *
2761 p_eth_port_ctrl,
2762 PKT_INFO * p_pkt_info)
2763 {
2764 int rx_curr_desc;
2765 int rx_next_curr_desc;
2766 int rx_used_desc;
2767 unsigned int command_status;
2768 volatile ETH_RX_DESC* p_rx_desc;
2769
2770 /* Do not process Rx ring in case of Rx ring resource error */
2771 if (p_eth_port_ctrl->rx_resource_err == true)
2772 return ETH_QUEUE_FULL;
2773
2774 /* Get the Rx Desc ring 'curr and 'used' indexes */
2775 rx_curr_desc = p_eth_port_ctrl->rx_curr_desc_q;
2776 rx_used_desc = p_eth_port_ctrl->rx_used_desc_q;
2777
2778 p_rx_desc = &(p_eth_port_ctrl->p_rx_desc_area[rx_curr_desc]);
2779
2780 /* The following parameters are used to save readings from memory */
2781 command_status = p_rx_desc->cmd_sts;
2782
2783 /* Nothing to receive... */
2784 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2785 dma_cache_wback_inv((unsigned long)p_rx_desc, sizeof(ETH_RX_DESC));
2786 return ETH_END_OF_JOB;
2787 }
2788
2789 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
2790 p_pkt_info->cmd_sts = command_status;
2791 p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
2792 p_pkt_info->return_info = p_eth_port_ctrl->rx_skb[rx_curr_desc];
2793 p_pkt_info->l4i_chk = p_rx_desc->buf_size;
2794
2795 /* Clean the return info field to indicate that the packet has been */
2796 /* moved to the upper layers */
2797 p_eth_port_ctrl->rx_skb[rx_curr_desc] = NULL;
2798
2799 /* Update current index in data structure */
2800 rx_next_curr_desc = (rx_curr_desc + 1) % MV64340_RX_QUEUE_SIZE;
2801 p_eth_port_ctrl->rx_curr_desc_q = rx_next_curr_desc;
2802
2803 /* Rx descriptors exhausted. Set the Rx ring resource error flag */
2804 if (rx_next_curr_desc == rx_used_desc)
2805 p_eth_port_ctrl->rx_resource_err = true;
2806
2807 dma_cache_wback_inv((unsigned long)p_rx_desc, sizeof(ETH_RX_DESC));
2808 mb();
2809 return ETH_OK;
2810 }
2811
2812 /*******************************************************************************
2813 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
2814 *
2815 * DESCRIPTION:
2816 * This routine returns a Rx buffer back to the Rx ring. It retrieves the
2817 * next 'used' descriptor and attached the returned buffer to it.
2818 * In case the Rx ring was in "resource error" condition, where there are
2819 * no available Rx resources, the function resets the resource error flag.
2820 *
2821 * INPUT:
2822 * ETH_PORT_INFO *p_eth_port_ctrl Ethernet Port Control srtuct.
2823 * PKT_INFO *p_pkt_info Information on the returned buffer.
2824 *
2825 * OUTPUT:
2826 * New available Rx resource in Rx descriptor ring.
2827 *
2828 * RETURN:
2829 * ETH_ERROR in case the routine can not access Rx desc ring.
2830 * ETH_OK otherwise.
2831 *
2832 *******************************************************************************/
2833 static ETH_FUNC_RET_STATUS eth_rx_return_buff(ETH_PORT_INFO * p_eth_port_ctrl,
2834 PKT_INFO * p_pkt_info)
2835 {
2836 int used_rx_desc; /* Where to return Rx resource */
2837 volatile ETH_RX_DESC* p_used_rx_desc;
2838
2839 /* Get 'used' Rx descriptor */
2840 used_rx_desc = p_eth_port_ctrl->rx_used_desc_q;
2841 p_used_rx_desc = &(p_eth_port_ctrl->p_rx_desc_area[used_rx_desc]);
2842
2843 p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
2844 p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
2845 p_eth_port_ctrl->rx_skb[used_rx_desc] = p_pkt_info->return_info;
2846
2847 /* Flush the write pipe */
2848 mb();
2849
2850 /* Return the descriptor to DMA ownership */
2851 p_used_rx_desc->cmd_sts =
2852 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2853
2854 /* Flush descriptor and CPU pipe */
2855 dma_cache_wback_inv((unsigned long)p_used_rx_desc, sizeof(ETH_RX_DESC));
2856 mb();
2857
2858 /* Move the used descriptor pointer to the next descriptor */
2859 p_eth_port_ctrl->rx_used_desc_q =
2860 (used_rx_desc + 1) % MV64340_RX_QUEUE_SIZE;
2861
2862 /* Any Rx return cancels the Rx resource error status */
2863 if (p_eth_port_ctrl->rx_resource_err == true)
2864 p_eth_port_ctrl->rx_resource_err = false;
2865
2866 return ETH_OK;
2867 }
2868
2869 #ifdef MV64340_COAL
2870
2871 /*******************************************************************************
2872 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
2873 *
2874 * DESCRIPTION:
2875 * This routine sets the RX coalescing interrupt mechanism parameter.
2876 * This parameter is a timeout counter, that counts in 64 t_clk
2877 * chunks ; that when timeout event occurs a maskable interrupt
2878 * occurs.
2879 * The parameter is calculated using the tClk of the MV-643xx chip
2880 * , and the required delay of the interrupt in usec.
2881 *
2882 * INPUT:
2883 * ETH_PORT eth_port_num Ethernet port number
2884 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
2885 * unsigned int delay Delay in usec
2886 *
2887 * OUTPUT:
2888 * Interrupt coalescing mechanism value is set in MV-643xx chip.
2889 *
2890 * RETURN:
2891 * The interrupt coalescing value set in the gigE port.
2892 *
2893 *******************************************************************************/
2894 static unsigned int eth_port_set_rx_coal(ETH_PORT eth_port_num,
2895 unsigned int t_clk,
2896 unsigned int delay)
2897 {
2898 unsigned int coal;
2899 coal = ((t_clk / 1000000) * delay) / 64;
2900 /* Set RX Coalescing mechanism */
2901 MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
2902 ((coal & 0x3fff) << 8) |
2903 (MV_READ_DATA(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num))
2904 & 0xffc000ff));
2905 return coal;
2906 }
2907 #endif
2908
2909 /*******************************************************************************
2910 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
2911 *
2912 * DESCRIPTION:
2913 * This routine sets the TX coalescing interrupt mechanism parameter.
2914 * This parameter is a timeout counter, that counts in 64 t_clk
2915 * chunks ; that when timeout event occurs a maskable interrupt
2916 * occurs.
2917 * The parameter is calculated using the t_cLK frequency of the
2918 * MV-643xx chip and the required delay in the interrupt in uSec
2919 *
2920 * INPUT:
2921 * ETH_PORT eth_port_num Ethernet port number
2922 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
2923 * unsigned int delay Delay in uSeconds
2924 *
2925 * OUTPUT:
2926 * Interrupt coalescing mechanism value is set in MV-643xx chip.
2927 *
2928 * RETURN:
2929 * The interrupt coalescing value set in the gigE port.
2930 *
2931 *******************************************************************************/
2932 static unsigned int eth_port_set_tx_coal(ETH_PORT eth_port_num,
2933 unsigned int t_clk,
2934 unsigned int delay)
2935 {
2936 unsigned int coal;
2937 coal = ((t_clk / 1000000) * delay) / 64;
2938 /* Set TX Coalescing mechanism */
2939 MV_WRITE(MV64340_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
2940 coal << 4);
2941 return coal;
2942 }
2943
2944 #ifdef MDD_CUT
2945 /*******************************************************************************
2946 * eth_b_copy - Copy bytes from source to destination
2947 *
2948 * DESCRIPTION:
2949 * This function supports the eight bytes limitation on Tx buffer size.
2950 * The routine will zero eight bytes starting from the destination address
2951 * followed by copying bytes from the source address to the destination.
2952 *
2953 * INPUT:
2954 * unsigned int src_addr 32 bit source address.
2955 * unsigned int dst_addr 32 bit destination address.
2956 * int byte_count Number of bytes to copy.
2957 *
2958 * OUTPUT:
2959 * See description.
2960 *
2961 * RETURN:
2962 * None.
2963 *
2964 *******************************************************************************/
2965 static void eth_b_copy(unsigned int src_addr, unsigned int dst_addr,
2966 int byte_count)
2967 {
2968 /* Zero the dst_addr area */
2969 *(unsigned int *) dst_addr = 0x0;
2970
2971 while (byte_count != 0) {
2972 *(char *) dst_addr = *(char *) src_addr;
2973 dst_addr++;
2974 src_addr++;
2975 byte_count--;
2976 }
2977 }
2978 #endif
2979