1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
10  *
11  *------------------------------------------------------------------------------
12  *
13  * SOFTWARE LICENSE
14  *
15  * This software is provided subject to the following terms and conditions,
16  * which you should read carefully before using the software.  Using this
17  * software indicates your acceptance of these terms and conditions.  If you do
18  * not agree with these terms and conditions, do not use the software.
19  *
20  * Copyright © 2005 Agere Systems Inc.
21  * All rights reserved.
22  *
23  * Redistribution and use in source or binary forms, with or without
24  * modifications, are permitted provided that the following conditions are met:
25  *
26  * . Redistributions of source code must retain the above copyright notice, this
27  *    list of conditions and the following Disclaimer as comments in the code as
28  *    well as in the documentation and/or other materials provided with the
29  *    distribution.
30  *
31  * . Redistributions in binary form must reproduce the above copyright notice,
32  *    this list of conditions and the following Disclaimer in the documentation
33  *    and/or other materials provided with the distribution.
34  *
35  * . Neither the name of Agere Systems Inc. nor the names of the contributors
36  *    may be used to endorse or promote products derived from this software
37  *    without specific prior written permission.
38  *
39  * Disclaimer
40  *
41  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
42  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
44  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
45  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
47  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
48  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
49  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
51  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
52  * DAMAGE.
53  *
54  */
55 
56 #include <linux/pci.h>
57 #include <linux/init.h>
58 #include <linux/module.h>
59 #include <linux/types.h>
60 #include <linux/kernel.h>
61 
62 #include <linux/sched.h>
63 #include <linux/ptrace.h>
64 #include <linux/slab.h>
65 #include <linux/ctype.h>
66 #include <linux/string.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/in.h>
70 #include <linux/delay.h>
71 #include <linux/bitops.h>
72 #include <linux/io.h>
73 
74 #include <linux/netdevice.h>
75 #include <linux/etherdevice.h>
76 #include <linux/skbuff.h>
77 #include <linux/if_arp.h>
78 #include <linux/ioport.h>
79 #include <linux/crc32.h>
80 #include <linux/random.h>
81 #include <linux/phy.h>
82 
83 #include "et131x.h"
84 
85 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
86 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
87 MODULE_LICENSE("Dual BSD/GPL");
88 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
89 		   "for the ET1310 by Agere Systems");
90 
91 /* EEPROM defines */
92 #define MAX_NUM_REGISTER_POLLS          1000
93 #define MAX_NUM_WRITE_RETRIES           2
94 
95 /* MAC defines */
96 #define COUNTER_WRAP_16_BIT 0x10000
97 #define COUNTER_WRAP_12_BIT 0x1000
98 
99 /* PCI defines */
100 #define INTERNAL_MEM_SIZE       0x400	/* 1024 bytes of internal memory */
101 #define INTERNAL_MEM_RX_OFFSET  0x1FF	/* 50%   Tx, 50%   Rx */
102 
103 /* ISR defines */
104 /*
105  * For interrupts, normal running is:
106  *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
107  *       watchdog_interrupt & txdma_xfer_done
108  *
109  * In both cases, when flow control is enabled for either Tx or bi-direction,
110  * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
111  * buffer rings are running low.
112  */
113 #define INT_MASK_DISABLE            0xffffffff
114 
115 /* NOTE: Masking out MAC_STAT Interrupt for now...
116  * #define INT_MASK_ENABLE             0xfff6bf17
117  * #define INT_MASK_ENABLE_NO_FLOW     0xfff6bfd7
118  */
119 #define INT_MASK_ENABLE             0xfffebf17
120 #define INT_MASK_ENABLE_NO_FLOW     0xfffebfd7
121 
122 /* General defines */
123 /* Packet and header sizes */
124 #define NIC_MIN_PACKET_SIZE	60
125 
126 /* Multicast list size */
127 #define NIC_MAX_MCAST_LIST	128
128 
129 /* Supported Filters */
130 #define ET131X_PACKET_TYPE_DIRECTED		0x0001
131 #define ET131X_PACKET_TYPE_MULTICAST		0x0002
132 #define ET131X_PACKET_TYPE_BROADCAST		0x0004
133 #define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
134 #define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010
135 
136 /* Tx Timeout */
137 #define ET131X_TX_TIMEOUT	(1 * HZ)
138 #define NIC_SEND_HANG_THRESHOLD	0
139 
140 /* MP_TCB flags */
141 #define fMP_DEST_MULTI			0x00000001
142 #define fMP_DEST_BROAD			0x00000002
143 
144 /* MP_ADAPTER flags */
145 #define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
146 #define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008
147 
148 /* MP_SHARED flags */
149 #define fMP_ADAPTER_LOWER_POWER		0x00200000
150 
151 #define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
152 #define fMP_ADAPTER_HARDWARE_ERROR	0x04000000
153 
154 #define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000
155 
156 /* Some offsets in PCI config space that are actually used. */
157 #define ET1310_PCI_MAC_ADDRESS		0xA4
158 #define ET1310_PCI_EEPROM_STATUS	0xB2
159 #define ET1310_PCI_ACK_NACK		0xC0
160 #define ET1310_PCI_REPLAY		0xC2
161 #define ET1310_PCI_L0L1LATENCY		0xCF
162 
163 /* PCI Product IDs */
164 #define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
165 #define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100  Base-T */
166 
167 /* Define order of magnitude converter */
168 #define NANO_IN_A_MICRO	1000
169 
170 #define PARM_RX_NUM_BUFS_DEF    4
171 #define PARM_RX_TIME_INT_DEF    10
172 #define PARM_RX_MEM_END_DEF     0x2bc
173 #define PARM_TX_TIME_INT_DEF    40
174 #define PARM_TX_NUM_BUFS_DEF    4
175 #define PARM_DMA_CACHE_DEF      0
176 
177 /* RX defines */
178 #define USE_FBR0 1
179 #define FBR_CHUNKS 32
180 #define MAX_DESC_PER_RING_RX         1024
181 
182 /* number of RFDs - default and min */
183 #ifdef USE_FBR0
184 #define RFD_LOW_WATER_MARK	40
185 #define NIC_DEFAULT_NUM_RFD	1024
186 #define NUM_FBRS		2
187 #else
188 #define RFD_LOW_WATER_MARK	20
189 #define NIC_DEFAULT_NUM_RFD	256
190 #define NUM_FBRS		1
191 #endif
192 
193 #define NIC_MIN_NUM_RFD		64
194 #define NUM_PACKETS_HANDLED	256
195 
196 #define ALCATEL_MULTICAST_PKT	0x01000000
197 #define ALCATEL_BROADCAST_PKT	0x02000000
198 
199 /* typedefs for Free Buffer Descriptors */
200 struct fbr_desc {
201 	u32 addr_lo;
202 	u32 addr_hi;
203 	u32 word2;		/* Bits 10-31 reserved, 0-9 descriptor */
204 };
205 
206 /* Packet Status Ring Descriptors
207  *
208  * Word 0:
209  *
210  * top 16 bits are from the Alcatel Status Word as enumerated in
211  * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
212  *
213  * 0: hp			hash pass
214  * 1: ipa			IP checksum assist
215  * 2: ipp			IP checksum pass
216  * 3: tcpa			TCP checksum assist
217  * 4: tcpp			TCP checksum pass
218  * 5: wol			WOL Event
219  * 6: rxmac_error		RXMAC Error Indicator
220  * 7: drop			Drop packet
221  * 8: ft			Frame Truncated
222  * 9: jp			Jumbo Packet
223  * 10: vp			VLAN Packet
224  * 11-15: unused
225  * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
226  * 17: asw_RX_DV_event		short receive event detected
227  * 18: asw_false_carrier_event	bad carrier since last good packet
228  * 19: asw_code_err		one or more nibbles signalled as errors
229  * 20: asw_CRC_err		CRC error
230  * 21: asw_len_chk_err		frame length field incorrect
231  * 22: asw_too_long		frame length > 1518 bytes
232  * 23: asw_OK			valid CRC + no code error
233  * 24: asw_multicast		has a multicast address
234  * 25: asw_broadcast		has a broadcast address
235  * 26: asw_dribble_nibble	spurious bits after EOP
236  * 27: asw_control_frame	is a control frame
237  * 28: asw_pause_frame		is a pause frame
238  * 29: asw_unsupported_op	unsupported OP code
239  * 30: asw_VLAN_tag		VLAN tag detected
240  * 31: asw_long_evt		Rx long event
241  *
242  * Word 1:
243  * 0-15: length			length in bytes
244  * 16-25: bi			Buffer Index
245  * 26-27: ri			Ring Index
246  * 28-31: reserved
247  */
248 
249 struct pkt_stat_desc {
250 	u32 word0;
251 	u32 word1;
252 };
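
/*
 * Illustrative sketch (hypothetical helpers, not part of the original
 * driver): pulling the word1 fields documented above out of a
 * pkt_stat_desc.  The shifts and masks are assumptions derived from the
 * bit layout in the comment block.
 */
static inline u32 psd_length(const struct pkt_stat_desc *psd)
{
	return psd->word1 & 0xFFFF;		/* bits 0-15: length in bytes */
}

static inline u32 psd_buffer_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
}

static inline u32 psd_ring_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 26) & 0x3;	/* bits 26-27: ring index */
}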
253 
254 /* Typedefs for the RX DMA status word */
255 
256 /*
257  * rx status word 0 holds part of the status bits of the Rx DMA engine
258  * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
259  * which contains the Free Buffer ring 0 and 1 available offset.
260  *
261  * bit 0-9 FBR1 offset
262  * bit 10 Wrap flag for FBR1
263  * bit 16-25 FBR0 offset
264  * bit 26 Wrap flag for FBR0
265  */
266 
267 /*
268  * rx status word 1 holds part of the status bits of the Rx DMA engine
269  * that get copied out to memory by the ET-1310.  Word 1 is a 32 bit word
270  * which contains the Packet Status Ring available offset.
271  *
272  * bit 0-15 reserved
273  * bit 16-27 PSRoffset
274  * bit 28 PSRwrap
275  * bit 29-31 unused
276  */
277 
278 /*
279  * struct rx_status_block is a structure representing the status of the Rx
280  * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
281  */
282 struct rx_status_block {
283 	u32 word0;
284 	u32 word1;
285 };
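
/*
 * Illustrative sketch (hypothetical helpers, assuming the word layouts
 * described in the two comment blocks above): extracting the free buffer
 * ring and packet status ring offsets from the write-back status words.
 */
static inline u32 rx_status_fbr1_offset(const struct rx_status_block *s)
{
	return s->word0 & 0x3FF;		/* word0 bits 0-9 */
}

static inline bool rx_status_fbr1_wrap(const struct rx_status_block *s)
{
	return s->word0 & (1 << 10);		/* word0 bit 10 */
}

static inline u32 rx_status_psr_offset(const struct rx_status_block *s)
{
	return (s->word1 >> 16) & 0xFFF;	/* word1 bits 16-27 */
}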
286 
287 /*
288  * Structure for look-up table holding free buffer ring pointers, addresses
289  * and state.
290  */
291 struct fbr_lookup {
292 	void		*virt[MAX_DESC_PER_RING_RX];
293 	void		*buffer1[MAX_DESC_PER_RING_RX];
294 	void		*buffer2[MAX_DESC_PER_RING_RX];
295 	u32		 bus_high[MAX_DESC_PER_RING_RX];
296 	u32		 bus_low[MAX_DESC_PER_RING_RX];
297 	void		*ring_virtaddr;
298 	dma_addr_t	 ring_physaddr;
299 	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
300 	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
301 	u64		 real_physaddr;
302 	u64		 offset;
303 	u32		 local_full;
304 	u32		 num_entries;
305 	u32		 buffsize;
306 };
307 
308 /*
309  * struct rx_ring is the structure representing the adaptor's local
310  * reference(s) to the rings
311  *
312  ******************************************************************************
313  * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
314  *			and index 1 to refer to FBR0
315  ******************************************************************************
316  */
317 struct rx_ring {
318 	struct fbr_lookup *fbr[NUM_FBRS];
319 	void *ps_ring_virtaddr;
320 	dma_addr_t ps_ring_physaddr;
321 	u32 local_psr_full;
322 	u32 psr_num_entries;
323 
324 	struct rx_status_block *rx_status_block;
325 	dma_addr_t rx_status_bus;
326 
327 	/* RECV */
328 	struct list_head recv_list;
329 	u32 num_ready_recv;
330 
331 	u32 num_rfd;
332 
333 	bool unfinished_receives;
334 
335 	/* lookaside lists */
336 	struct kmem_cache *recv_lookaside;
337 };
338 
339 /* TX defines */
340 /*
341  * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
342  *
343  * 0-15: length of packet
344  * 16-27: VLAN tag
345  * 28: VLAN CFI
346  * 29-31: VLAN priority
347  *
348  * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
349  *
350  * 0: last packet in the sequence
351  * 1: first packet in the sequence
352  * 2: interrupt the processor when this pkt sent
353  * 3: Control word - no packet data
354  * 4: Issue half-duplex backpressure : XON/XOFF
355  * 5: send pause frame
356  * 6: Tx frame has error
357  * 7: append CRC
358  * 8: MAC override
359  * 9: pad packet
360  * 10: Packet is a Huge packet
361  * 11: append VLAN tag
362  * 12: IP checksum assist
363  * 13: TCP checksum assist
364  * 14: UDP checksum assist
365  */
366 
367 /* struct tx_desc represents each descriptor on the ring */
368 struct tx_desc {
369 	u32 addr_hi;
370 	u32 addr_lo;
371 	u32 len_vlan;	/* control words for how to xmit the */
372 	u32 flags;	/* data (detailed above) */
373 };
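
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * composing the len_vlan control word from the word 2 layout documented
 * above (length in bits 0-15, VLAN tag in 16-27, CFI in 28, priority in
 * 29-31).
 */
static inline u32 tx_desc_len_vlan(u16 len, u16 vlan_tag, u8 cfi, u8 prio)
{
	return len | ((u32)(vlan_tag & 0xFFF) << 16) |
	       ((u32)(cfi & 1) << 28) | ((u32)(prio & 7) << 29);
}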
374 
375 /*
376  * The status of the Tx DMA engine; it sits in free memory, and is pointed to
377  * by 0x101c / 0x1020. This is a DMA10 type
378  */
379 
380 /* TCB (Transmit Control Block: Host Side) */
381 struct tcb {
382 	struct tcb *next;	/* Next entry in ring */
383 	u32 flags;		/* Our flags for the packet */
384 	u32 count;		/* Used to spot stuck/lost packets */
385 	u32 stale;		/* Used to spot stuck/lost packets */
386 	struct sk_buff *skb;	/* Network skb we are tied to */
387 	u32 index;		/* Ring indexes */
388 	u32 index_start;
389 };
390 
391 /* Structure representing our local reference(s) to the ring */
392 struct tx_ring {
393 	/* TCB (Transmit Control Block) memory and lists */
394 	struct tcb *tcb_ring;
395 
396 	/* List of TCBs that are ready to be used */
397 	struct tcb *tcb_qhead;
398 	struct tcb *tcb_qtail;
399 
400 	/* list of TCBs that are currently being sent.  NOTE that access to all
401 	 * three of these (including used) is controlled via the
402 	 * TCBSendQLock.  This lock should be secured prior to incrementing /
403 	 * decrementing used, or any queue manipulation on send_head /
404 	 * tail
405 	 */
406 	struct tcb *send_head;
407 	struct tcb *send_tail;
408 	int used;
409 
410 	/* The actual descriptor ring */
411 	struct tx_desc *tx_desc_ring;
412 	dma_addr_t tx_desc_ring_pa;
413 
414 	/* send_idx indicates where we last wrote to in the descriptor ring. */
415 	u32 send_idx;
416 
417 	/* The location of the write-back status block */
418 	u32 *tx_status;
419 	dma_addr_t tx_status_pa;
420 
421 	/* Packets since the last IRQ: used for interrupt coalescing */
422 	int since_irq;
423 };
424 
425 /*
426  * Do not change these values: if changed, they must also be changed in the
427  * respective TxDMA and RxDMA engines
428  */
429 #define NUM_DESC_PER_RING_TX         512    /* TX Do not change these values */
430 #define NUM_TCB                      64
431 
432 /*
433  * These values are all superseded by registry entries to facilitate tuning.
434  * Once the desired performance has been achieved, the optimal registry values
435  * should be re-populated to these #defines:
436  */
437 #define TX_ERROR_PERIOD             1000
438 
439 #define LO_MARK_PERCENT_FOR_PSR     15
440 #define LO_MARK_PERCENT_FOR_RX      15
441 
442 /* RFD (Receive Frame Descriptor) */
443 struct rfd {
444 	struct list_head list_node;
445 	struct sk_buff *skb;
446 	u32 len;	/* total size of receive frame */
447 	u16 bufferindex;
448 	u8 ringindex;
449 };
450 
451 /* Flow Control */
452 #define FLOW_BOTH	0
453 #define FLOW_TXONLY	1
454 #define FLOW_RXONLY	2
455 #define FLOW_NONE	3
456 
457 /* Struct to define some device statistics */
458 struct ce_stats {
459 	/* MIB II variables
460 	 *
461 	 * NOTE: atomic_t types are only guaranteed to store 24 bits; if we
462 	 * MUST have 32, then we'll need another way to perform atomic
463 	 * operations
464 	 */
465 	u32		unicast_pkts_rcvd;
466 	atomic_t	unicast_pkts_xmtd;
467 	u32		multicast_pkts_rcvd;
468 	atomic_t	multicast_pkts_xmtd;
469 	u32		broadcast_pkts_rcvd;
470 	atomic_t	broadcast_pkts_xmtd;
471 	u32		rcvd_pkts_dropped;
472 
473 	/* Tx Statistics. */
474 	u32		tx_underflows;
475 
476 	u32		tx_collisions;
477 	u32		tx_excessive_collisions;
478 	u32		tx_first_collisions;
479 	u32		tx_late_collisions;
480 	u32		tx_max_pkt_errs;
481 	u32		tx_deferred;
482 
483 	/* Rx Statistics. */
484 	u32		rx_overflows;
485 
486 	u32		rx_length_errs;
487 	u32		rx_align_errs;
488 	u32		rx_crc_errs;
489 	u32		rx_code_violations;
490 	u32		rx_other_errs;
491 
492 	u32		synchronous_iterations;
493 	u32		interrupt_status;
494 };
495 
496 /* The private adapter structure */
497 struct et131x_adapter {
498 	struct net_device *netdev;
499 	struct pci_dev *pdev;
500 	struct mii_bus *mii_bus;
501 	struct phy_device *phydev;
502 	struct work_struct task;
503 
504 	/* Flags that indicate current state of the adapter */
505 	u32 flags;
506 
507 	/* local link state, to determine if a state change has occurred */
508 	int link;
509 
510 	/* Configuration  */
511 	u8 rom_addr[ETH_ALEN];
512 	u8 addr[ETH_ALEN];
513 	bool has_eeprom;
514 	u8 eeprom_data[2];
515 
516 	/* Spinlocks */
517 	spinlock_t lock;
518 
519 	spinlock_t tcb_send_qlock;
520 	spinlock_t tcb_ready_qlock;
521 	spinlock_t send_hw_lock;
522 
523 	spinlock_t rcv_lock;
524 	spinlock_t rcv_pend_lock;
525 	spinlock_t fbr_lock;
526 
527 	spinlock_t phy_lock;
528 
529 	/* Packet Filter and look ahead size */
530 	u32 packet_filter;
531 
532 	/* multicast list */
533 	u32 multicast_addr_count;
534 	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
535 
536 	/* Pointer to the device's PCI register space */
537 	struct address_map __iomem *regs;
538 
539 	/* Registry parameters */
540 	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
541 	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */
542 
543 	/* Derived from the registry: */
544 	u8 flowcontrol;		/* flow control validated by the far-end */
545 
546 	/* Minimize init-time */
547 	struct timer_list error_timer;
548 
549 	/* flag to put the phy into coma mode when booting up with no cable
550 	 * plugged in after 5 seconds
551 	 */
552 	u8 boot_coma;
553 
554 	/* Next two used to save power information at power down. This
555 	 * information will be used during power up to set up parts of Power
556 	 * Management in JAGCore
557 	 */
558 	u16 pdown_speed;
559 	u8 pdown_duplex;
560 
561 	/* Tx Memory Variables */
562 	struct tx_ring tx_ring;
563 
564 	/* Rx Memory Variables */
565 	struct rx_ring rx_ring;
566 
567 	/* Stats */
568 	struct ce_stats stats;
569 
570 	struct net_device_stats net_stats;
571 };
572 
573 static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
574 {
575 	u32 reg;
576 	int i;
577 
578 	/*
579 	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
580 	 *    bits 7,1:0 both equal to 1, at least once after reset.
581 	 *    Subsequent operations need only to check that bits 1:0 are equal
582 	 *    to 1 prior to starting a single byte read/write
583 	 */
584 
585 	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
586 		/* Read registers grouped in DWORD1 */
587 		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
588 			return -EIO;
589 
590 		/* I2C idle and Phy Queue Avail both true */
591 		if ((reg & 0x3000) == 0x3000) {
592 			if (status)
593 				*status = reg;
594 			return reg & 0xFF;
595 		}
596 	}
597 	return -ETIMEDOUT;
598 }
599 
600 
601 /**
602  * eeprom_write - Write a byte to the ET1310's EEPROM
603  * @adapter: pointer to our private adapter structure
604  * @addr: the address to write
605  * @data: the value to write
606  *
607  * Returns 0 for a successful write, a negative errno otherwise.
608  */
609 static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
610 {
611 	struct pci_dev *pdev = adapter->pdev;
612 	int index = 0;
613 	int retries;
614 	int err = 0;
615 	int i2c_wack = 0;
616 	int writeok = 0;
617 	u32 status;
618 	u32 val = 0;
619 
620 	/*
621 	 * For an EEPROM, an I2C single byte write is defined as a START
622 	 * condition followed by the device address, EEPROM address, one byte
623 	 * of data and a STOP condition.  The STOP condition will trigger the
624 	 * EEPROM's internally timed write cycle to the nonvolatile memory.
625 	 * All inputs are disabled during this write cycle and the EEPROM will
626 	 * not respond to any access until the internal write is complete.
627 	 */
628 
629 	err = eeprom_wait_ready(pdev, NULL);
630 	if (err)
631 		return err;
632 
633 	 /*
634 	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
635 	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
636 	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
637 	 *    byte addressing).
638 	 */
639 	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
640 			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
641 		return -EIO;
642 
643 	i2c_wack = 1;
644 
645 	/* Prepare EEPROM address for Step 3 */
646 
647 	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
648 		/* Write the address to the LBCIF Address Register */
649 		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
650 			break;
651 		/*
652 		 * Write the data to the LBCIF Data Register (the I2C write
653 		 * will begin).
654 		 */
655 		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
656 			break;
657 		/*
658 		 * Monitor bit 1:0 of the LBCIF Status Register.  When bits
659 		 * 1:0 are both equal to 1, the I2C write has completed and the
660 		 * internal write cycle of the EEPROM is about to start.
661 		 * (bits 1:0 = 01 is a legal state while waiting for both to
662 		 * equal 1, but bits 1:0 = 10 is invalid and implies that
663 		 * something is broken).
664 		 */
665 		err = eeprom_wait_ready(pdev, &status);
666 		if (err < 0)
667 			return 0;
668 
669 		/*
670 		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
671 		 * an error has occurred. Don't break here if we are revision
672 		 * 1; this is so we do a blind write to work around the load bug.
673 		 */
674 		if ((status & LBCIF_STATUS_GENERAL_ERROR)
675 			&& adapter->pdev->revision == 0)
676 			break;
677 
678 		/*
679 		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
680 		 * ACK error has occurred on the address phase of the write.
681 		 * This could be due to an actual hardware failure or the
682 		 * EEPROM may still be in its internal write cycle from a
683 		 * previous write. This write operation was ignored and must be
684 		 * repeated later.
685 		 */
686 		if (status & LBCIF_STATUS_ACK_ERROR) {
687 			/*
688 			 * This could be due to an actual hardware failure
689 			 * or the EEPROM may still be in its internal write
690 			 * cycle from a previous write. This write operation
691 			 * was ignored and must be repeated later.
692 			 */
693 			udelay(10);
694 			continue;
695 		}
696 
697 		writeok = 1;
698 		break;
699 	}
700 
701 	/*
702 	 * Set bit 6 of the LBCIF Control Register = 0.
703 	 */
704 	udelay(10);
705 
706 	while (i2c_wack) {
707 		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
708 			LBCIF_CONTROL_LBCIF_ENABLE))
709 			writeok = 0;
710 
711 		/* Do read until internal ACK_ERROR goes away meaning write
712 		 * completed
713 		 */
714 		do {
715 			pci_write_config_dword(pdev,
716 					       LBCIF_ADDRESS_REGISTER,
717 					       addr);
718 			do {
719 				pci_read_config_dword(pdev,
720 					LBCIF_DATA_REGISTER, &val);
721 			} while ((val & 0x00010000) == 0);
722 		} while (val & 0x00040000);
723 
724 		if ((val & 0xFF00) != 0xC000 || index == 10000)
725 			break;
726 		index++;
727 	}
728 	return writeok ? 0 : -EIO;
729 }
730 
731 /**
732  * eeprom_read - Read a byte from the ET1310's EEPROM
733  * @adapter: pointer to our private adapter structure
734  * @addr: the address from which to read
735  * @pdata: a pointer to a byte in which to store the value of the read
736  *
737  * Returns 0 for a successful read, a negative errno otherwise
740  */
741 static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
742 {
743 	struct pci_dev *pdev = adapter->pdev;
744 	int err;
745 	u32 status;
746 
747 	/*
748 	 * A single byte read is similar to the single byte write, with the
749 	 * exception of the data flow:
750 	 */
751 
752 	err = eeprom_wait_ready(pdev, NULL);
753 	if (err)
754 		return err;
755 	/*
756 	 * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
757 	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
758 	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
759 	 * addressing).
760 	 */
761 	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
762 				  LBCIF_CONTROL_LBCIF_ENABLE))
763 		return -EIO;
764 	/*
765 	 * Write the address to the LBCIF Address Register (I2C read will
766 	 * begin).
767 	 */
768 	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
769 		return -EIO;
770 	/*
771 	 * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
772 	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
773 	 * has occurred).
774 	 */
775 	err = eeprom_wait_ready(pdev, &status);
776 	if (err < 0)
777 		return err;
778 	/*
779 	 * Regardless of error status, read data byte from LBCIF Data
780 	 * Register.
781 	 */
782 	*pdata = err;
783 	/*
784 	 * Check bit 2 of the LBCIF Status Register.  If = 1,
785 	 * then an error has occurred.
786 	 */
787 	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
788 }
789 
790 static int et131x_init_eeprom(struct et131x_adapter *adapter)
791 {
792 	struct pci_dev *pdev = adapter->pdev;
793 	u8 eestatus;
794 
795 	/* We first need to check the EEPROM Status code located at offset
796 	 * 0xB2 of config space
797 	 */
798 	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
799 				      &eestatus);
800 
801 	/* THIS IS A WORKAROUND:
802 	 * I need to call this function twice to get my card in an
803 	 * LG M1 Express Dual running. I also tried a msleep before this
804 	 * function, because I thought there could be some timing conditions,
805 	 * but it didn't work. Calling the whole function twice also works.
806 	 */
807 	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
808 		dev_err(&pdev->dev,
809 		       "Could not read PCI config space for EEPROM Status\n");
810 		return -EIO;
811 	}
812 
813 	/* Determine if the error(s) we care about are present. If they are
814 	 * present we need to fail.
815 	 */
816 	if (eestatus & 0x4C) {
817 		int write_failed = 0;
818 		if (pdev->revision == 0x01) {
819 			int	i;
820 			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
821 
822 			/* Re-write the first 4 bytes if we have an eeprom
823 			 * present and the revision id is 1, this fixes the
824 			 * corruption seen with 1310 B Silicon
825 			 */
826 			for (i = 0; i < 3; i++)
827 				if (eeprom_write(adapter, i, eedata[i]) < 0)
828 					write_failed = 1;
829 		}
830 		if (pdev->revision  != 0x01 || write_failed) {
831 			dev_err(&pdev->dev,
832 			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);
833 
834 			/* This error could mean that there was an error
835 			 * reading the eeprom or that the eeprom doesn't exist.
836 			 * We will treat each case the same and not try to
837 			 * gather additional information that normally would
838 			 * come from the eeprom, like MAC Address
839 			 */
840 			adapter->has_eeprom = 0;
841 			return -EIO;
842 		}
843 	}
844 	adapter->has_eeprom = 1;
845 
846 	/* Read the EEPROM for information regarding LED behavior. Refer to
847 	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
848 	 */
849 	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
850 	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
851 
852 	if (adapter->eeprom_data[0] != 0xcd)
853 		/* Disable all optional features */
854 		adapter->eeprom_data[1] = 0x00;
855 
856 	return 0;
857 }
858 
859 /**
860  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
861  * @adapter: pointer to our adapter structure
862  */
863 static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
864 {
865 	/* Setup the receive dma configuration register for normal operation */
866 	u32 csr =  0x2000;	/* FBR1 enable */
867 
868 	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
869 		csr |= 0x0800;
870 	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
871 		csr |= 0x1000;
872 	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
873 		csr |= 0x1800;
874 #ifdef USE_FBR0
875 	csr |= 0x0400;		/* FBR0 enable */
876 	if (adapter->rx_ring.fbr[1]->buffsize == 256)
877 		csr |= 0x0100;
878 	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
879 		csr |= 0x0200;
880 	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
881 		csr |= 0x0300;
882 #endif
883 	writel(csr, &adapter->regs->rxdma.csr);
884 
885 	csr = readl(&adapter->regs->rxdma.csr);
886 	if ((csr & 0x00020000) != 0) {
887 		udelay(5);
888 		csr = readl(&adapter->regs->rxdma.csr);
889 		if ((csr & 0x00020000) != 0) {
890 			dev_err(&adapter->pdev->dev,
891 			    "RX Dma failed to exit halt state.  CSR 0x%08x\n",
892 				csr);
893 		}
894 	}
895 }
896 
897 /**
898  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
899  * @adapter: pointer to our adapter structure
900  */
901 static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
902 {
903 	u32 csr;
904 	/* Setup the receive dma configuration register */
905 	writel(0x00002001, &adapter->regs->rxdma.csr);
906 	csr = readl(&adapter->regs->rxdma.csr);
907 	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
908 		udelay(5);
909 		csr = readl(&adapter->regs->rxdma.csr);
910 		if ((csr & 0x00020000) == 0)
911 			dev_err(&adapter->pdev->dev,
912 			"RX Dma failed to enter halt state. CSR 0x%08x\n",
913 				csr);
914 	}
915 }
916 
917 /**
918  * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
919  * @adapter: pointer to our adapter structure
920  *
921  * Mainly used after a return to the D0 (full-power) state from a lower state.
922  */
923 static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
924 {
925 	/* Setup the transmit dma configuration register for normal
926 	 * operation
927 	 */
928 	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
929 					&adapter->regs->txdma.csr);
930 }
931 
932 static inline void add_10bit(u32 *v, int n)
933 {
934 	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
935 }
936 
937 static inline void add_12bit(u32 *v, int n)
938 {
939 	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
940 }
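
/*
 * Illustrative sketch (hypothetical, not part of the driver): add_10bit()
 * above advances only the low ten index bits and carries the previous
 * ET_DMA10_WRAP bit forward unchanged, so a caller tracking hardware
 * wrap-around toggles that bit itself once the index rolls over.
 */
static inline void example_advance_10bit_index(u32 *index)
{
	u32 old = INDEX10(*index);

	add_10bit(index, 1);
	if (INDEX10(*index) < old)	/* index rolled over to zero */
		*index ^= ET_DMA10_WRAP;
}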
941 
942 /**
943  * et1310_config_mac_regs1 - Initialize the first part of MAC regs
944  * @adapter: pointer to our adapter structure
945  */
946 static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
947 {
948 	struct mac_regs __iomem *macregs = &adapter->regs->mac;
949 	u32 station1;
950 	u32 station2;
951 	u32 ipg;
952 
953 	/* First we need to reset everything.  Write to MAC configuration
954 	 * register 1 to perform reset.
955 	 */
956 	writel(0xC00F0000, &macregs->cfg1);
957 
958 	/* Next lets configure the MAC Inter-packet gap register */
959 	ipg = 0x38005860;		/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
960 	ipg |= 0x50 << 8;		/* ifg enforce 0x50 */
961 	writel(ipg, &macregs->ipg);
962 
963 	/* Next lets configure the MAC Half Duplex register */
964 	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
965 	writel(0x00A1F037, &macregs->hfdp);
966 
967 	/* Next lets configure the MAC Interface Control register */
968 	writel(0, &macregs->if_ctrl);
969 
970 	/* Let's move on to setting up the mii management configuration */
971 	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */
972 
973 	/* Next lets configure the MAC Station Address register.  These
974 	 * values are read from the EEPROM during initialization and stored
975 	 * in the adapter structure.  We write what is stored in the adapter
976 	 * structure to the MAC Station Address registers high and low.  This
977 	 * station address is used for generating and checking pause control
978 	 * packets.
979 	 */
980 	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
981 		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
982 	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
983 		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
984 		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
985 		    adapter->addr[2];
986 	writel(station1, &macregs->station_addr_1);
987 	writel(station2, &macregs->station_addr_2);
988 
989 	/* Max ethernet packet in bytes that will be passed by the mac without
990 	 * being truncated.  Allow the MAC to pass 4 more than our max packet
991 	 * size.  This is 4 for the Ethernet CRC.
992 	 *
993 	 * Packets larger than (registry_jumbo_packet) that do not contain a
994 	 * VLAN ID will be dropped by the Rx function.
995 	 */
996 	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
997 
998 	/* clear out MAC config reset */
999 	writel(0, &macregs->cfg1);
1000 }
1001 
1002 /**
1003  * et1310_config_mac_regs2 - Initialize the second part of MAC regs
1004  * @adapter: pointer to our adapter structure
1005  */
1006 static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
1007 {
1008 	int32_t delay = 0;
1009 	struct mac_regs __iomem *mac = &adapter->regs->mac;
1010 	struct phy_device *phydev = adapter->phydev;
1011 	u32 cfg1;
1012 	u32 cfg2;
1013 	u32 ifctrl;
1014 	u32 ctl;
1015 
1016 	ctl = readl(&adapter->regs->txmac.ctl);
1017 	cfg1 = readl(&mac->cfg1);
1018 	cfg2 = readl(&mac->cfg2);
1019 	ifctrl = readl(&mac->if_ctrl);
1020 
1021 	/* Set up the if mode bits */
1022 	cfg2 &= ~0x300;
1023 	if (phydev && phydev->speed == SPEED_1000) {
1024 		cfg2 |= 0x200;
1025 		/* Phy mode bit */
1026 		ifctrl &= ~(1 << 24);
1027 	} else {
1028 		cfg2 |= 0x100;
1029 		ifctrl |= (1 << 24);
1030 	}
1031 
1032 	/* We need to enable Rx/Tx */
1033 	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
1034 	/* Initialize loop back to off */
1035 	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
1036 	if (adapter->flowcontrol == FLOW_RXONLY ||
1037 				adapter->flowcontrol == FLOW_BOTH)
1038 		cfg1 |= CFG1_RX_FLOW;
1039 	writel(cfg1, &mac->cfg1);
1040 
1041 	/* Now we need to initialize the MAC Configuration 2 register */
1042 	/* preamble 7, check length, huge frame off, pad crc, crc enable
1043 	   full duplex off */
1044 	cfg2 |= 0x7016;
1045 	cfg2 &= ~0x0021;
1046 
1047 	/* Turn on duplex if needed */
1048 	if (phydev && phydev->duplex == DUPLEX_FULL)
1049 		cfg2 |= 0x01;
1050 
1051 	ifctrl &= ~(1 << 26);
1052 	if (phydev && phydev->duplex == DUPLEX_HALF)
1053 		ifctrl |= (1<<26);	/* Enable ghd */
1054 
1055 	writel(ifctrl, &mac->if_ctrl);
1056 	writel(cfg2, &mac->cfg2);
1057 
1058 	do {
1059 		udelay(10);
1060 		delay++;
1061 		cfg1 = readl(&mac->cfg1);
1062 	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);
1063 
1064 	if (delay == 100) {
1065 		dev_warn(&adapter->pdev->dev,
1066 		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
1067 			cfg1);
1068 	}
1069 
1070 	/* Enable txmac */
1071 	ctl |= 0x09;	/* TX mac enable, FC disable */
1072 	writel(ctl, &adapter->regs->txmac.ctl);
1073 
1074 	/* Ready to start the RXDMA/TXDMA engine */
1075 	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
1076 		et131x_rx_dma_enable(adapter);
1077 		et131x_tx_dma_enable(adapter);
1078 	}
1079 }
1080 
1081 /**
1082  * et1310_in_phy_coma - check if the device is in phy coma
1083  * @adapter: pointer to our adapter structure
1084  *
1085  * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
1086  */
1087 static int et1310_in_phy_coma(struct et131x_adapter *adapter)
1088 {
1089 	u32 pmcsr;
1090 
1091 	pmcsr = readl(&adapter->regs->global.pm_csr);
1092 
1093 	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
1094 }
1095 
1096 static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
1097 {
1098 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1099 	u32 hash1 = 0;
1100 	u32 hash2 = 0;
1101 	u32 hash3 = 0;
1102 	u32 hash4 = 0;
1103 	u32 pm_csr;
1104 
1105 	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
1106 	 * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
1107 	 * specified) then we should pass NO multi-cast addresses to the
1108 	 * driver.
1109 	 */
1110 	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
1111 		int i;
1112 
1113 		/* Loop through our multicast array and set up the device */
1114 		for (i = 0; i < adapter->multicast_addr_count; i++) {
1115 			u32 result;
1116 
1117 			result = ether_crc(6, adapter->multicast_list[i]);
1118 
1119 			result = (result & 0x3F800000) >> 23;
1120 
1121 			if (result < 32) {
1122 				hash1 |= (1 << result);
1123 			} else if ((31 < result) && (result < 64)) {
1124 				result -= 32;
1125 				hash2 |= (1 << result);
1126 			} else if ((63 < result) && (result < 96)) {
1127 				result -= 64;
1128 				hash3 |= (1 << result);
1129 			} else {
1130 				result -= 96;
1131 				hash4 |= (1 << result);
1132 			}
1133 		}
1134 	}
1135 
1136 	/* Write out the new hash to the device */
1137 	pm_csr = readl(&adapter->regs->global.pm_csr);
1138 	if (!et1310_in_phy_coma(adapter)) {
1139 		writel(hash1, &rxmac->multi_hash1);
1140 		writel(hash2, &rxmac->multi_hash2);
1141 		writel(hash3, &rxmac->multi_hash3);
1142 		writel(hash4, &rxmac->multi_hash4);
1143 	}
1144 }
1145 
1146 static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
1147 {
1148 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1149 	u32 uni_pf1;
1150 	u32 uni_pf2;
1151 	u32 uni_pf3;
1152 	u32 pm_csr;
1153 
1154 	/* Set up unicast packet filter reg 3 to be the first two octets of
1155 	 * the MAC address for both address
1156 	 *
1157 	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
1158 	 * MAC address for second address
1159 	 *
1160 	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
1161 	 * MAC address for first address
1162 	 */
1163 	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
1164 		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
1165 		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
1166 		   adapter->addr[1];
1167 
1168 	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
1169 		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
1170 		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
1171 		   adapter->addr[5];
1172 
1173 	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
1174 		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
1175 		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
1176 		   adapter->addr[5];
1177 
1178 	pm_csr = readl(&adapter->regs->global.pm_csr);
1179 	if (!et1310_in_phy_coma(adapter)) {
1180 		writel(uni_pf1, &rxmac->uni_pf_addr1);
1181 		writel(uni_pf2, &rxmac->uni_pf_addr2);
1182 		writel(uni_pf3, &rxmac->uni_pf_addr3);
1183 	}
1184 }
1185 
1186 static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
1187 {
1188 	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1189 	struct phy_device *phydev = adapter->phydev;
1190 	u32 sa_lo;
1191 	u32 sa_hi = 0;
1192 	u32 pf_ctrl = 0;
1193 
1194 	/* Disable the MAC while it is being configured (also disable WOL) */
1195 	writel(0x8, &rxmac->ctrl);
1196 
1197 	/* Initialize WOL to disabled. */
1198 	writel(0, &rxmac->crc0);
1199 	writel(0, &rxmac->crc12);
1200 	writel(0, &rxmac->crc34);
1201 
1202 	/* We need to set the WOL mask0 - mask4 next.  We initialize it to
1203 	 * its default value of 0x00000000 because there are no WOL masks
1204 	 * as of this time.
1205 	 */
1206 	writel(0, &rxmac->mask0_word0);
1207 	writel(0, &rxmac->mask0_word1);
1208 	writel(0, &rxmac->mask0_word2);
1209 	writel(0, &rxmac->mask0_word3);
1210 
1211 	writel(0, &rxmac->mask1_word0);
1212 	writel(0, &rxmac->mask1_word1);
1213 	writel(0, &rxmac->mask1_word2);
1214 	writel(0, &rxmac->mask1_word3);
1215 
1216 	writel(0, &rxmac->mask2_word0);
1217 	writel(0, &rxmac->mask2_word1);
1218 	writel(0, &rxmac->mask2_word2);
1219 	writel(0, &rxmac->mask2_word3);
1220 
1221 	writel(0, &rxmac->mask3_word0);
1222 	writel(0, &rxmac->mask3_word1);
1223 	writel(0, &rxmac->mask3_word2);
1224 	writel(0, &rxmac->mask3_word3);
1225 
1226 	writel(0, &rxmac->mask4_word0);
1227 	writel(0, &rxmac->mask4_word1);
1228 	writel(0, &rxmac->mask4_word2);
1229 	writel(0, &rxmac->mask4_word3);
1230 
1231 	/* Lets setup the WOL Source Address */
1232 	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1233 		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1234 		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1235 		 adapter->addr[5];
1236 	writel(sa_lo, &rxmac->sa_lo);
1237 
1238 	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1239 		       adapter->addr[1];
1240 	writel(sa_hi, &rxmac->sa_hi);
1241 
1242 	/* Disable all Packet Filtering */
1243 	writel(0, &rxmac->pf_ctrl);
1244 
1245 	/* Let's initialize the Unicast Packet filtering address */
1246 	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1247 		et1310_setup_device_for_unicast(adapter);
1248 		pf_ctrl |= 4;	/* Unicast filter */
1249 	} else {
1250 		writel(0, &rxmac->uni_pf_addr1);
1251 		writel(0, &rxmac->uni_pf_addr2);
1252 		writel(0, &rxmac->uni_pf_addr3);
1253 	}
1254 
1255 	/* Let's initialize the Multicast hash */
1256 	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1257 		pf_ctrl |= 2;	/* Multicast filter */
1258 		et1310_setup_device_for_multicast(adapter);
1259 	}
1260 
1261 	/* Runt packet filtering.  Didn't work in version A silicon. */
1262 	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1263 	pf_ctrl |= 8;	/* Fragment filter */
1264 
1265 	if (adapter->registry_jumbo_packet > 8192)
1266 		/* In order to transmit jumbo packets greater than 8k, the
1267 		 * FIFO between RxMAC and RxDMA needs to be reduced in size
1268 		 * to (16k - Jumbo packet size).  In order to implement this,
1269 		 * we must use "cut through" mode in the RxMAC, which chops
1270 		 * packets down into segments which are (max_size * 16).  In
1271 		 * this case we selected 256 bytes, since this is the size of
1272 		 * the PCI-Express TLP's that the 1310 uses.
1273 		 *
1274 		 * seg_en on, fc_en off, size 0x10
1275 		 */
1276 		writel(0x41, &rxmac->mcif_ctrl_max_seg);
1277 	else
1278 		writel(0, &rxmac->mcif_ctrl_max_seg);
1279 
1280 	/* Initialize the MCIF water marks */
1281 	writel(0, &rxmac->mcif_water_mark);
1282 
1283 	/*  Initialize the MIF control */
1284 	writel(0, &rxmac->mif_ctrl);
1285 
1286 	/* Initialize the Space Available Register */
1287 	writel(0, &rxmac->space_avail);
1288 
1289 	/* Initialize the mif_ctrl register
1290 	 * bit 3:  Receive code error. One or more nibbles were signaled as
1291 	 *	   errors  during the reception of the packet.  Clear this
1292 	 *	   bit in Gigabit, set it in 100Mbit.  This was derived
1293 	 *	   experimentally at UNH.
1294 	 * bit 4:  Receive CRC error. The packet's CRC did not match the
1295 	 *	   internally generated CRC.
1296 	 * bit 5:  Receive length check error. Indicates that frame length
1297 	 *	   field value in the packet does not match the actual data
1298 	 *	   byte length and is not a type field.
1299 	 * bit 16: Receive frame truncated.
1300 	 * bit 17: Drop packet enable
1301 	 */
1302 	if (phydev && phydev->speed == SPEED_100)
1303 		writel(0x30038, &rxmac->mif_ctrl);
1304 	else
1305 		writel(0x30030, &rxmac->mif_ctrl);
1306 
1307 	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
1308 	 * filter is always enabled since it is where the runt packets are
1309 	 * supposed to be dropped.  For version A silicon, runt packet
1310 	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
1311 	 * but we still leave the packet filter on.
1312 	 */
1313 	writel(pf_ctrl, &rxmac->pf_ctrl);
1314 	writel(0x9, &rxmac->ctrl);
1315 }
1316 
1317 static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
1318 {
1319 	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
1320 
1321 	/* We need to update the Control Frame Parameters
1322 	 * cfpt - control frame pause timer set to 64 (0x40)
1323 	 * cfep - control frame extended pause timer set to 0x0
1324 	 */
1325 	if (adapter->flowcontrol == FLOW_NONE)
1326 		writel(0, &txmac->cf_param);
1327 	else
1328 		writel(0x40, &txmac->cf_param);
1329 }
1330 
1331 static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
1332 {
1333 	struct macstat_regs __iomem *macstat =
1334 		&adapter->regs->macstat;
1335 
1336 	/* Next we need to initialize all the macstat registers to zero on
1337 	 * the device.
1338 	 */
1339 	writel(0, &macstat->txrx_0_64_byte_frames);
1340 	writel(0, &macstat->txrx_65_127_byte_frames);
1341 	writel(0, &macstat->txrx_128_255_byte_frames);
1342 	writel(0, &macstat->txrx_256_511_byte_frames);
1343 	writel(0, &macstat->txrx_512_1023_byte_frames);
1344 	writel(0, &macstat->txrx_1024_1518_byte_frames);
1345 	writel(0, &macstat->txrx_1519_1522_gvln_frames);
1346 
1347 	writel(0, &macstat->rx_bytes);
1348 	writel(0, &macstat->rx_packets);
1349 	writel(0, &macstat->rx_fcs_errs);
1350 	writel(0, &macstat->rx_multicast_packets);
1351 	writel(0, &macstat->rx_broadcast_packets);
1352 	writel(0, &macstat->rx_control_frames);
1353 	writel(0, &macstat->rx_pause_frames);
1354 	writel(0, &macstat->rx_unknown_opcodes);
1355 	writel(0, &macstat->rx_align_errs);
1356 	writel(0, &macstat->rx_frame_len_errs);
1357 	writel(0, &macstat->rx_code_errs);
1358 	writel(0, &macstat->rx_carrier_sense_errs);
1359 	writel(0, &macstat->rx_undersize_packets);
1360 	writel(0, &macstat->rx_oversize_packets);
1361 	writel(0, &macstat->rx_fragment_packets);
1362 	writel(0, &macstat->rx_jabbers);
1363 	writel(0, &macstat->rx_drops);
1364 
1365 	writel(0, &macstat->tx_bytes);
1366 	writel(0, &macstat->tx_packets);
1367 	writel(0, &macstat->tx_multicast_packets);
1368 	writel(0, &macstat->tx_broadcast_packets);
1369 	writel(0, &macstat->tx_pause_frames);
1370 	writel(0, &macstat->tx_deferred);
1371 	writel(0, &macstat->tx_excessive_deferred);
1372 	writel(0, &macstat->tx_single_collisions);
1373 	writel(0, &macstat->tx_multiple_collisions);
1374 	writel(0, &macstat->tx_late_collisions);
1375 	writel(0, &macstat->tx_excessive_collisions);
1376 	writel(0, &macstat->tx_total_collisions);
1377 	writel(0, &macstat->tx_pause_honored_frames);
1378 	writel(0, &macstat->tx_drops);
1379 	writel(0, &macstat->tx_jabbers);
1380 	writel(0, &macstat->tx_fcs_errs);
1381 	writel(0, &macstat->tx_control_frames);
1382 	writel(0, &macstat->tx_oversize_frames);
1383 	writel(0, &macstat->tx_undersize_frames);
1384 	writel(0, &macstat->tx_fragments);
1385 	writel(0, &macstat->carry_reg1);
1386 	writel(0, &macstat->carry_reg2);
1387 
1388 	/* Unmask any counters that we want to track the overflow of.
1389 	 * Initially this will be all counters.  It may become clear later
1390 	 * that we do not need to track all counters.
1391 	 */
1392 	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
1393 	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
1394 }
1395 
1396 /**
1397  * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
1398  * @adapter: pointer to our private adapter structure
1399  * @addr: the address of the transceiver
1400  * @reg: the register to read
1401  * @value: pointer to a 16-bit value in which the value will be stored
1402  *
1403  * Returns 0 on success, errno on failure (as defined in errno.h)
1404  */
1405 static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
1406 	      u8 reg, u16 *value)
1407 {
1408 	struct mac_regs __iomem *mac = &adapter->regs->mac;
1409 	int status = 0;
1410 	u32 delay = 0;
1411 	u32 mii_addr;
1412 	u32 mii_cmd;
1413 	u32 mii_indicator;
1414 
1415 	/* Save a local copy of the registers we are dealing with so we can
1416 	 * set them back
1417 	 */
1418 	mii_addr = readl(&mac->mii_mgmt_addr);
1419 	mii_cmd = readl(&mac->mii_mgmt_cmd);
1420 
1421 	/* Stop the current operation */
1422 	writel(0, &mac->mii_mgmt_cmd);
1423 
1424 	/* Set up the register we need to read from on the correct PHY */
1425 	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1426 
1427 	writel(0x1, &mac->mii_mgmt_cmd);
1428 
1429 	do {
1430 		udelay(50);
1431 		delay++;
1432 		mii_indicator = readl(&mac->mii_mgmt_indicator);
1433 	} while ((mii_indicator & MGMT_WAIT) && delay < 50);
1434 
1435 	/* If we hit the max delay, we could not read the register */
1436 	if (delay == 50) {
1437 		dev_warn(&adapter->pdev->dev,
1438 			    "reg 0x%08x could not be read\n", reg);
1439 		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
1440 			    mii_indicator);
1441 
1442 		status = -EIO;
1443 	}
1444 
1445 	/* If we hit here we were able to read the register and we need to
1446 	 * return the value to the caller */
1447 	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;
1448 
1449 	/* Stop the read operation */
1450 	writel(0, &mac->mii_mgmt_cmd);
1451 
1452 	/* set the registers we touched back to the state at which we entered
1453 	 * this function
1454 	 */
1455 	writel(mii_addr, &mac->mii_mgmt_addr);
1456 	writel(mii_cmd, &mac->mii_mgmt_cmd);
1457 
1458 	return status;
1459 }
1460 
1461 static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
1462 {
1463 	struct phy_device *phydev = adapter->phydev;
1464 
1465 	if (!phydev)
1466 		return -EIO;
1467 
1468 	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
1469 }
1470 
1471 /**
1472  * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
1473  * @adapter: pointer to our private adapter structure
1474  * @reg: the register to write to
1475  * @value: 16-bit value to write
1476  *
1477  * FIXME: one caller in netdev still
1478  *
1479  * Return 0 on success, errno on failure (as defined in errno.h)
1480  */
1481 static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
1482 {
1483 	struct mac_regs __iomem *mac = &adapter->regs->mac;
1484 	struct phy_device *phydev = adapter->phydev;
1485 	int status = 0;
1486 	u8 addr;
1487 	u32 delay = 0;
1488 	u32 mii_addr;
1489 	u32 mii_cmd;
1490 	u32 mii_indicator;
1491 
1492 	if (!phydev)
1493 		return -EIO;
1494 
1495 	addr = phydev->addr;
1496 
1497 	/* Save a local copy of the registers we are dealing with so we can
1498 	 * set them back
1499 	 */
1500 	mii_addr = readl(&mac->mii_mgmt_addr);
1501 	mii_cmd = readl(&mac->mii_mgmt_cmd);
1502 
1503 	/* Stop the current operation */
1504 	writel(0, &mac->mii_mgmt_cmd);
1505 
1506 	/* Set up the register we need to write to on the correct PHY */
1507 	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
1508 
1509 	/* Add the value to write to the registers to the mac */
1510 	writel(value, &mac->mii_mgmt_ctrl);
1511 
1512 	do {
1513 		udelay(50);
1514 		delay++;
1515 		mii_indicator = readl(&mac->mii_mgmt_indicator);
1516 	} while ((mii_indicator & MGMT_BUSY) && delay < 100);
1517 
1518 	/* If we hit the max delay, we could not write the register */
1519 	if (delay == 100) {
1520 		u16 tmp;
1521 
1522 		dev_warn(&adapter->pdev->dev,
1523 		    "reg 0x%08x could not be written\n", reg);
1524 		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
1525 			    mii_indicator);
1526 		dev_warn(&adapter->pdev->dev, "command is  0x%08x\n",
1527 			    readl(&mac->mii_mgmt_cmd));
1528 
1529 		et131x_mii_read(adapter, reg, &tmp);
1530 
1531 		status = -EIO;
1532 	}
1533 	/* Stop the write operation */
1534 	writel(0, &mac->mii_mgmt_cmd);
1535 
1536 	/*
1537 	 * set the registers we touched back to the state at which we entered
1538 	 * this function
1539 	 */
1540 	writel(mii_addr, &mac->mii_mgmt_addr);
1541 	writel(mii_cmd, &mac->mii_mgmt_cmd);
1542 
1543 	return status;
1544 }
1545 
1546 /* Still used from _mac for BIT_READ */
1547 static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
1548 				      u16 action, u16 regnum, u16 bitnum,
1549 				      u8 *value)
1550 {
1551 	u16 reg;
1552 	u16 mask = 0x0001 << bitnum;
1553 
1554 	/* Read the requested register */
1555 	et131x_mii_read(adapter, regnum, &reg);
1556 
1557 	switch (action) {
1558 	case TRUEPHY_BIT_READ:
1559 		*value = (reg & mask) >> bitnum;
1560 		break;
1561 
1562 	case TRUEPHY_BIT_SET:
1563 		et131x_mii_write(adapter, regnum, reg | mask);
1564 		break;
1565 
1566 	case TRUEPHY_BIT_CLEAR:
1567 		et131x_mii_write(adapter, regnum, reg & ~mask);
1568 		break;
1569 
1570 	default:
1571 		break;
1572 	}
1573 }
1574 
1575 static void et1310_config_flow_control(struct et131x_adapter *adapter)
1576 {
1577 	struct phy_device *phydev = adapter->phydev;
1578 
1579 	if (phydev->duplex == DUPLEX_HALF) {
1580 		adapter->flowcontrol = FLOW_NONE;
1581 	} else {
1582 		u8 remote_pause, remote_async_pause;
1583 
1584 		et1310_phy_access_mii_bit(adapter,
1585 				TRUEPHY_BIT_READ, 5, 10, &remote_pause);
1586 		et1310_phy_access_mii_bit(adapter,
1587 				TRUEPHY_BIT_READ, 5, 11,
1588 				&remote_async_pause);
1589 
1590 		if ((remote_pause == TRUEPHY_BIT_SET) &&
1591 		    (remote_async_pause == TRUEPHY_BIT_SET)) {
1592 			adapter->flowcontrol = adapter->wanted_flow;
1593 		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
1594 			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1595 			if (adapter->wanted_flow == FLOW_BOTH)
1596 				adapter->flowcontrol = FLOW_BOTH;
1597 			else
1598 				adapter->flowcontrol = FLOW_NONE;
1599 		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
1600 			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
1601 			adapter->flowcontrol = FLOW_NONE;
1602 		} else {/* if (remote_pause == TRUEPHY_BIT_CLEAR &&
1603 			       remote_async_pause == TRUEPHY_BIT_SET) */
1604 			if (adapter->wanted_flow == FLOW_BOTH)
1605 				adapter->flowcontrol = FLOW_RXONLY;
1606 			else
1607 				adapter->flowcontrol = FLOW_NONE;
1608 		}
1609 	}
1610 }
1611 
1612 /**
1613  * et1310_update_macstat_host_counters - Update the local copy of the statistics
1614  * @adapter: pointer to the adapter structure
1615  */
1616 static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
1617 {
1618 	struct ce_stats *stats = &adapter->stats;
1619 	struct macstat_regs __iomem *macstat =
1620 		&adapter->regs->macstat;
1621 
1622 	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
1623 	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
1624 	stats->tx_deferred	       += readl(&macstat->tx_deferred);
1625 	stats->tx_excessive_collisions +=
1626 				readl(&macstat->tx_multiple_collisions);
1627 	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
1628 	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
1629 	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);
1630 
1631 	stats->rx_align_errs        += readl(&macstat->rx_align_errs);
1632 	stats->rx_crc_errs          += readl(&macstat->rx_code_errs);
1633 	stats->rcvd_pkts_dropped    += readl(&macstat->rx_drops);
1634 	stats->rx_overflows         += readl(&macstat->rx_oversize_packets);
1635 	stats->rx_code_violations   += readl(&macstat->rx_fcs_errs);
1636 	stats->rx_length_errs       += readl(&macstat->rx_frame_len_errs);
1637 	stats->rx_other_errs        += readl(&macstat->rx_fragment_packets);
1638 }
1639 
1640 /**
1641  * et1310_handle_macstat_interrupt
1642  * @adapter: pointer to the adapter structure
1643  *
1644  * One of the MACSTAT counters has wrapped.  Update the local copy of
1645  * the statistics held in the adapter structure, checking the "wrap"
1646  * bit for each counter.
1647  */
1648 static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1649 {
1650 	u32 carry_reg1;
1651 	u32 carry_reg2;
1652 
1653 	/* Read the interrupt bits from the register(s).  These are Clear On
1654 	 * Write.
1655 	 */
1656 	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1657 	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1658 
1659 	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1660 	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1661 
1662 	/* We need to update the host copy of all the MAC_STAT counters.
1663 	 * For each counter, check its overflow bit.  If the overflow bit is
1664 	 * set, then increment the host version of the count by one complete
1665 	 * revolution of the counter.  This routine is called when the counter
1666 	 * block indicates that one of the counters has wrapped.
1667 	 */
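	/* The COUNTER_WRAP_* constants are presumed to be one full revolution
	 * of the corresponding hardware counter, i.e. 1 << 12 for the 12-bit
	 * counters and 1 << 16 for the 16-bit ones (defined in et131x.h).
	 */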
1668 	if (carry_reg1 & (1 << 14))
1669 		adapter->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
1670 	if (carry_reg1 & (1 << 8))
1671 		adapter->stats.rx_align_errs	+= COUNTER_WRAP_12_BIT;
1672 	if (carry_reg1 & (1 << 7))
1673 		adapter->stats.rx_length_errs	+= COUNTER_WRAP_16_BIT;
1674 	if (carry_reg1 & (1 << 2))
1675 		adapter->stats.rx_other_errs	+= COUNTER_WRAP_16_BIT;
1676 	if (carry_reg1 & (1 << 6))
1677 		adapter->stats.rx_crc_errs	+= COUNTER_WRAP_16_BIT;
1678 	if (carry_reg1 & (1 << 3))
1679 		adapter->stats.rx_overflows	+= COUNTER_WRAP_16_BIT;
1680 	if (carry_reg1 & (1 << 0))
1681 		adapter->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
1682 	if (carry_reg2 & (1 << 16))
1683 		adapter->stats.tx_max_pkt_errs	+= COUNTER_WRAP_12_BIT;
1684 	if (carry_reg2 & (1 << 15))
1685 		adapter->stats.tx_underflows	+= COUNTER_WRAP_12_BIT;
1686 	if (carry_reg2 & (1 << 6))
1687 		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1688 	if (carry_reg2 & (1 << 8))
1689 		adapter->stats.tx_deferred	+= COUNTER_WRAP_12_BIT;
1690 	if (carry_reg2 & (1 << 5))
1691 		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1692 	if (carry_reg2 & (1 << 4))
1693 		adapter->stats.tx_late_collisions	+= COUNTER_WRAP_12_BIT;
1694 	if (carry_reg2 & (1 << 2))
1695 		adapter->stats.tx_collisions	+= COUNTER_WRAP_12_BIT;
1696 }
1697 
1698 static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1699 {
1700 	struct net_device *netdev = bus->priv;
1701 	struct et131x_adapter *adapter = netdev_priv(netdev);
1702 	u16 value;
1703 	int ret;
1704 
1705 	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1706 
1707 	if (ret < 0)
1708 		return ret;
1709 	else
1710 		return value;
1711 }
1712 
1713 static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
1714 {
1715 	struct net_device *netdev = bus->priv;
1716 	struct et131x_adapter *adapter = netdev_priv(netdev);
1717 
1718 	return et131x_mii_write(adapter, reg, value);
1719 }
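/* Note: phy_addr is accepted for the mii_bus API but not passed down;
 * et131x_mii_write() always addresses the device's single attached PHY.
 */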
1720 
1721 static int et131x_mdio_reset(struct mii_bus *bus)
1722 {
1723 	struct net_device *netdev = bus->priv;
1724 	struct et131x_adapter *adapter = netdev_priv(netdev);
1725 
1726 	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1727 
1728 	return 0;
1729 }
1730 
1731 /**
1732  *	et1310_phy_power_down	-	PHY power control
1733  *	@adapter: device to control
1734  *	@down: true for off/false for back on
1735  *
1736  *	one hundred, ten, one thousand megs
1737  *	How would you like to have your LAN accessed
1738  *	Can't you see that this code processed
1739  *	Phy power, phy power..
1740  */
1741 static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1742 {
1743 	u16 data;
1744 
1745 	et131x_mii_read(adapter, MII_BMCR, &data);
1746 	data &= ~BMCR_PDOWN;
1747 	if (down)
1748 		data |= BMCR_PDOWN;
1749 	et131x_mii_write(adapter, MII_BMCR, data);
1750 }
1751 
1752 /**
1753  * et131x_xcvr_init - Init the phy if we are setting it into force mode
1754  * @adapter: pointer to our private adapter structure
1755  *
1756  */
1757 static void et131x_xcvr_init(struct et131x_adapter *adapter)
1758 {
1759 	u16 imr;
1760 	u16 isr;
1761 	u16 lcr2;
1762 
1763 	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1764 	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1765 
1766 	/* Set the link status interrupt only.  Enabling both the link status
1767 	 * and auto-neg interrupts causes a nested interrupt problem, so avoid it
1768 	 */
1769 	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT &
1770 		ET_PHY_INT_MASK_LINKSTAT &
1771 		ET_PHY_INT_MASK_ENABLE);
1772 
1773 	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1774 
1775 	/* Set the LED behavior such that LED 1 indicates speed (off =
1776 	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1777 	 * link and activity (on for link, blink off for activity).
1778 	 *
1779 	 * NOTE: Some customizations have been added here for specific
1780 	 * vendors; The LED behavior is now determined by vendor data in the
1781 	 * EEPROM. However, the above description is the default.
1782 	 */
1783 	if ((adapter->eeprom_data[1] & 0x4) == 0) {
1784 		et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1785 
1786 		lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T);
1787 		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1788 
1789 		if ((adapter->eeprom_data[1] & 0x8) == 0)
1790 			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1791 		else
1792 			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1793 
1794 		et131x_mii_write(adapter, PHY_LED_2, lcr2);
1795 	}
1796 }
1797 
1798 /**
1799  * et131x_configure_global_regs	-	configure JAGCore global regs
1800  * @adapter: pointer to our adapter structure
1801  *
1802  * Used to configure the global registers on the JAGCore
1803  */
1804 static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1805 {
1806 	struct global_regs __iomem *regs = &adapter->regs->global;
1807 
1808 	writel(0, &regs->rxq_start_addr);
1809 	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1810 
1811 	if (adapter->registry_jumbo_packet < 2048) {
1812 		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1813 		 * block of RAM that the driver can split between Tx
1814 		 * and Rx as it desires.  Our default is to split it
1815 		 * 50/50:
1816 		 */
1817 		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1818 		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1819 	} else if (adapter->registry_jumbo_packet < 8192) {
1820 		/* For jumbo packets > 2k but < 8k, split 50-50. */
1821 		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1822 		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1823 	} else {
1824 		/* 9216 is the only packet size greater than 8k that
1825 		 * is available. The Tx buffer has to be big enough
1826 		 * for one whole packet on the Tx side. We'll make
1827 		 * the Tx 9408, and give the rest to Rx
1828 		 */
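		/* Illustrative arithmetic (assuming a 1k-word internal RAM
		 * with 16 bytes per word): 0x400 - 0x1b4 = 588 words, i.e.
		 * 9408 bytes left for the Tx queue, as described above.
		 */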
1829 		writel(0x01b3, &regs->rxq_end_addr);
1830 		writel(0x01b4, &regs->txq_start_addr);
1831 	}
1832 
1833 	/* Initialize the loopback register. Disable all loopbacks. */
1834 	writel(0, &regs->loopback);
1835 
1836 	/* MSI Register */
1837 	writel(0, &regs->msi_config);
1838 
1839 	/* By default, disable the watchdog timer.  It will be enabled when
1840 	 * a packet is queued.
1841 	 */
1842 	writel(0, &regs->watchdog_timer);
1843 }
1844 
1845 /**
1846  * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
1847  * @adapter: pointer to our adapter structure
1848  */
1849 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1850 {
1851 	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1852 	struct rx_ring *rx_local = &adapter->rx_ring;
1853 	struct fbr_desc *fbr_entry;
1854 	u32 entry;
1855 	u32 psr_num_des;
1856 	unsigned long flags;
1857 
1858 	/* Halt RXDMA to perform the reconfigure.  */
1859 	et131x_rx_dma_disable(adapter);
1860 
1861 	/* Load the completion writeback physical address
1862 	 *
1863 	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
1864 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
1865 	 * are ever returned, make sure the high part is retrieved here
1866 	 * before storing the adjusted address.
1867 	 */
1868 	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1869 	       &rx_dma->dma_wb_base_hi);
1870 	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1871 
1872 	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1873 
1874 	/* Set the address and parameters of the packet status ring into the
1875 	 * 1310's registers
1876 	 */
1877 	writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1878 	       &rx_dma->psr_base_hi);
1879 	writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1880 	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1881 	writel(0, &rx_dma->psr_full_offset);
1882 
1883 	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1884 	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1885 	       &rx_dma->psr_min_des);
1886 
1887 	spin_lock_irqsave(&adapter->rcv_lock, flags);
1888 
1889 	/* These local variables track the PSR in the adapter structure */
1890 	rx_local->local_psr_full = 0;
1891 
1892 	/* Now's the best time to initialize FBR1 contents */
1893 	fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1894 	for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1895 		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1896 		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1897 		fbr_entry->word2 = entry;
1898 		fbr_entry++;
1899 	}
1900 
1901 	/* Set the address and parameters of Free buffer ring 1 (and 0 if
1902 	 * required) into the 1310's registers
1903 	 */
1904 	writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1905 	       &rx_dma->fbr1_base_hi);
1906 	writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1907 	writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1908 	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
1909 
1910 	/* This variable tracks the free buffer ring 1 full position, so it
1911 	 * has to match the above.
1912 	 */
1913 	rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
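	/* fbr1_min_des is a low-water mark, expressed here as a percentage of
	 * the ring size, below which the hardware is expected to flag that
	 * free buffers are running short.
	 */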
1914 	writel(
1915 	   ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1916 	   &rx_dma->fbr1_min_des);
1917 
1918 #ifdef USE_FBR0
1919 	/* Now's the best time to initialize FBR0 contents */
1920 	fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1921 	for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1922 		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1923 		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1924 		fbr_entry->word2 = entry;
1925 		fbr_entry++;
1926 	}
1927 
1928 	writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1929 	       &rx_dma->fbr0_base_hi);
1930 	writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1931 	writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1932 	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1933 
1934 	/* This variable tracks the free buffer ring 0 full position, so it
1935 	 * has to match the above.
1936 	 */
1937 	rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1938 	writel(
1939 	   ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1940 	   &rx_dma->fbr0_min_des);
1941 #endif
1942 
1943 	/* Program the number of packets we will receive before generating an
1944 	 * interrupt.
1945 	 * For version B silicon, this value gets updated once autoneg is
1946 	 * complete.
1947 	 */
1948 	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1949 
1950 	/* The "time_done" is not working correctly to coalesce interrupts
1951 	 * after a given time period, but rather is giving us an interrupt
1952 	 * regardless of whether we have received packets.
1953 	 * This value gets updated once autoneg is complete.
1954 	 */
1955 	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1956 
1957 	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1958 }
1959 
1960 /**
1961  * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1962  * @adapter: pointer to our private adapter structure
1963  *
1964  * Configure the transmit engine with the ring buffers we have created
1965  * and prepare it for use.
1966  */
1967 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1968 {
1969 	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1970 
1971 	/* Load the hardware with the start of the transmit descriptor ring. */
1972 	writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1973 	       &txdma->pr_base_hi);
1974 	writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1975 	       &txdma->pr_base_lo);
1976 
1977 	/* Initialise the transmit DMA engine */
1978 	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1979 
1980 	/* Load the completion writeback physical address */
1981 	writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
1982 						&txdma->dma_wb_base_hi);
1983 	writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
1984 
1985 	*adapter->tx_ring.tx_status = 0;
1986 
1987 	writel(0, &txdma->service_request);
1988 	adapter->tx_ring.send_idx = 0;
1989 }
1990 
1991 /**
1992  * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
1993  * @adapter: pointer to our private adapter structure
1996  */
1997 static void et131x_adapter_setup(struct et131x_adapter *adapter)
1998 {
1999 	/* Configure the JAGCore */
2000 	et131x_configure_global_regs(adapter);
2001 
2002 	et1310_config_mac_regs1(adapter);
2003 
2004 	/* Configure the MMC registers */
2005 	/* All we need to do is initialize the Memory Control Register */
2006 	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2007 
2008 	et1310_config_rxmac_regs(adapter);
2009 	et1310_config_txmac_regs(adapter);
2010 
2011 	et131x_config_rx_dma_regs(adapter);
2012 	et131x_config_tx_dma_regs(adapter);
2013 
2014 	et1310_config_macstat_regs(adapter);
2015 
2016 	et1310_phy_power_down(adapter, 0);
2017 	et131x_xcvr_init(adapter);
2018 }
2019 
2020 /**
2021  * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
2022  * @adapter: pointer to our private adapter structure
2023  */
2024 static void et131x_soft_reset(struct et131x_adapter *adapter)
2025 {
2026 	/* Disable MAC Core */
2027 	writel(0xc00f0000, &adapter->regs->mac.cfg1);
2028 
2029 	/* Set everything to a reset value */
2030 	writel(0x7F, &adapter->regs->global.sw_reset);
2031 	writel(0x000f0000, &adapter->regs->mac.cfg1);
2032 	writel(0x00000000, &adapter->regs->mac.cfg1);
2033 }
2034 
2035 /**
2036  *	et131x_enable_interrupts	-	enable interrupt
2037  *	@adapter: et131x device
2038  *
2039  *	Enable the appropriate interrupts on the ET131x according to our
2040  *	configuration
2041  */
2042 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2043 {
2044 	u32 mask;
2045 
2046 	/* Enable all global interrupts */
2047 	if (adapter->flowcontrol == FLOW_TXONLY ||
2048 			    adapter->flowcontrol == FLOW_BOTH)
2049 		mask = INT_MASK_ENABLE;
2050 	else
2051 		mask = INT_MASK_ENABLE_NO_FLOW;
2052 
2053 	writel(mask, &adapter->regs->global.int_mask);
2054 }
2055 
2056 /**
2057  *	et131x_disable_interrupts	-	interrupt disable
2058  *	@adapter: et131x device
2059  *
2060  *	Block all interrupts from the et131x device at the device itself
2061  */
2062 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2063 {
2064 	/* Disable all global interrupts */
2065 	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2066 }
2067 
2068 /**
2069  * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
2070  * @adapter: pointer to our adapter structure
2071  */
2072 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2073 {
2074 	/* Set up the transmit DMA configuration register */
2075 	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2076 					&adapter->regs->txdma.csr);
2077 }
2078 
2079 /**
2080  * et131x_enable_txrx - Enable tx/rx queues
2081  * @netdev: device to be enabled
2082  */
2083 static void et131x_enable_txrx(struct net_device *netdev)
2084 {
2085 	struct et131x_adapter *adapter = netdev_priv(netdev);
2086 
2087 	/* Enable the Tx and Rx DMA engines (if not already enabled) */
2088 	et131x_rx_dma_enable(adapter);
2089 	et131x_tx_dma_enable(adapter);
2090 
2091 	/* Enable device interrupts */
2092 	if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2093 		et131x_enable_interrupts(adapter);
2094 
2095 	/* We're ready to move some data, so start the queue */
2096 	netif_start_queue(netdev);
2097 }
2098 
2099 /**
2100  * et131x_disable_txrx - Disable tx/rx queues
2101  * @netdev: device to be disabled
2102  */
2103 static void et131x_disable_txrx(struct net_device *netdev)
2104 {
2105 	struct et131x_adapter *adapter = netdev_priv(netdev);
2106 
2107 	/* First thing is to stop the queue */
2108 	netif_stop_queue(netdev);
2109 
2110 	/* Stop the Tx and Rx DMA engines */
2111 	et131x_rx_dma_disable(adapter);
2112 	et131x_tx_dma_disable(adapter);
2113 
2114 	/* Disable device interrupts */
2115 	et131x_disable_interrupts(adapter);
2116 }
2117 
2118 /**
2119  * et131x_init_send - Initialize send data structures
2120  * @adapter: pointer to our private adapter structure
2121  */
2122 static void et131x_init_send(struct et131x_adapter *adapter)
2123 {
2124 	struct tcb *tcb;
2125 	u32 ct;
2126 	struct tx_ring *tx_ring;
2127 
2128 	/* Setup some convenience pointers */
2129 	tx_ring = &adapter->tx_ring;
2130 	tcb = adapter->tx_ring.tcb_ring;
2131 
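	/* Chain every TCB to the next one in the array, forming a singly
	 * linked free list: tcb_qhead is the first entry and tcb_qtail the
	 * last one (whose next pointer is NULL).
	 */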
2132 	tx_ring->tcb_qhead = tcb;
2133 
2134 	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2135 
2136 	/* Go through and set up each TCB */
2137 	for (ct = 0; ct++ < NUM_TCB; tcb++)
2138 		/* Set the link pointer in HW TCB to the next TCB in the
2139 		 * chain
2140 		 */
2141 		tcb->next = tcb + 1;
2142 
2143 	/* tcb now points one past the last entry; back up to set the tail */
2144 	tcb--;
2145 	tx_ring->tcb_qtail = tcb;
2146 	tcb->next = NULL;
2147 	/* Curr send queue should now be empty */
2148 	tx_ring->send_head = NULL;
2149 	tx_ring->send_tail = NULL;
2150 }
2151 
2152 /**
2153  * et1310_enable_phy_coma - called when network cable is unplugged
2154  * @adapter: pointer to our adapter structure
2155  *
2156  * The driver receives a phy status change interrupt while in D0 and checks
2157  * that phy_status is down.
2158  *
2159  *          -- gate off JAGCore;
2160  *          -- set gigE PHY in Coma mode
2161  *          -- wake on phy_interrupt; Perform software reset JAGCore,
2162  *             re-initialize jagcore and gigE PHY
2163  *
2164  *      Add D0-ASPM-PhyLinkDown Support:
2165  *          -- while in D0, when there is a phy_interrupt indicating phy link
2166  *             down status, call the MPSetPhyComa routine to enter this active
2167  *             state power saving mode
2168  *          -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
2169  *       indicating linkup status, call the MPDisablePhyComa routine to
2170  *             restore JAGCore and gigE PHY
2171  */
2172 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2173 {
2174 	unsigned long flags;
2175 	u32 pmcsr;
2176 
2177 	pmcsr = readl(&adapter->regs->global.pm_csr);
2178 
2179 	/* Save the GbE PHY speed and duplex modes. Need to restore this
2180 	 * when cable is plugged back in
2181 	 */
2182 	/*
2183 	 * TODO - when PM is re-enabled, check if we need to
2184 	 * perform a similar task as this -
2185 	 * adapter->pdown_speed = adapter->ai_force_speed;
2186 	 * adapter->pdown_duplex = adapter->ai_force_duplex;
2187 	 */
2188 
2189 	/* Stop sending packets. */
2190 	spin_lock_irqsave(&adapter->send_hw_lock, flags);
2191 	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2192 	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2193 
2194 	/* Wait for outstanding Receive packets */
2195 
2196 	et131x_disable_txrx(adapter->netdev);
2197 
2198 	/* Gate off JAGCore 3 clock domains */
2199 	pmcsr &= ~ET_PMCSR_INIT;
2200 	writel(pmcsr, &adapter->regs->global.pm_csr);
2201 
2202 	/* Program gigE PHY in to Coma mode */
2203 	pmcsr |= ET_PM_PHY_SW_COMA;
2204 	writel(pmcsr, &adapter->regs->global.pm_csr);
2205 }
2206 
2207 /**
2208  * et1310_disable_phy_coma - Disable the Phy Coma Mode
2209  * @adapter: pointer to our adapter structure
2210  */
2211 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2212 {
2213 	u32 pmcsr;
2214 
2215 	pmcsr = readl(&adapter->regs->global.pm_csr);
2216 
2217 	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
2218 	pmcsr |= ET_PMCSR_INIT;
2219 	pmcsr &= ~ET_PM_PHY_SW_COMA;
2220 	writel(pmcsr, &adapter->regs->global.pm_csr);
2221 
2222 	/* Restore the GbE PHY speed and duplex modes;
2223 	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2224 	 */
2225 	/* TODO - when PM is re-enabled, check if we need to
2226 	 * perform a similar task as this -
2227 	 * adapter->ai_force_speed = adapter->pdown_speed;
2228 	 * adapter->ai_force_duplex = adapter->pdown_duplex;
2229 	 */
2230 
2231 	/* Re-initialize the send structures */
2232 	et131x_init_send(adapter);
2233 
2234 	/* Bring the device back to the state it was during init prior to
2235 	 * autonegotiation being complete.  This way, when we get the auto-neg
2236 	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
2237 	 */
2238 	et131x_soft_reset(adapter);
2239 
2240 	/* setup et1310 as per the documentation ?? */
2241 	et131x_adapter_setup(adapter);
2242 
2243 	/* Allow Tx to restart */
2244 	adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2245 
2246 	et131x_enable_txrx(adapter->netdev);
2247 }
2248 
2249 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2250 {
2251 	u32 tmp_free_buff_ring = *free_buff_ring;
2252 	tmp_free_buff_ring++;
2253 	/* This works for all cases where limit < 1024. The 1023 case
2254 	   works because 1023++ is 1024 which means the if condition is not
2255 	   taken but the carry of the bit into the wrap bit toggles the wrap
2256 	   value correctly */
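	/* Worked example, assuming the usual 10-bit layout (ET_DMA10_MASK ==
	 * 0x3FF, ET_DMA10_WRAP == 0x400) and limit == 511: 511 increments to
	 * 512, which exceeds the limit, so the index bits are cleared and the
	 * wrap bit is toggled, leaving index 0 with the wrap flag flipped.
	 */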
2257 	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2258 		tmp_free_buff_ring &= ~ET_DMA10_MASK;
2259 		tmp_free_buff_ring ^= ET_DMA10_WRAP;
2260 	}
2261 	/* For the 1023 case */
2262 	tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2263 	*free_buff_ring = tmp_free_buff_ring;
2264 	return tmp_free_buff_ring;
2265 }
2266 
2267 /**
2268  * et131x_align_allocated_memory - Align allocated memory on a given boundary
2269  * @adapter: pointer to our adapter structure
2270  * @phys_addr: pointer to Physical address
2271  * @offset: pointer to the offset variable
2272  * @mask: alignment mask (e.g. 0x0FFF to align on a 4K boundary)
2273  */
2274 static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2275 					  u64 *phys_addr, u64 *offset,
2276 					  u64 mask)
2277 {
2278 	u64 new_addr = *phys_addr & ~mask;
2279 
2280 	*offset = 0;
2281 
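	/* Illustration: with *phys_addr == 0x12345678 and mask == 0x0FFF,
	 * new_addr is rounded up to 0x12346000 and *offset becomes 0x988,
	 * which the caller adds to the corresponding virtual address.
	 */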
2282 	if (new_addr != *phys_addr) {
2283 		/* Move to next aligned block */
2284 		new_addr += mask + 1;
2285 		/* Return offset for adjusting virt addr */
2286 		*offset = new_addr - *phys_addr;
2287 		/* Return new physical address */
2288 		*phys_addr = new_addr;
2289 	}
2290 }
2291 
2292 /**
2293  * et131x_rx_dma_memory_alloc
2294  * @adapter: pointer to our private adapter structure
2295  *
2296  * Returns 0 on success and errno on failure (as defined in errno.h)
2297  *
2298  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
2299  * and the Packet Status Ring.
2300  */
2301 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2302 {
2303 	u32 i, j;
2304 	u32 bufsize;
2305 	u32 pktstat_ringsize, fbr_chunksize;
2306 	struct rx_ring *rx_ring;
2307 
2308 	/* Setup some convenience pointers */
2309 	rx_ring = &adapter->rx_ring;
2310 
2311 	/* Alloc memory for the lookup table */
2312 #ifdef USE_FBR0
2313 	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2314 #endif
2315 	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2316 
2317 	/* The first thing we will do is configure the sizes of the buffer
2318 	 * rings. These will change based on jumbo packet support.  Larger
2319 	 * jumbo packets increase the size of each entry in FBR0, and the
2320 	 * number of entries in FBR0, while at the same time decreasing the
2321 	 * number of entries in FBR1.
2322 	 *
2323 	 * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
2324 	 * entries are huge in order to accommodate a "jumbo" frame, then it
2325 	 * will have fewer entries.  Conversely, FBR0 will now be relied upon
2326 	 * to carry more "normal" frames, thus its entry size also increases
2327 	 * and the number of entries goes up too (since it now carries
2328 	 * "small" + "regular" packets).
2329 	 *
2330 	 * In this scheme, we try to maintain 512 entries between the two
2331 	 * rings. Also, FBR1 remains a constant size - when its size doubles
2332 	 * the number of entries halves.  FBR0 increases in size, however.
2333 	 */
2334 
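	/* For instance, with the default (non-jumbo) configuration this gives
	 * 512 x 2048-byte buffers in FBR1, 512 x 256-byte buffers in FBR0 and
	 * a 1024-entry packet status ring (when FBR0 is in use).
	 */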
2335 	if (adapter->registry_jumbo_packet < 2048) {
2336 #ifdef USE_FBR0
2337 		rx_ring->fbr[1]->buffsize = 256;
2338 		rx_ring->fbr[1]->num_entries = 512;
2339 #endif
2340 		rx_ring->fbr[0]->buffsize = 2048;
2341 		rx_ring->fbr[0]->num_entries = 512;
2342 	} else if (adapter->registry_jumbo_packet < 4096) {
2343 #ifdef USE_FBR0
2344 		rx_ring->fbr[1]->buffsize = 512;
2345 		rx_ring->fbr[1]->num_entries = 1024;
2346 #endif
2347 		rx_ring->fbr[0]->buffsize = 4096;
2348 		rx_ring->fbr[0]->num_entries = 512;
2349 	} else {
2350 #ifdef USE_FBR0
2351 		rx_ring->fbr[1]->buffsize = 1024;
2352 		rx_ring->fbr[1]->num_entries = 768;
2353 #endif
2354 		rx_ring->fbr[0]->buffsize = 16384;
2355 		rx_ring->fbr[0]->num_entries = 128;
2356 	}
2357 
2358 #ifdef USE_FBR0
2359 	adapter->rx_ring.psr_num_entries =
2360 				adapter->rx_ring.fbr[1]->num_entries +
2361 				adapter->rx_ring.fbr[0]->num_entries;
2362 #else
2363 	adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2364 #endif
2365 
2366 	/* Allocate an area of memory for Free Buffer Ring 1 */
2367 	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2368 									0xfff;
2369 	rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2370 					bufsize,
2371 					&rx_ring->fbr[0]->ring_physaddr,
2372 					GFP_KERNEL);
2373 	if (!rx_ring->fbr[0]->ring_virtaddr) {
2374 		dev_err(&adapter->pdev->dev,
2375 			  "Cannot alloc memory for Free Buffer Ring 1\n");
2376 		return -ENOMEM;
2377 	}
2378 
2379 	/* Save physical address
2380 	 *
2381 	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2382 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2383 	 * are ever returned, make sure the high part is retrieved here
2384 	 * before storing the adjusted address.
2385 	 */
2386 	rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2387 
2388 	/* Align Free Buffer Ring 1 on a 4K boundary */
2389 	et131x_align_allocated_memory(adapter,
2390 				      &rx_ring->fbr[0]->real_physaddr,
2391 				      &rx_ring->fbr[0]->offset, 0x0FFF);
2392 
2393 	rx_ring->fbr[0]->ring_virtaddr =
2394 			(void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2395 			rx_ring->fbr[0]->offset);
2396 
2397 #ifdef USE_FBR0
2398 	/* Allocate an area of memory for Free Buffer Ring 0 */
2399 	bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2400 									0xfff;
2401 	rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2402 						bufsize,
2403 						&rx_ring->fbr[1]->ring_physaddr,
2404 						GFP_KERNEL);
2405 	if (!rx_ring->fbr[1]->ring_virtaddr) {
2406 		dev_err(&adapter->pdev->dev,
2407 			  "Cannot alloc memory for Free Buffer Ring 0\n");
2408 		return -ENOMEM;
2409 	}
2410 
2411 	/* Save physical address
2412 	 *
2413 	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2414 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2415 	 * are ever returned, make sure the high part is retrieved here before
2416 	 * storing the adjusted address.
2417 	 */
2418 	rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2419 
2420 	/* Align Free Buffer Ring 0 on a 4K boundary */
2421 	et131x_align_allocated_memory(adapter,
2422 				      &rx_ring->fbr[1]->real_physaddr,
2423 				      &rx_ring->fbr[1]->offset, 0x0FFF);
2424 
2425 	rx_ring->fbr[1]->ring_virtaddr =
2426 			(void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2427 			rx_ring->fbr[1]->offset);
2428 #endif
2429 	for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2430 		u64 fbr1_tmp_physaddr;
2431 		u64 fbr1_offset;
2432 		u32 fbr1_align;
2433 
2434 		/* This code allocates an area of memory big enough for N
2435 		 * free buffers + (buffer_size - 1) so that the buffers can
2436 		 * be aligned on 4k boundaries.  If each buffer were aligned
2437 		 * to a buffer_size boundary, the effect would be to double
2438 		 * the size of FBR0.  By allocating N buffers at once, we
2439 		 * reduce this overhead.
2440 		 */
2441 		if (rx_ring->fbr[0]->buffsize > 4096)
2442 			fbr1_align = 4096;
2443 		else
2444 			fbr1_align = rx_ring->fbr[0]->buffsize;
2445 
2446 		fbr_chunksize =
2447 		    (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2448 		rx_ring->fbr[0]->mem_virtaddrs[i] =
2449 		    dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2450 				       &rx_ring->fbr[0]->mem_physaddrs[i],
2451 				       GFP_KERNEL);
2452 
2453 		if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2454 			dev_err(&adapter->pdev->dev,
2455 				"Could not alloc memory\n");
2456 			return -ENOMEM;
2457 		}
2458 
2459 		/* See NOTE in "Save Physical Address" comment above */
2460 		fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2461 
2462 		et131x_align_allocated_memory(adapter,
2463 					      &fbr1_tmp_physaddr,
2464 					      &fbr1_offset, (fbr1_align - 1));
2465 
2466 		for (j = 0; j < FBR_CHUNKS; j++) {
2467 			u32 index = (i * FBR_CHUNKS) + j;
2468 
2469 			/* Save the Virtual address of this index for quick
2470 			 * access later
2471 			 */
2472 			rx_ring->fbr[0]->virt[index] =
2473 			    (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2474 			    (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2475 
2476 			/* now store the physical address in the descriptor
2477 			 * so the device can access it
2478 			 */
2479 			rx_ring->fbr[0]->bus_high[index] =
2480 			    (u32) (fbr1_tmp_physaddr >> 32);
2481 			rx_ring->fbr[0]->bus_low[index] =
2482 			    (u32) fbr1_tmp_physaddr;
2483 
2484 			fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2485 
2486 			rx_ring->fbr[0]->buffer1[index] =
2487 			    rx_ring->fbr[0]->virt[index];
2488 			rx_ring->fbr[0]->buffer2[index] =
2489 			    rx_ring->fbr[0]->virt[index] - 4;
2490 		}
2491 	}
2492 
2493 #ifdef USE_FBR0
2494 	/* Same for FBR0 (if in use) */
2495 	for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2496 		u64 fbr0_tmp_physaddr;
2497 		u64 fbr0_offset;
2498 
2499 		fbr_chunksize =
2500 		    ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2501 		rx_ring->fbr[1]->mem_virtaddrs[i] =
2502 		    dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2503 				       &rx_ring->fbr[1]->mem_physaddrs[i],
2504 				       GFP_KERNEL);
2505 
2506 		if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2507 			dev_err(&adapter->pdev->dev,
2508 				"Could not alloc memory\n");
2509 			return -ENOMEM;
2510 		}
2511 
2512 		/* See NOTE in "Save Physical Address" comment above */
2513 		fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2514 
2515 		et131x_align_allocated_memory(adapter,
2516 					      &fbr0_tmp_physaddr,
2517 					      &fbr0_offset,
2518 					      rx_ring->fbr[1]->buffsize - 1);
2519 
2520 		for (j = 0; j < FBR_CHUNKS; j++) {
2521 			u32 index = (i * FBR_CHUNKS) + j;
2522 
2523 			rx_ring->fbr[1]->virt[index] =
2524 			    (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2525 			    (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2526 
2527 			rx_ring->fbr[1]->bus_high[index] =
2528 			    (u32) (fbr0_tmp_physaddr >> 32);
2529 			rx_ring->fbr[1]->bus_low[index] =
2530 			    (u32) fbr0_tmp_physaddr;
2531 
2532 			fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2533 
2534 			rx_ring->fbr[1]->buffer1[index] =
2535 			    rx_ring->fbr[1]->virt[index];
2536 			rx_ring->fbr[1]->buffer2[index] =
2537 			    rx_ring->fbr[1]->virt[index] - 4;
2538 		}
2539 	}
2540 #endif
2541 
2542 	/* Allocate an area of memory for FIFO of Packet Status ring entries */
2543 	pktstat_ringsize =
2544 	    sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2545 
2546 	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2547 						  pktstat_ringsize,
2548 						  &rx_ring->ps_ring_physaddr,
2549 						  GFP_KERNEL);
2550 
2551 	if (!rx_ring->ps_ring_virtaddr) {
2552 		dev_err(&adapter->pdev->dev,
2553 			  "Cannot alloc memory for Packet Status Ring\n");
2554 		return -ENOMEM;
2555 	}
2556 	printk(KERN_INFO "Packet Status Ring %lx\n",
2557 	    (unsigned long) rx_ring->ps_ring_physaddr);
2558 
2559 	/*
2560 	 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2561 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2562 	 * are ever returned, make sure the high part is retrieved here before
2563 	 * storing the adjusted address.
2564 	 */
2565 
2566 	/* Allocate an area of memory for writeback of status information */
2567 	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2568 					    sizeof(struct rx_status_block),
2569 					    &rx_ring->rx_status_bus,
2570 					    GFP_KERNEL);
2571 	if (!rx_ring->rx_status_block) {
2572 		dev_err(&adapter->pdev->dev,
2573 			  "Cannot alloc memory for Status Block\n");
2574 		return -ENOMEM;
2575 	}
2576 	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2577 	printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
2578 
2579 	/* Recv
2580 	 * kmem_cache_create initializes a lookaside list. After successful
2581 	 * creation, nonpaged fixed-size blocks can be allocated from and
2582 	 * freed to the lookaside list.
2583 	 * RFDs will be allocated from this pool.
2584 	 */
2585 	rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2586 						   sizeof(struct rfd),
2587 						   0,
2588 						   SLAB_CACHE_DMA |
2589 						   SLAB_HWCACHE_ALIGN,
2590 						   NULL);
2591 
2592 	adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2593 
2594 	/* The RFDs are going to be put on lists later on, so initialize the
2595 	 * lists now.
2596 	 */
2597 	INIT_LIST_HEAD(&rx_ring->recv_list);
2598 	return 0;
2599 }
2600 
2601 /**
2602  * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2603  * @adapter: pointer to our private adapter structure
2604  */
2605 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2606 {
2607 	u32 index;
2608 	u32 bufsize;
2609 	u32 pktstat_ringsize;
2610 	struct rfd *rfd;
2611 	struct rx_ring *rx_ring;
2612 
2613 	/* Setup some convenience pointers */
2614 	rx_ring = &adapter->rx_ring;
2615 
2616 	/* Free RFDs and associated packet descriptors */
2617 	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2618 
2619 	while (!list_empty(&rx_ring->recv_list)) {
2620 		rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2621 				struct rfd, list_node);
2622 
2623 		list_del(&rfd->list_node);
2624 		rfd->skb = NULL;
2625 		kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2626 	}
2627 
2628 	/* Free Free Buffer Ring 1 */
2629 	if (rx_ring->fbr[0]->ring_virtaddr) {
2630 		/* First the packet memory */
2631 		for (index = 0; index <
2632 		     (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2633 			if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2634 				u32 fbr1_align;
2635 
2636 				if (rx_ring->fbr[0]->buffsize > 4096)
2637 					fbr1_align = 4096;
2638 				else
2639 					fbr1_align = rx_ring->fbr[0]->buffsize;
2640 
2641 				bufsize =
2642 				    (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2643 				    fbr1_align - 1;
2644 
2645 				dma_free_coherent(&adapter->pdev->dev,
2646 					bufsize,
2647 					rx_ring->fbr[0]->mem_virtaddrs[index],
2648 					rx_ring->fbr[0]->mem_physaddrs[index]);
2649 
2650 				rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2651 			}
2652 		}
2653 
2654 		/* Now the FIFO itself */
2655 		rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2656 		    rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2657 
2658 		bufsize =
2659 		    (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2660 									0xfff;
2661 
2662 		dma_free_coherent(&adapter->pdev->dev, bufsize,
2663 				    rx_ring->fbr[0]->ring_virtaddr,
2664 				    rx_ring->fbr[0]->ring_physaddr);
2665 
2666 		rx_ring->fbr[0]->ring_virtaddr = NULL;
2667 	}
2668 
2669 #ifdef USE_FBR0
2670 	/* Now the same for Free Buffer Ring 0 */
2671 	if (rx_ring->fbr[1]->ring_virtaddr) {
2672 		/* First the packet memory */
2673 		for (index = 0; index <
2674 		     (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2675 			if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2676 				bufsize =
2677 				    (rx_ring->fbr[1]->buffsize *
2678 				     (FBR_CHUNKS + 1)) - 1;
2679 
2680 				dma_free_coherent(&adapter->pdev->dev,
2681 					bufsize,
2682 					rx_ring->fbr[1]->mem_virtaddrs[index],
2683 					rx_ring->fbr[1]->mem_physaddrs[index]);
2684 
2685 				rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2686 			}
2687 		}
2688 
2689 		/* Now the FIFO itself */
2690 		rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2691 		    rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2692 
2693 		bufsize =
2694 		    (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2695 									0xfff;
2696 
2697 		dma_free_coherent(&adapter->pdev->dev,
2698 				  bufsize,
2699 				  rx_ring->fbr[1]->ring_virtaddr,
2700 				  rx_ring->fbr[1]->ring_physaddr);
2701 
2702 		rx_ring->fbr[1]->ring_virtaddr = NULL;
2703 	}
2704 #endif
2705 
2706 	/* Free Packet Status Ring */
2707 	if (rx_ring->ps_ring_virtaddr) {
2708 		pktstat_ringsize =
2709 		    sizeof(struct pkt_stat_desc) *
2710 		    adapter->rx_ring.psr_num_entries;
2711 
2712 		dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2713 				    rx_ring->ps_ring_virtaddr,
2714 				    rx_ring->ps_ring_physaddr);
2715 
2716 		rx_ring->ps_ring_virtaddr = NULL;
2717 	}
2718 
2719 	/* Free area of memory for the writeback of status information */
2720 	if (rx_ring->rx_status_block) {
2721 		dma_free_coherent(&adapter->pdev->dev,
2722 			sizeof(struct rx_status_block),
2723 			rx_ring->rx_status_block, rx_ring->rx_status_bus);
2724 		rx_ring->rx_status_block = NULL;
2725 	}
2726 
2727 	/* Destroy the lookaside (RFD) pool */
2728 	if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2729 		kmem_cache_destroy(rx_ring->recv_lookaside);
2730 		adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2731 	}
2732 
2733 	/* Free the FBR Lookup Table */
2734 #ifdef USE_FBR0
2735 	kfree(rx_ring->fbr[1]);
2736 #endif
2737 
2738 	kfree(rx_ring->fbr[0]);
2739 
2740 	/* Reset Counters */
2741 	rx_ring->num_ready_recv = 0;
2742 }
2743 
2744 /**
2745  * et131x_init_recv - Initialize receive data structures.
2746  * @adapter: pointer to our private adapter structure
2747  *
2748  * Returns 0 on success and errno on failure (as defined in errno.h)
2749  */
2750 static int et131x_init_recv(struct et131x_adapter *adapter)
2751 {
2752 	int status = -ENOMEM;
2753 	struct rfd *rfd = NULL;
2754 	u32 rfdct;
2755 	u32 numrfd = 0;
2756 	struct rx_ring *rx_ring;
2757 
2758 	/* Setup some convenience pointers */
2759 	rx_ring = &adapter->rx_ring;
2760 
2761 	/* Setup each RFD */
2762 	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2763 		rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2764 						     GFP_ATOMIC | GFP_DMA);
2765 
2766 		if (!rfd) {
2767 			dev_err(&adapter->pdev->dev,
2768 				  "Couldn't alloc RFD out of kmem_cache\n");
2769 			status = -ENOMEM;
2770 			continue;
2771 		}
2772 
2773 		rfd->skb = NULL;
2774 
2775 		/* Add this RFD to the recv_list */
2776 		list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2777 
2778 		/* Increment both the available RFD's, and the total RFD's. */
2779 		rx_ring->num_ready_recv++;
2780 		numrfd++;
2781 	}
2782 
2783 	if (numrfd > NIC_MIN_NUM_RFD)
2784 		status = 0;
2785 
2786 	rx_ring->num_rfd = numrfd;
2787 
2788 	if (status != 0) {
2789 		kmem_cache_free(rx_ring->recv_lookaside, rfd);
2790 		dev_err(&adapter->pdev->dev,
2791 			  "Allocation problems in et131x_init_recv\n");
2792 	}
2793 	return status;
2794 }
2795 
2796 /**
2797  * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2798  * @adapter: pointer to our adapter structure
2799  */
2800 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2801 {
2802 	struct phy_device *phydev = adapter->phydev;
2803 
2804 	if (!phydev)
2805 		return;
2806 
2807 	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
2808 	 * Mbit/s line rates, and we do not enable RxDMA interrupt coalescing.
2809 	 */
2810 	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
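		/* That is: interrupt after every completed packet, with no
		 * time-based coalescing, at 10/100 line rates.
		 */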
2811 		writel(0, &adapter->regs->rxdma.max_pkt_time);
2812 		writel(1, &adapter->regs->rxdma.num_pkt_done);
2813 	}
2814 }
2815 
2816 /**
2817  * nic_return_rfd - Recycle an RFD and put it back onto the receive list
2818  * @adapter: pointer to our adapter
2819  * @rfd: pointer to the RFD
2820  */
2821 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2822 {
2823 	struct rx_ring *rx_local = &adapter->rx_ring;
2824 	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2825 	u16 buff_index = rfd->bufferindex;
2826 	u8 ring_index = rfd->ringindex;
2827 	unsigned long flags;
2828 
2829 	/* We don't use any of the OOB data besides status. Otherwise, we
2830 	 * need to clean up OOB data
2831 	 */
2832 	if (
2833 #ifdef USE_FBR0
2834 	    (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2835 #endif
2836 	    (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2837 		spin_lock_irqsave(&adapter->fbr_lock, flags);
2838 
2839 		if (ring_index == 1) {
2840 			struct fbr_desc *next = (struct fbr_desc *)
2841 					(rx_local->fbr[0]->ring_virtaddr) +
2842 					INDEX10(rx_local->fbr[0]->local_full);
2843 
2844 			/* Handle the Free Buffer Ring advancement here. Write
2845 			 * the PA / Buffer Index for the returned buffer into
2846 			 * the oldest (next to be freed) FBR entry
2847 			 */
2848 			next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2849 			next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2850 			next->word2 = buff_index;
2851 
2852 			writel(bump_free_buff_ring(
2853 					&rx_local->fbr[0]->local_full,
2854 					rx_local->fbr[0]->num_entries - 1),
2855 					&rx_dma->fbr1_full_offset);
2856 		}
2857 #ifdef USE_FBR0
2858 		else {
2859 			struct fbr_desc *next = (struct fbr_desc *)
2860 				rx_local->fbr[1]->ring_virtaddr +
2861 				    INDEX10(rx_local->fbr[1]->local_full);
2862 
2863 			/* Handle the Free Buffer Ring advancement here. Write
2864 			 * the PA / Buffer Index for the returned buffer into
2865 			 * the oldest (next to be freed) FBR entry
2866 			 */
2867 			next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2868 			next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2869 			next->word2 = buff_index;
2870 
2871 			writel(bump_free_buff_ring(
2872 					&rx_local->fbr[1]->local_full,
2873 					rx_local->fbr[1]->num_entries - 1),
2874 			       &rx_dma->fbr0_full_offset);
2875 		}
2876 #endif
2877 		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2878 	} else {
2879 		dev_err(&adapter->pdev->dev,
2880 			  "%s illegal Buffer Index returned\n", __func__);
2881 	}
2882 
2883 	/* The processing on this RFD is done, so put it back on the tail of
2884 	 * our list
2885 	 */
2886 	spin_lock_irqsave(&adapter->rcv_lock, flags);
2887 	list_add_tail(&rfd->list_node, &rx_local->recv_list);
2888 	rx_local->num_ready_recv++;
2889 	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2890 
2891 	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2892 }
2893 
2894 /**
2895  * nic_rx_pkts - Checks the hardware for available packets
2896  * @adapter: pointer to our adapter
2897  *
2898  * Returns rfd, a pointer to our MPRFD.
2899  *
2900  * Checks the hardware for available packets, using completion ring
2901  * If packets are available, it gets an RFD from the recv_list, attaches
2902  * the packet to it, puts the RFD in the RecvPendList, and also returns
2903  * the pointer to the RFD.
2904  */
2905 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2906 {
2907 	struct rx_ring *rx_local = &adapter->rx_ring;
2908 	struct rx_status_block *status;
2909 	struct pkt_stat_desc *psr;
2910 	struct rfd *rfd;
2911 	u32 i;
2912 	u8 *buf;
2913 	unsigned long flags;
2914 	struct list_head *element;
2915 	u8 ring_index;
2916 	u16 buff_index;
2917 	u32 len;
2918 	u32 word0;
2919 	u32 word1;
2920 
2921 	/* RX Status block is written by the DMA engine prior to every
2922 	 * interrupt. It contains the next to be used entry in the Packet
2923 	 * Status Ring, and also the two Free Buffer rings.
2924 	 */
2925 	status = rx_local->rx_status_block;
2926 	word1 = status->word1 >> 16;	/* Get the useful bits */
2927 
2928 	/* If the PSR index and wrap bit still match our local copy, nothing new */
2929 	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2930 		/* Looks like this ring is not updated yet */
2931 		return NULL;
2932 
2933 	/* The packet status ring indicates that data is available. */
2934 	psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2935 			(rx_local->local_psr_full & 0xFFF);
2936 
2937 	/* Grab any information that is required once the PSR is
2938 	 * advanced, since we can no longer rely on the memory being
2939 	 * accurate
2940 	 */
2941 	len = psr->word1 & 0xFFFF;
2942 	ring_index = (psr->word1 >> 26) & 0x03;
2943 	buff_index = (psr->word1 >> 16) & 0x3FF;
2944 	word0 = psr->word0;
2945 
2946 	/* Indicate that we have used this PSR entry. */
2947 	/* FIXME wrap 12 */
2948 	add_12bit(&rx_local->local_psr_full, 1);
2949 	if (
2950 	  (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2951 		/* Clear psr full and toggle the wrap bit */
2952 		rx_local->local_psr_full &=  ~0xFFF;
2953 		rx_local->local_psr_full ^= 0x1000;
2954 	}
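	/* local_psr_full mirrors the hardware offset: bits 0-11 hold the
	 * entry index and bit 0x1000 is the wrap flag, analogous to the
	 * 10-bit scheme used for the free buffer rings.
	 */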
2955 
2956 	writel(rx_local->local_psr_full,
2957 	       &adapter->regs->rxdma.psr_full_offset);
2958 
2959 #ifndef USE_FBR0
2960 	if (ring_index != 1)
2961 		return NULL;
2962 #endif
2963 
2964 #ifdef USE_FBR0
2965 	if (ring_index > 1 ||
2966 		(ring_index == 0 &&
2967 		buff_index > rx_local->fbr[1]->num_entries - 1) ||
2968 		(ring_index == 1 &&
2969 		buff_index > rx_local->fbr[0]->num_entries - 1))
2970 #else
2971 	if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1)
2972 #endif
2973 	{
2974 		/* Illegal buffer or ring index cannot be used by S/W */
2975 		dev_err(&adapter->pdev->dev,
2976 			  "NICRxPkts PSR Entry %d indicates "
2977 			  "length of %d and/or bad bi(%d)\n",
2978 			  rx_local->local_psr_full & 0xFFF,
2979 			  len, buff_index);
2980 		return NULL;
2981 	}
2982 
2983 	/* Get and fill the RFD. */
2984 	spin_lock_irqsave(&adapter->rcv_lock, flags);
2985 
2986 	rfd = NULL;
2987 	element = rx_local->recv_list.next;
2988 	rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2989 
2990 	if (rfd == NULL) {
2991 		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2992 		return NULL;
2993 	}
2994 
2995 	list_del(&rfd->list_node);
2996 	rx_local->num_ready_recv--;
2997 
2998 	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2999 
3000 	rfd->bufferindex = buff_index;
3001 	rfd->ringindex = ring_index;
3002 
3003 	/* In V1 silicon, there is a bug which screws up filtering of
3004 	 * runt packets.  Therefore runt packet filtering is disabled
3005 	 * in the MAC and the packets are dropped here.  They are
3006 	 * also counted here.
3007 	 */
3008 	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3009 		adapter->stats.rx_other_errs++;
3010 		len = 0;
3011 	}
3012 
3013 	if (len) {
3014 		/* Determine if this is a multicast packet coming in */
3015 		if ((word0 & ALCATEL_MULTICAST_PKT) &&
3016 		    !(word0 & ALCATEL_BROADCAST_PKT)) {
3017 			/* Promiscuous mode and Multicast mode are
3018 			 * not mutually exclusive as was first
3019 			 * thought.  I guess Promiscuous is just
3020 			 * considered a super-set of the other
3021 			 * filters. Generally filter is 0x2b when in
3022 			 * promiscuous mode.
3023 			 */
3024 			if ((adapter->packet_filter &
3025 					ET131X_PACKET_TYPE_MULTICAST)
3026 			    && !(adapter->packet_filter &
3027 					ET131X_PACKET_TYPE_PROMISCUOUS)
3028 			    && !(adapter->packet_filter &
3029 					ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3030 				/*
3031 				 * Note - ring_index for fbr[] array is reversed
3032 				 * 1 for FBR0 etc
3033 				 */
3034 				buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3035 						virt[buff_index];
3036 
3037 				/* Loop through our list to see if the
3038 				 * destination address of this packet
3039 				 * matches one in our list.
3040 				 */
3041 				for (i = 0; i < adapter->multicast_addr_count;
3042 				     i++) {
3043 					if (buf[0] ==
3044 						adapter->multicast_list[i][0]
3045 					    && buf[1] ==
3046 						adapter->multicast_list[i][1]
3047 					    && buf[2] ==
3048 						adapter->multicast_list[i][2]
3049 					    && buf[3] ==
3050 						adapter->multicast_list[i][3]
3051 					    && buf[4] ==
3052 						adapter->multicast_list[i][4]
3053 					    && buf[5] ==
3054 						adapter->multicast_list[i][5]) {
3055 						break;
3056 					}
3057 				}
3058 
3059 				/* If our index is equal to the number
3060 				 * of Multicast address we have, then
3061 				 * this means we did not find this
3062 				 * packet's matching address in our
3063 				 * list.  Set the len to zero,
3064 				 * so we free our RFD when we return
3065 				 * from this function.
3066 				 */
3067 				if (i == adapter->multicast_addr_count)
3068 					len = 0;
3069 			}
3070 
3071 			if (len > 0)
3072 				adapter->stats.multicast_pkts_rcvd++;
3073 		} else if (word0 & ALCATEL_BROADCAST_PKT)
3074 			adapter->stats.broadcast_pkts_rcvd++;
3075 		else
3076 			/* Not sure what this counter measures in
3077 			 * promiscuous mode. Perhaps we should check
3078 			 * the MAC address to see if it is directed
3079 			 * to us in promiscuous mode.
3080 			 */
3081 			adapter->stats.unicast_pkts_rcvd++;
3082 	}
3083 
3084 	if (len > 0) {
3085 		struct sk_buff *skb = NULL;
3086 
3087 		/*rfd->len = len - 4; */
3088 		rfd->len = len;
3089 
3090 		skb = dev_alloc_skb(rfd->len + 2);
3091 		if (!skb) {
3092 			dev_err(&adapter->pdev->dev,
3093 				  "Couldn't alloc an SKB for Rx\n");
3094 			return NULL;
3095 		}
3096 
3097 		adapter->net_stats.rx_bytes += rfd->len;
3098 
3099 		/*
3100 		 * Note - ring_index for fbr[] array is reversed,
3101 		 * 1 for FBR0 etc
3102 		 */
3103 		memcpy(skb_put(skb, rfd->len),
3104 		    rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3105 		    rfd->len);
3106 
3107 		skb->dev = adapter->netdev;
3108 		skb->protocol = eth_type_trans(skb, adapter->netdev);
3109 		skb->ip_summed = CHECKSUM_NONE;
3110 
3111 		netif_rx_ni(skb);
3112 	} else {
3113 		rfd->len = 0;
3114 	}
3115 
3116 	nic_return_rfd(adapter, rfd);
3117 	return rfd;
3118 }
3119 
3120 /**
3121  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
3122  * @adapter: pointer to our adapter
3123  *
3124  * Assumes the receive (Rcv) spinlock has been acquired.
3125  */
3126 static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3127 {
3128 	struct rfd *rfd = NULL;
3129 	u32 count = 0;
3130 	bool done = true;
3131 
3132 	/* Process available RFDs, up to NUM_PACKETS_HANDLED per call */
3133 	while (count < NUM_PACKETS_HANDLED) {
3134 		if (list_empty(&adapter->rx_ring.recv_list)) {
3135 			WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3136 			done = false;
3137 			break;
3138 		}
3139 
3140 		rfd = nic_rx_pkts(adapter);
3141 
3142 		if (rfd == NULL)
3143 			break;
3144 
3145 		/* Do not receive any packets until a filter has been set.
3146 		 * Do not receive any packets until we have link.
3147 		 * If length is zero, return the RFD in order to advance the
3148 		 * Free buffer ring.
3149 		 */
3150 		if (!adapter->packet_filter ||
3151 		    !netif_carrier_ok(adapter->netdev) ||
3152 		    rfd->len == 0)
3153 			continue;
3154 
3155 		/* Increment the number of packets we received */
3156 		adapter->net_stats.rx_packets++;
3157 
3158 		/* Set the status on the packet, either resources or success */
3159 		if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
3160 			dev_warn(&adapter->pdev->dev,
3161 				    "RFD's are running out\n");
3162 		}
3163 		count++;
3164 	}
3165 
3166 	if (count == NUM_PACKETS_HANDLED || !done) {
3167 		adapter->rx_ring.unfinished_receives = true;
3168 		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3169 		       &adapter->regs->global.watchdog_timer);
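		/* Re-arming the watchdog should bring us back into the ISR
		 * shortly so the packets we did not get to are handled then.
		 */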
3170 	} else
3171 		/* Watchdog timer will disable itself if appropriate. */
3172 		adapter->rx_ring.unfinished_receives = false;
3173 }
3174 
3175 /**
3176  * et131x_tx_dma_memory_alloc
3177  * @adapter: pointer to our private adapter structure
3178  *
3179  * Returns 0 on success and errno on failure (as defined in errno.h).
3180  *
3181  * Allocates memory that will be visible both to the device and to the CPU.
3182  * The OS will pass us packets, pointers to which we will insert in the Tx
3183  * Descriptor queue. The device will read this queue to find the packets in
3184  * memory. The device will update the "status" in memory each time it xmits a
3185  * packet.
3186  */
3187 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3188 {
3189 	int desc_size = 0;
3190 	struct tx_ring *tx_ring = &adapter->tx_ring;
3191 
3192 	/* Allocate memory for the TCB's (Transmit Control Block) */
3193 	adapter->tx_ring.tcb_ring =
3194 		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3195 	if (!adapter->tx_ring.tcb_ring) {
3196 		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3197 		return -ENOMEM;
3198 	}
3199 
3200 	/* Allocate enough memory for the Tx descriptor ring, and allocate
3201 	 * some extra so that the ring can be aligned on a 4k boundary.
3202 	 */
3203 	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3204 	tx_ring->tx_desc_ring =
3205 	    (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3206 						  desc_size,
3207 						  &tx_ring->tx_desc_ring_pa,
3208 						  GFP_KERNEL);
3209 	if (!adapter->tx_ring.tx_desc_ring) {
3210 		dev_err(&adapter->pdev->dev,
3211 			"Cannot alloc memory for Tx Ring\n");
3212 		return -ENOMEM;
3213 	}
3214 
3215 	/* Save physical address
3216 	 *
3217 	 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
3218 	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
3219 	 * are ever returned, make sure the high part is retrieved here before
3220 	 * storing the adjusted address.
3221 	 */
3222 	/* Allocate memory for the Tx status block */
3223 	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3224 						    sizeof(u32),
3225 						    &tx_ring->tx_status_pa,
3226 						    GFP_KERNEL);
3227 	if (!adapter->tx_ring.tx_status_pa) {
3228 		dev_err(&adapter->pdev->dev,
3229 				  "Cannot alloc memory for Tx status block\n");
3230 		return -ENOMEM;
3231 	}
3232 	return 0;
3233 }
3234 
3235 /**
3236  * et131x_tx_dma_memory_free - Free all memory allocated within this module
3237  * @adapter: pointer to our private adapter structure
3240  */
3241 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3242 {
3243 	int desc_size = 0;
3244 
3245 	if (adapter->tx_ring.tx_desc_ring) {
3246 		/* Free memory relating to Tx rings here */
3247 		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3248 								+ 4096 - 1;
3249 		dma_free_coherent(&adapter->pdev->dev,
3250 				    desc_size,
3251 				    adapter->tx_ring.tx_desc_ring,
3252 				    adapter->tx_ring.tx_desc_ring_pa);
3253 		adapter->tx_ring.tx_desc_ring = NULL;
3254 	}
3255 
3256 	/* Free memory for the Tx status block */
3257 	if (adapter->tx_ring.tx_status) {
3258 		dma_free_coherent(&adapter->pdev->dev,
3259 				    sizeof(u32),
3260 				    adapter->tx_ring.tx_status,
3261 				    adapter->tx_ring.tx_status_pa);
3262 
3263 		adapter->tx_ring.tx_status = NULL;
3264 	}
3265 	/* Free the memory for the tcb structures */
3266 	kfree(adapter->tx_ring.tcb_ring);
3267 }
3268 
3269 /**
3270  * nic_send_packet - NIC specific send handler for version B silicon.
3271  * @adapter: pointer to our adapter
3272  * @tcb: pointer to struct tcb
3273  *
3274  * Returns 0 or errno.
3275  */
3276 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3277 {
3278 	u32 i;
3279 	struct tx_desc desc[24];	/* 24 x 16 byte */
3280 	u32 frag = 0;
3281 	u32 thiscopy, remainder;
3282 	struct sk_buff *skb = tcb->skb;
3283 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3284 	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3285 	unsigned long flags;
3286 	struct phy_device *phydev = adapter->phydev;
3287 
3288 	/* Part of the optimizations of this send routine restrict us to
3289 	 * sending 24 fragments at a pass.  In practice we should never see
3290 	 * more than 5 fragments.
3291 	 *
3292 	 * NOTE: The older version of this function (below) can handle any
3293 	 * number of fragments. If needed, we can call this function,
3294 	 * although it is less efficient.
3295 	 */
3296 	if (nr_frags > 23)
3297 		return -EIO;
3298 
3299 	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3300 
3301 	for (i = 0; i < nr_frags; i++) {
3302 		/* If there is something in this element, lets get a
3303 		 * descriptor from the ring and get the necessary data
3304 		 */
3305 		if (i == 0) {
3306 			/* If the fragments are smaller than a standard MTU,
3307 			 * then map them to a single descriptor in the Tx
3308 			 * Desc ring. However, if they're larger, as is
3309 			 * possible with support for jumbo packets, then
3310 			 * split them each across 2 descriptors.
3311 			 *
3312 			 * This will work until we determine why the hardware
3313 			 * doesn't seem to like large fragments.
3314 			 */
3315 			if ((skb->len - skb->data_len) <= 1514) {
3316 				desc[frag].addr_hi = 0;
3317 				/* Low 16bits are length, high is vlan and
3318 				   unused currently so zero */
3319 				desc[frag].len_vlan =
3320 					skb->len - skb->data_len;
3321 
3322 				/* NOTE: Here, the dma_addr_t returned from
3323 				 * dma_map_single() is implicitly cast as a
3324 				 * u32. Although dma_addr_t can be
3325 				 * 64-bit, the address returned by
3326 				 * dma_map_single() is always 32-bit
3327 				 * addressable (as defined by the pci/dma
3328 				 * subsystem)
3329 				 */
3330 				desc[frag++].addr_lo =
3331 				    dma_map_single(&adapter->pdev->dev,
3332 						   skb->data,
3333 						   skb->len -
3334 						   skb->data_len,
3335 						   DMA_TO_DEVICE);
3336 			} else {
3337 				desc[frag].addr_hi = 0;
3338 				desc[frag].len_vlan =
3339 				    (skb->len - skb->data_len) / 2;
3340 
3341 				/* NOTE: Here, the dma_addr_t returned from
3342 				 * dma_map_single() is implicitly cast as a
3343 				 * u32. Although dma_addr_t can be
3344 				 * 64-bit, the address returned by
3345 				 * dma_map_single() is always 32-bit
3346 				 * addressable (as defined by the pci/dma
3347 				 * subsystem)
3348 				 */
3349 				desc[frag++].addr_lo =
3350 				    dma_map_single(&adapter->pdev->dev,
3351 						   skb->data,
3352 						   ((skb->len -
3353 						     skb->data_len) / 2),
3354 						   DMA_TO_DEVICE);
3355 				desc[frag].addr_hi = 0;
3356 
3357 				desc[frag].len_vlan =
3358 				    (skb->len - skb->data_len) / 2;
3359 
3360 				/* NOTE: Here, the dma_addr_t returned from
3361 				 * dma_map_single() is implicitly cast as a
3362 				 * u32. Although dma_addr_t can be
3363 				 * 64-bit, the address returned by
3364 				 * dma_map_single() is always 32-bit
3365 				 * addressable (as defined by the pci/dma
3366 				 * subsystem)
3367 				 */
3368 				desc[frag++].addr_lo =
3369 				    dma_map_single(&adapter->pdev->dev,
3370 						   skb->data +
3371 						   ((skb->len -
3372 						     skb->data_len) / 2),
3373 						   ((skb->len -
3374 						     skb->data_len) / 2),
3375 						   DMA_TO_DEVICE);
3376 			}
3377 		} else {
3378 			desc[frag].addr_hi = 0;
3379 			desc[frag].len_vlan =
3380 					frags[i - 1].size;
3381 
3382 			/* NOTE: Here, the dma_addr_t returned from
3383 			 * dma_map_page() is implicitly cast as a u32.
3384 			 * Although dma_addr_t can be 64-bit, the address
3385 			 * returned by dma_map_page() is always 32-bit
3386 			 * addressable (as defined by the pci/dma subsystem)
3387 			 */
3388 			desc[frag++].addr_lo = skb_frag_dma_map(
3389 							&adapter->pdev->dev,
3390 							&frags[i - 1],
3391 							0,
3392 							frags[i - 1].size,
3393 							DMA_TO_DEVICE);
3394 		}
3395 	}
3396 
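	/* Descriptor 'flags' bits, as used below: bit 0 marks the last
	 * fragment, bit 1 the first fragment, and bit 2 asks the device to
	 * raise an interrupt on completion (hence 0x5 for "last + interrupt"
	 * and 0x1 for "last" only).
	 */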
3397 	if (phydev && phydev->speed == SPEED_1000) {
3398 		if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3399 			/* Last element & Interrupt flag */
3400 			desc[frag - 1].flags = 0x5;
3401 			adapter->tx_ring.since_irq = 0;
3402 		} else { /* Last element */
3403 			desc[frag - 1].flags = 0x1;
3404 		}
3405 	} else
3406 		desc[frag - 1].flags = 0x5;
3407 
3408 	desc[0].flags |= 2;	/* First element flag */
3409 
3410 	tcb->index_start = adapter->tx_ring.send_idx;
3411 	tcb->stale = 0;
3412 
3413 	spin_lock_irqsave(&adapter->send_hw_lock, flags);
3414 
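	/* send_idx is a 10-bit ring index (extracted with INDEX10) plus a
	 * wrap bit (ET_DMA10_WRAP) that toggles each time the index passes
	 * the end of the ring, presumably so a full ring can be told apart
	 * from an empty one. Copy the descriptors in one or two chunks,
	 * depending on whether they wrap past the end of the ring.
	 */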
3415 	thiscopy = NUM_DESC_PER_RING_TX -
3416 				INDEX10(adapter->tx_ring.send_idx);
3417 
3418 	if (thiscopy >= frag) {
3419 		remainder = 0;
3420 		thiscopy = frag;
3421 	} else {
3422 		remainder = frag - thiscopy;
3423 	}
3424 
3425 	memcpy(adapter->tx_ring.tx_desc_ring +
3426 	       INDEX10(adapter->tx_ring.send_idx), desc,
3427 	       sizeof(struct tx_desc) * thiscopy);
3428 
3429 	add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3430 
3431 	if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3432 		  INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3433 		adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3434 		adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3435 	}
3436 
3437 	if (remainder) {
3438 		memcpy(adapter->tx_ring.tx_desc_ring,
3439 		       desc + thiscopy,
3440 		       sizeof(struct tx_desc) * remainder);
3441 
3442 		add_10bit(&adapter->tx_ring.send_idx, remainder);
3443 	}
3444 
3445 	if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3446 		if (adapter->tx_ring.send_idx)
3447 			tcb->index = NUM_DESC_PER_RING_TX - 1;
3448 		else
3449 			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3450 	} else
3451 		tcb->index = adapter->tx_ring.send_idx - 1;
3452 
3453 	spin_lock(&adapter->tcb_send_qlock);
3454 
3455 	if (adapter->tx_ring.send_tail)
3456 		adapter->tx_ring.send_tail->next = tcb;
3457 	else
3458 		adapter->tx_ring.send_head = tcb;
3459 
3460 	adapter->tx_ring.send_tail = tcb;
3461 
3462 	WARN_ON(tcb->next != NULL);
3463 
3464 	adapter->tx_ring.used++;
3465 
3466 	spin_unlock(&adapter->tcb_send_qlock);
3467 
3468 	/* Write the new write pointer back to the device. */
3469 	writel(adapter->tx_ring.send_idx,
3470 	       &adapter->regs->txdma.service_request);
3471 
3472 	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
3473 	 * timer to wake us up if this packet isn't followed by N more.
3474 	 */
3475 	if (phydev && phydev->speed == SPEED_1000) {
3476 		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3477 		       &adapter->regs->global.watchdog_timer);
3478 	}
3479 	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3480 
3481 	return 0;
3482 }
3483 
3484 /**
3485  * send_packet - Do the work to send a packet
3486  * @skb: the packet(s) to send
3487  * @adapter: a pointer to the device's private adapter structure
3488  *
3489  * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3490  *
3491  * Assumption: Send spinlock has been acquired
3492  */
3493 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3494 {
3495 	int status;
3496 	struct tcb *tcb = NULL;
3497 	u16 *shbufva;
3498 	unsigned long flags;
3499 
3500 	/* All packets must have at least a MAC address and a protocol type */
3501 	if (skb->len < ETH_HLEN)
3502 		return -EIO;
3503 
3504 	/* Get a TCB for this packet */
3505 	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3506 
3507 	tcb = adapter->tx_ring.tcb_qhead;
3508 
3509 	if (tcb == NULL) {
3510 		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3511 		return -ENOMEM;
3512 	}
3513 
3514 	adapter->tx_ring.tcb_qhead = tcb->next;
3515 
3516 	if (adapter->tx_ring.tcb_qhead == NULL)
3517 		adapter->tx_ring.tcb_qtail = NULL;
3518 
3519 	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3520 
3521 	tcb->skb = skb;
3522 
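	/* Peek at the destination MAC address so the broadcast/multicast
	 * transmit counters can be updated when the packet completes.
	 */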
3523 	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3524 		shbufva = (u16 *) skb->data;
3525 
3526 		if ((shbufva[0] == 0xffff) &&
3527 		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3528 			tcb->flags |= fMP_DEST_BROAD;
3529 		} else if ((shbufva[0] & 0x3) == 0x0001) {
3530 			tcb->flags |=  fMP_DEST_MULTI;
3531 		}
3532 	}
3533 
3534 	tcb->next = NULL;
3535 
3536 	/* Call the NIC specific send handler. */
3537 	status = nic_send_packet(adapter, tcb);
3538 
3539 	if (status != 0) {
3540 		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3541 
3542 		if (adapter->tx_ring.tcb_qtail)
3543 			adapter->tx_ring.tcb_qtail->next = tcb;
3544 		else
3545 			/* Apparently ready Q is empty. */
3546 			adapter->tx_ring.tcb_qhead = tcb;
3547 
3548 		adapter->tx_ring.tcb_qtail = tcb;
3549 		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3550 		return status;
3551 	}
3552 	WARN_ON(adapter->tx_ring.used > NUM_TCB);
3553 	return 0;
3554 }
3555 
3556 /**
3557  * et131x_send_packets - This function is called by the OS to send packets
3558  * @skb: the packet(s) to send
3559  * @netdev: device on which to TX the above packet(s)
3560  *
3561  * Return 0 in almost all cases; non-zero value in extreme hard failure only
3562  */
3563 static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3564 {
3565 	int status = 0;
3566 	struct et131x_adapter *adapter = netdev_priv(netdev);
3567 
3568 	/* Send these packets
3569 	 *
3570 	 * NOTE: The Linux Tx entry point is only given one packet at a time
3571 	 * to Tx, so the PacketCount and its associated array make no sense here
3572 	 */
3573 
3574 	/* TCB is not available */
3575 	if (adapter->tx_ring.used >= NUM_TCB) {
3576 		/* NOTE: If there's an error on send, no need to queue the
3577 		 * packet under Linux; if we just send an error up to the
3578 		 * netif layer, it will resend the skb to us.
3579 		 */
3580 		status = -ENOMEM;
3581 	} else {
3582 		/* We need to see if the link is up; if it's not, make the
3583 		 * netif layer think we're good and drop the packet
3584 		 */
3585 		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3586 					!netif_carrier_ok(netdev)) {
3587 			dev_kfree_skb_any(skb);
3588 			skb = NULL;
3589 
3590 			adapter->net_stats.tx_dropped++;
3591 		} else {
3592 			status = send_packet(skb, adapter);
3593 			if (status != 0 && status != -ENOMEM) {
3594 				/* On any other error, make netif think we're
3595 				 * OK and drop the packet
3596 				 */
3597 				dev_kfree_skb_any(skb);
3598 				skb = NULL;
3599 				adapter->net_stats.tx_dropped++;
3600 			}
3601 		}
3602 	}
3603 	return status;
3604 }
3605 
3606 /**
3607  * free_send_packet - Recycle a struct tcb
3608  * @adapter: pointer to our adapter
3609  * @tcb: pointer to struct tcb
3610  *
3611  * Complete the packet if necessary
3612  * Assumption - Send spinlock has been acquired
3613  */
3614 static inline void free_send_packet(struct et131x_adapter *adapter,
3615 						struct tcb *tcb)
3616 {
3617 	unsigned long flags;
3618 	struct tx_desc *desc = NULL;
3619 	struct net_device_stats *stats = &adapter->net_stats;
3620 
3621 	if (tcb->flags & fMP_DEST_BROAD)
3622 		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3623 	else if (tcb->flags & fMP_DEST_MULTI)
3624 		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3625 	else
3626 		atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3627 
3628 	if (tcb->skb) {
3629 		stats->tx_bytes += tcb->skb->len;
3630 
3631 		/* Iterate through the TX descriptors on the ring
3632 		 * corresponding to this packet and unmap the fragments
3633 		 * they point to
3634 		 */
3635 		do {
3636 			desc = (struct tx_desc *)
3637 				    (adapter->tx_ring.tx_desc_ring +
3638 						INDEX10(tcb->index_start));
3639 
3640 			dma_unmap_single(&adapter->pdev->dev,
3641 					 desc->addr_lo,
3642 					 desc->len_vlan, DMA_TO_DEVICE);
3643 
3644 			add_10bit(&tcb->index_start, 1);
3645 			if (INDEX10(tcb->index_start) >=
3646 							NUM_DESC_PER_RING_TX) {
3647 				tcb->index_start &= ~ET_DMA10_MASK;
3648 				tcb->index_start ^= ET_DMA10_WRAP;
3649 			}
3650 		} while (desc != (adapter->tx_ring.tx_desc_ring +
3651 				INDEX10(tcb->index)));
3652 
3653 		dev_kfree_skb_any(tcb->skb);
3654 	}
3655 
3656 	memset(tcb, 0, sizeof(struct tcb));
3657 
3658 	/* Add the TCB to the Ready Q */
3659 	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3660 
3661 	adapter->net_stats.tx_packets++;
3662 
3663 	if (adapter->tx_ring.tcb_qtail)
3664 		adapter->tx_ring.tcb_qtail->next = tcb;
3665 	else
3666 		/* Apparently ready Q is empty. */
3667 		adapter->tx_ring.tcb_qhead = tcb;
3668 
3669 	adapter->tx_ring.tcb_qtail = tcb;
3670 
3671 	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3672 	WARN_ON(adapter->tx_ring.used < 0);
3673 }
3674 
3675 /**
3676  * et131x_free_busy_send_packets - Free and complete the stopped active sends
3677  * @adapter: pointer to our adapter
3678  *
3679  * Assumption - Send spinlock has been acquired
3680  */
3681 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3682 {
3683 	struct tcb *tcb;
3684 	unsigned long flags;
3685 	u32 freed = 0;
3686 
3687 	/* Any packets being sent? Check the first TCB on the send list */
3688 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3689 
3690 	tcb = adapter->tx_ring.send_head;
3691 
3692 	while (tcb != NULL && freed < NUM_TCB) {
3693 		struct tcb *next = tcb->next;
3694 
3695 		adapter->tx_ring.send_head = next;
3696 
3697 		if (next == NULL)
3698 			adapter->tx_ring.send_tail = NULL;
3699 
3700 		adapter->tx_ring.used--;
3701 
3702 		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3703 
3704 		freed++;
3705 		free_send_packet(adapter, tcb);
3706 
3707 		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3708 
3709 		tcb = adapter->tx_ring.send_head;
3710 	}
3711 
3712 	WARN_ON(freed == NUM_TCB);
3713 
3714 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3715 
3716 	adapter->tx_ring.used = 0;
3717 }
3718 
3719 /**
3720  * et131x_handle_send_interrupt - Interrupt handler for sending processing
3721  * @adapter: pointer to our adapter
3722  *
3723  * Re-claim the send resources, complete sends and get more to send from
3724  * the send wait queue.
3725  *
3726  * Assumption - Send spinlock has been acquired
3727  */
3728 static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3729 {
3730 	unsigned long flags;
3731 	u32 serviced;
3732 	struct tcb *tcb;
3733 	u32 index;
3734 
3735 	serviced = readl(&adapter->regs->txdma.new_service_complete);
3736 	index = INDEX10(serviced);
3737 
3738 	/* Has the ring wrapped?  Process any descriptors that do not have
3739 	 * the same "wrap" indicator as the current completion indicator
3740 	 */
3741 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3742 
3743 	tcb = adapter->tx_ring.send_head;
3744 
3745 	while (tcb &&
3746 	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3747 	       index < INDEX10(tcb->index)) {
3748 		adapter->tx_ring.used--;
3749 		adapter->tx_ring.send_head = tcb->next;
3750 		if (tcb->next == NULL)
3751 			adapter->tx_ring.send_tail = NULL;
3752 
3753 		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3754 		free_send_packet(adapter, tcb);
3755 		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3756 
3757 		/* Go to the next packet */
3758 		tcb = adapter->tx_ring.send_head;
3759 	}
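	/* The loop above reaps descriptors left over from the previous wrap
	 * of the ring; this one reaps those on the current wrap, up to the
	 * index the hardware reports as serviced.
	 */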
3760 	while (tcb &&
3761 	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3762 	       && index > (tcb->index & ET_DMA10_MASK)) {
3763 		adapter->tx_ring.used--;
3764 		adapter->tx_ring.send_head = tcb->next;
3765 		if (tcb->next == NULL)
3766 			adapter->tx_ring.send_tail = NULL;
3767 
3768 		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3769 		free_send_packet(adapter, tcb);
3770 		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3771 
3772 		/* Go to the next packet */
3773 		tcb = adapter->tx_ring.send_head;
3774 	}
3775 
3776 	/* Wake up the queue when we hit a low-water mark */
3777 	if (adapter->tx_ring.used <= NUM_TCB / 3)
3778 		netif_wake_queue(adapter->netdev);
3779 
3780 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3781 }
3782 
3783 static int et131x_get_settings(struct net_device *netdev,
3784 			       struct ethtool_cmd *cmd)
3785 {
3786 	struct et131x_adapter *adapter = netdev_priv(netdev);
3787 
3788 	return phy_ethtool_gset(adapter->phydev, cmd);
3789 }
3790 
3791 static int et131x_set_settings(struct net_device *netdev,
3792 			       struct ethtool_cmd *cmd)
3793 {
3794 	struct et131x_adapter *adapter = netdev_priv(netdev);
3795 
3796 	return phy_ethtool_sset(adapter->phydev, cmd);
3797 }
3798 
3799 static int et131x_get_regs_len(struct net_device *netdev)
3800 {
3801 #define ET131X_REGS_LEN 256
3802 	return ET131X_REGS_LEN * sizeof(u32);
3803 }
3804 
3805 static void et131x_get_regs(struct net_device *netdev,
3806 			    struct ethtool_regs *regs, void *regs_data)
3807 {
3808 	struct et131x_adapter *adapter = netdev_priv(netdev);
3809 	struct address_map __iomem *aregs = adapter->regs;
3810 	u32 *regs_buff = regs_data;
3811 	u32 num = 0;
3812 
3813 	memset(regs_data, 0, et131x_get_regs_len(netdev));
3814 
3815 	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3816 			adapter->pdev->device;
3817 
3818 	/* PHY regs */
3819 	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3820 	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3821 	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3822 	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3823 	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3824 	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3825 	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3826 	/* Autoneg next page transmit reg */
3827 	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3828 	/* Link partner next page reg */
3829 	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3830 	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3831 	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3832 	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3833 	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3834 	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3835 	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3836 			(u16 *)&regs_buff[num++]);
3837 	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3838 			(u16 *)&regs_buff[num++]);
3839 	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3840 			(u16 *)&regs_buff[num++]);
3841 	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3842 			(u16 *)&regs_buff[num++]);
3843 	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3844 	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3845 	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3846 	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3847 			(u16 *)&regs_buff[num++]);
3848 	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3849 	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3850 	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3851 
3852 	/* Global regs */
3853 	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3854 	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3855 	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3856 	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3857 	regs_buff[num++] = readl(&aregs->global.pm_csr);
3858 	regs_buff[num++] = adapter->stats.interrupt_status;
3859 	regs_buff[num++] = readl(&aregs->global.int_mask);
3860 	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3861 	regs_buff[num++] = readl(&aregs->global.int_status_alias);
3862 	regs_buff[num++] = readl(&aregs->global.sw_reset);
3863 	regs_buff[num++] = readl(&aregs->global.slv_timer);
3864 	regs_buff[num++] = readl(&aregs->global.msi_config);
3865 	regs_buff[num++] = readl(&aregs->global.loopback);
3866 	regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3867 
3868 	/* TXDMA regs */
3869 	regs_buff[num++] = readl(&aregs->txdma.csr);
3870 	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3871 	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3872 	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3873 	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3874 	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3875 	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3876 	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3877 	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3878 	regs_buff[num++] = readl(&aregs->txdma.service_request);
3879 	regs_buff[num++] = readl(&aregs->txdma.service_complete);
3880 	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3881 	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3882 	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3883 	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3884 	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3885 	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3886 	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3887 	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3888 	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3889 	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3890 	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3891 	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3892 	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3893 	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3894 	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3895 
3896 	/* RXDMA regs */
3897 	regs_buff[num++] = readl(&aregs->rxdma.csr);
3898 	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3899 	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3900 	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3901 	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3902 	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3903 	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3904 	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3905 	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3906 	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3907 	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3908 	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3909 	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3910 	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3911 	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3912 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3913 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3914 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3915 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3916 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3917 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3918 	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3919 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3920 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3921 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3922 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3923 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3924 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3925 	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3926 }
3927 
3928 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3929 static void et131x_get_drvinfo(struct net_device *netdev,
3930 			       struct ethtool_drvinfo *info)
3931 {
3932 	struct et131x_adapter *adapter = netdev_priv(netdev);
3933 
3934 	strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3935 	strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3936 	strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3937 }
3938 
3939 static struct ethtool_ops et131x_ethtool_ops = {
3940 	.get_settings	= et131x_get_settings,
3941 	.set_settings	= et131x_set_settings,
3942 	.get_drvinfo	= et131x_get_drvinfo,
3943 	.get_regs_len	= et131x_get_regs_len,
3944 	.get_regs	= et131x_get_regs,
3945 	.get_link	= ethtool_op_get_link,
3946 };
3947 
3948 static void et131x_set_ethtool_ops(struct net_device *netdev)
3949 {
3950 	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
3951 }
3952 
3953 /**
3954  * et131x_hwaddr_init - set up the MAC Address on the ET1310
3955  * @adapter: pointer to our private adapter structure
3956  */
3957 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3958 {
3959 	/* If we have our default MAC from init and no MAC address from
3960 	 * EEPROM then we need to generate the last octet and set it on the
3961 	 * device
3962 	 */
3963 	if (adapter->rom_addr[0] == 0x00 &&
3964 	    adapter->rom_addr[1] == 0x00 &&
3965 	    adapter->rom_addr[2] == 0x00 &&
3966 	    adapter->rom_addr[3] == 0x00 &&
3967 	    adapter->rom_addr[4] == 0x00 &&
3968 	    adapter->rom_addr[5] == 0x00) {
3969 		/*
3970 		 * We need to randomly generate the last octet so we
3971 		 * decrease our chances of setting the MAC address to the
3972 		 * same as that of another one of our cards in the system
3973 		 */
3974 		get_random_bytes(&adapter->addr[5], 1);
3975 		/*
3976 		 * We have the default value in the register we are
3977 		 * working with so we need to copy the current
3978 		 * address into the permanent address
3979 		 */
3980 		memcpy(adapter->rom_addr,
3981 			adapter->addr, ETH_ALEN);
3982 	} else {
3983 		/* We do not have an override address, so set the
3984 		 * current address to the permanent address and add
3985 		 * it to the device
3986 		 */
3987 		memcpy(adapter->addr,
3988 		       adapter->rom_addr, ETH_ALEN);
3989 	}
3990 }
3991 
3992 /**
3993  * et131x_pci_init	 - initial PCI setup
3994  * @adapter: pointer to our private adapter structure
3995  * @pdev: our PCI device
3996  *
3997  * Perform the initial setup of PCI registers and if possible initialise
3998  * the MAC address. At this point the I/O registers have yet to be mapped
3999  */
4000 static int et131x_pci_init(struct et131x_adapter *adapter,
4001 						struct pci_dev *pdev)
4002 {
4003 	int cap = pci_pcie_cap(pdev);
4004 	u16 max_payload;
4005 	u16 ctl;
4006 	int i, rc;
4007 
4008 	rc = et131x_init_eeprom(adapter);
4009 	if (rc < 0)
4010 		goto out;
4011 
4012 	if (!cap) {
4013 		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4014 		goto err_out;
4015 	}
4016 
4017 	/* Let's set up the PORT LOGIC Register.  First we need to know what
4018 	 * the max_payload_size is
4019 	 */
4020 	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
4021 		dev_err(&pdev->dev,
4022 		    "Could not read PCI config space for Max Payload Size\n");
4023 		goto err_out;
4024 	}
4025 
4026 	/* Program the Ack/Nak latency and replay timers */
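	/* Only the low three bits of the Device Capabilities value encode
	 * the supported max payload size.
	 */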
4027 	max_payload &= 0x07;
4028 
4029 	if (max_payload < 2) {
4030 		static const u16 acknak[2] = { 0x76, 0xD0 };
4031 		static const u16 replay[2] = { 0x1E0, 0x2ED };
4032 
4033 		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4034 					       acknak[max_payload])) {
4035 			dev_err(&pdev->dev,
4036 			  "Could not write PCI config space for ACK/NAK\n");
4037 			goto err_out;
4038 		}
4039 		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4040 					       replay[max_payload])) {
4041 			dev_err(&pdev->dev,
4042 			  "Could not write PCI config space for Replay Timer\n");
4043 			goto err_out;
4044 		}
4045 	}
4046 
4047 	/* l0s and l1 latency timers.  We are using default values.
4048 	 * Representing 001 for L0s and 010 for L1
4049 	 */
4050 	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4051 		dev_err(&pdev->dev,
4052 		  "Could not write PCI config space for Latency Timers\n");
4053 		goto err_out;
4054 	}
4055 
4056 	/* Change the max read size to 2k */
4057 	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
4058 		dev_err(&pdev->dev,
4059 			"Could not read PCI config space for Max read size\n");
4060 		goto err_out;
4061 	}
4062 
4063 	ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
4064 
4065 	if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
4066 		dev_err(&pdev->dev,
4067 		      "Could not write PCI config space for Max read size\n");
4068 		goto err_out;
4069 	}
4070 
4071 	/* Get MAC address from config space if an eeprom exists, otherwise
4072 	 * the MAC address there will not be valid
4073 	 */
4074 	if (!adapter->has_eeprom) {
4075 		et131x_hwaddr_init(adapter);
4076 		return 0;
4077 	}
4078 
4079 	for (i = 0; i < ETH_ALEN; i++) {
4080 		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4081 					adapter->rom_addr + i)) {
4082 			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4083 			goto err_out;
4084 		}
4085 	}
4086 	memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4087 out:
4088 	return rc;
4089 err_out:
4090 	rc = -EIO;
4091 	goto out;
4092 }
4093 
4094 /**
4095  * et131x_error_timer_handler - The error-timer expiry handler
4096  * @data: timer-specific variable; here a pointer to our adapter structure
4097  *
4098  * The routine called when the error timer expires, to track the number of
4099  * recurring errors.
4100  */
4101 static void et131x_error_timer_handler(unsigned long data)
4102 {
4103 	struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4104 	struct phy_device *phydev = adapter->phydev;
4105 
4106 	if (et1310_in_phy_coma(adapter)) {
4107 		/* Bring the device immediately out of coma, to
4108 		 * prevent it from sleeping indefinitely, this
4109 		 * mechanism could be improved! */
4110 		et1310_disable_phy_coma(adapter);
4111 		adapter->boot_coma = 20;
4112 	} else {
4113 		et1310_update_macstat_host_counters(adapter);
4114 	}
4115 
4116 	if (!phydev->link && adapter->boot_coma < 11)
4117 		adapter->boot_coma++;
4118 
4119 	if (adapter->boot_coma == 10) {
4120 		if (!phydev->link) {
4121 			if (!et1310_in_phy_coma(adapter)) {
4122 				/* NOTE - This was originally a 'sync with
4123 				 *  interrupt'. How to do that under Linux?
4124 				 */
4125 				et131x_enable_interrupts(adapter);
4126 				et1310_enable_phy_coma(adapter);
4127 			}
4128 		}
4129 	}
4130 
4131 	/* This is a periodic timer, so reschedule */
4132 	mod_timer(&adapter->error_timer, jiffies +
4133 					  TX_ERROR_PERIOD * HZ / 1000);
4134 }
4135 
4136 /**
4137  * et131x_adapter_memory_alloc - Allocate all memory blocks used by the driver
4138  * @adapter: pointer to our private adapter structure
4139  *
4140  * Returns 0 on success, errno on failure (as defined in errno.h).
4141  *
4142  * Allocate all the memory blocks for send, receive and others.
4143  */
4144 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4145 {
4146 	int status;
4147 
4148 	/* Allocate memory for the Tx Ring */
4149 	status = et131x_tx_dma_memory_alloc(adapter);
4150 	if (status != 0) {
4151 		dev_err(&adapter->pdev->dev,
4152 			  "et131x_tx_dma_memory_alloc FAILED\n");
4153 		return status;
4154 	}
4155 	/* Receive buffer memory allocation */
4156 	status = et131x_rx_dma_memory_alloc(adapter);
4157 	if (status != 0) {
4158 		dev_err(&adapter->pdev->dev,
4159 			  "et131x_rx_dma_memory_alloc FAILED\n");
4160 		et131x_tx_dma_memory_free(adapter);
4161 		return status;
4162 	}
4163 
4164 	/* Init receive data structures */
4165 	status = et131x_init_recv(adapter);
4166 	if (status != 0) {
4167 		dev_err(&adapter->pdev->dev,
4168 			"et131x_init_recv FAILED\n");
4169 		et131x_tx_dma_memory_free(adapter);
4170 		et131x_rx_dma_memory_free(adapter);
4171 	}
4172 	return status;
4173 }
4174 
4175 /**
4176  * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
4177  * @adapter: pointer to our private adapter structure
4178  */
4179 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4180 {
4181 	/* Free DMA memory */
4182 	et131x_tx_dma_memory_free(adapter);
4183 	et131x_rx_dma_memory_free(adapter);
4184 }
4185 
4186 static void et131x_adjust_link(struct net_device *netdev)
4187 {
4188 	struct et131x_adapter *adapter = netdev_priv(netdev);
4189 	struct  phy_device *phydev = adapter->phydev;
4190 
4191 	if (netif_carrier_ok(netdev)) {
4192 		adapter->boot_coma = 20;
4193 
4194 		if (phydev && phydev->speed == SPEED_10) {
4195 			/*
4196 			 * NOTE - Is there a way to query this without
4197 			 * TruePHY?
4198 			 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
4199 			 * EMI_TRUEPHY_A13O) {
4200 			 */
4201 			u16 register18;
4202 
4203 			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4204 					 &register18);
4205 			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4206 					 register18 | 0x4);
4207 			et131x_mii_write(adapter, PHY_INDEX_REG,
4208 					 register18 | 0x8402);
4209 			et131x_mii_write(adapter, PHY_DATA_REG,
4210 					 register18 | 511);
4211 			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4212 					 register18);
4213 		}
4214 
4215 		et1310_config_flow_control(adapter);
4216 
4217 		if (phydev && phydev->speed == SPEED_1000 &&
4218 				adapter->registry_jumbo_packet > 2048) {
4219 			u16 reg;
4220 
4221 			et131x_mii_read(adapter, PHY_CONFIG, &reg);
4222 			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4223 			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4224 			et131x_mii_write(adapter, PHY_CONFIG, reg);
4225 		}
4226 
4227 		et131x_set_rx_dma_timer(adapter);
4228 		et1310_config_mac_regs2(adapter);
4229 	}
4230 
4231 	if (phydev && phydev->link != adapter->link) {
4232 		/*
4233 		 * Check to see if we are in coma mode and if
4234 		 * so, disable it because we will not be able
4235 		 * to read PHY values until we are out.
4236 		 */
4237 		if (et1310_in_phy_coma(adapter))
4238 			et1310_disable_phy_coma(adapter);
4239 
4240 		if (phydev->link) {
4241 			adapter->boot_coma = 20;
4242 		} else {
4243 			dev_warn(&adapter->pdev->dev,
4244 			    "Link down - cable problem ?\n");
4245 			adapter->boot_coma = 0;
4246 
4247 			if (phydev->speed == SPEED_10) {
4248 				/* NOTE - Is there a way to query this without
4249 				 * TruePHY?
4250 				 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
4251 				 * EMI_TRUEPHY_A13O)
4252 				 */
4253 				u16 register18;
4254 
4255 				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4256 						 &register18);
4257 				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4258 						 register18 | 0x4);
4259 				et131x_mii_write(adapter, PHY_INDEX_REG,
4260 						 register18 | 0x8402);
4261 				et131x_mii_write(adapter, PHY_DATA_REG,
4262 						 register18 | 511);
4263 				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4264 						 register18);
4265 			}
4266 
4267 			/* Free the packets being actively sent & stopped */
4268 			et131x_free_busy_send_packets(adapter);
4269 
4270 			/* Re-initialize the send structures */
4271 			et131x_init_send(adapter);
4272 
4273 			/*
4274 			 * Bring the device back to the state it was during
4275 			 * init prior to autonegotiation being complete. This
4276 			 * way, when we get the auto-neg complete interrupt,
4277 			 * we can complete init by calling config_mac_regs2.
4278 			 */
4279 			et131x_soft_reset(adapter);
4280 
4281 			/* Setup ET1310 as per the documentation */
4282 			et131x_adapter_setup(adapter);
4283 
4284 			/* perform reset of tx/rx */
4285 			et131x_disable_txrx(netdev);
4286 			et131x_enable_txrx(netdev);
4287 		}
4288 
4289 		adapter->link = phydev->link;
4290 
4291 		phy_print_status(phydev);
4292 	}
4293 }
4294 
4295 static int et131x_mii_probe(struct net_device *netdev)
4296 {
4297 	struct et131x_adapter *adapter = netdev_priv(netdev);
4298 	struct  phy_device *phydev = NULL;
4299 
4300 	phydev = phy_find_first(adapter->mii_bus);
4301 	if (!phydev) {
4302 		dev_err(&adapter->pdev->dev, "no PHY found\n");
4303 		return -ENODEV;
4304 	}
4305 
4306 	phydev = phy_connect(netdev, dev_name(&phydev->dev),
4307 			&et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4308 
4309 	if (IS_ERR(phydev)) {
4310 		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4311 		return PTR_ERR(phydev);
4312 	}
4313 
4314 	phydev->supported &= (SUPPORTED_10baseT_Half
4315 				| SUPPORTED_10baseT_Full
4316 				| SUPPORTED_100baseT_Half
4317 				| SUPPORTED_100baseT_Full
4318 				| SUPPORTED_Autoneg
4319 				| SUPPORTED_MII
4320 				| SUPPORTED_TP);
4321 
4322 	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4323 		phydev->supported |= SUPPORTED_1000baseT_Full;
4324 
4325 	phydev->advertising = phydev->supported;
4326 	adapter->phydev = phydev;
4327 
4328 	dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
4329 		 "(mii_bus:phy_addr=%s)\n",
4330 		 phydev->drv->name, dev_name(&phydev->dev));
4331 
4332 	return 0;
4333 }
4334 
4335 /**
4336  * et131x_adapter_init - Initialize the private adapter structure
4337  * @adapter: pointer to the private adapter struct
4338  * @pdev: pointer to the PCI device
4339  *
4340  * Initialize the data structures for the et131x_adapter object and link
4341  * them together with the platform provided device structures.
4342  */
4343 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4344 		struct pci_dev *pdev)
4345 {
4346 	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4347 
4348 	struct et131x_adapter *adapter;
4349 
4350 	/* Allocate private adapter struct and copy in relevant information */
4351 	adapter = netdev_priv(netdev);
4352 	adapter->pdev = pci_dev_get(pdev);
4353 	adapter->netdev = netdev;
4354 
4355 	/* Initialize spinlocks here */
4356 	spin_lock_init(&adapter->lock);
4357 	spin_lock_init(&adapter->tcb_send_qlock);
4358 	spin_lock_init(&adapter->tcb_ready_qlock);
4359 	spin_lock_init(&adapter->send_hw_lock);
4360 	spin_lock_init(&adapter->rcv_lock);
4361 	spin_lock_init(&adapter->rcv_pend_lock);
4362 	spin_lock_init(&adapter->fbr_lock);
4363 	spin_lock_init(&adapter->phy_lock);
4364 
4365 	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */
4366 
4367 	/* Set the MAC address to a default */
4368 	memcpy(adapter->addr, default_mac, ETH_ALEN);
4369 
4370 	return adapter;
4371 }
4372 
4373 /**
4374  * et131x_pci_remove
4375  * @pdev: a pointer to the device's pci_dev structure
4376  *
4377  * Registered in the pci_driver structure, this function is called when the
4378  * PCI subsystem detects that a PCI device which matches the information
4379  * contained in the pci_device_id table has been removed.
4380  */
4381 static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4382 {
4383 	struct net_device *netdev = pci_get_drvdata(pdev);
4384 	struct et131x_adapter *adapter = netdev_priv(netdev);
4385 
4386 	unregister_netdev(netdev);
4387 	phy_disconnect(adapter->phydev);
4388 	mdiobus_unregister(adapter->mii_bus);
4389 	kfree(adapter->mii_bus->irq);
4390 	mdiobus_free(adapter->mii_bus);
4391 
4392 	et131x_adapter_memory_free(adapter);
4393 	iounmap(adapter->regs);
4394 	pci_dev_put(pdev);
4395 
4396 	free_netdev(netdev);
4397 	pci_release_regions(pdev);
4398 	pci_disable_device(pdev);
4399 }
4400 
4401 /**
4402  * et131x_up - Bring up a device for use.
4403  * @netdev: device to be opened
4404  */
4405 static void et131x_up(struct net_device *netdev)
4406 {
4407 	struct et131x_adapter *adapter = netdev_priv(netdev);
4408 
4409 	et131x_enable_txrx(netdev);
4410 	phy_start(adapter->phydev);
4411 }
4412 
4413 /**
4414  * et131x_down - Bring down the device
4415  * @netdev: device to be brought down
4416  */
4417 static void et131x_down(struct net_device *netdev)
4418 {
4419 	struct et131x_adapter *adapter = netdev_priv(netdev);
4420 
4421 	/* Save the timestamp for the TX watchdog, prevent a timeout */
4422 	netdev->trans_start = jiffies;
4423 
4424 	phy_stop(adapter->phydev);
4425 	et131x_disable_txrx(netdev);
4426 }
4427 
4428 #ifdef CONFIG_PM_SLEEP
4429 static int et131x_suspend(struct device *dev)
4430 {
4431 	struct pci_dev *pdev = to_pci_dev(dev);
4432 	struct net_device *netdev = pci_get_drvdata(pdev);
4433 
4434 	if (netif_running(netdev)) {
4435 		netif_device_detach(netdev);
4436 		et131x_down(netdev);
4437 		pci_save_state(pdev);
4438 	}
4439 
4440 	return 0;
4441 }
4442 
4443 static int et131x_resume(struct device *dev)
4444 {
4445 	struct pci_dev *pdev = to_pci_dev(dev);
4446 	struct net_device *netdev = pci_get_drvdata(pdev);
4447 
4448 	if (netif_running(netdev)) {
4449 		pci_restore_state(pdev);
4450 		et131x_up(netdev);
4451 		netif_device_attach(netdev);
4452 	}
4453 
4454 	return 0;
4455 }
4456 
4457 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4458 #define ET131X_PM_OPS (&et131x_pm_ops)
4459 #else
4460 #define ET131X_PM_OPS NULL
4461 #endif
4462 
4463 /**
4464  * et131x_isr - The Interrupt Service Routine for the driver.
4465  * @irq: the IRQ on which the interrupt was received.
4466  * @dev_id: device-specific info (here a pointer to a net_device struct)
4467  *
4468  * Returns a value indicating if the interrupt was handled.
4469  */
4470 irqreturn_t et131x_isr(int irq, void *dev_id)
4471 {
4472 	bool handled = true;
4473 	struct net_device *netdev = (struct net_device *)dev_id;
4474 	struct et131x_adapter *adapter = NULL;
4475 	u32 status;
4476 
4477 	if (!netif_device_present(netdev)) {
4478 		handled = false;
4479 		goto out;
4480 	}
4481 
4482 	adapter = netdev_priv(netdev);
4483 
4484 	/* If the adapter is in low power state, then it should not
4485 	 * recognize any interrupt
4486 	 */
4487 
4488 	/* Disable Device Interrupts */
4489 	et131x_disable_interrupts(adapter);
4490 
4491 	/* Get a copy of the value in the interrupt status register
4492 	 * so we can process the interrupting section
4493 	 */
4494 	status = readl(&adapter->regs->global.int_status);
4495 
4496 	if (adapter->flowcontrol == FLOW_TXONLY ||
4497 	    adapter->flowcontrol == FLOW_BOTH) {
4498 		status &= ~INT_MASK_ENABLE;
4499 	} else {
4500 		status &= ~INT_MASK_ENABLE_NO_FLOW;
4501 	}
4502 
4503 	/* Make sure this is our interrupt */
4504 	if (!status) {
4505 		handled = false;
4506 		et131x_enable_interrupts(adapter);
4507 		goto out;
4508 	}
4509 
4510 	/* This is our interrupt, so process accordingly */
4511 
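	/* A watchdog interrupt is our cue to finish work deferred by
	 * interrupt coalescing: if the oldest outstanding TCB has gone
	 * stale, force Tx completion processing; if receives were left
	 * unfinished, force Rx processing; if nothing is pending, stop the
	 * watchdog timer.
	 */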
4512 	if (status & ET_INTR_WATCHDOG) {
4513 		struct tcb *tcb = adapter->tx_ring.send_head;
4514 
4515 		if (tcb)
4516 			if (++tcb->stale > 1)
4517 				status |= ET_INTR_TXDMA_ISR;
4518 
4519 		if (adapter->rx_ring.unfinished_receives)
4520 			status |= ET_INTR_RXDMA_XFR_DONE;
4521 		else if (tcb == NULL)
4522 			writel(0, &adapter->regs->global.watchdog_timer);
4523 
4524 		status &= ~ET_INTR_WATCHDOG;
4525 	}
4526 
4527 	if (status == 0) {
4528 		/* This interrupt has in some way been "handled" by
4529 		 * the ISR. Either it was a spurious Rx interrupt, or
4530 		 * it was a Tx interrupt that has been filtered by
4531 		 * the ISR.
4532 		 */
4533 		et131x_enable_interrupts(adapter);
4534 		goto out;
4535 	}
4536 
4537 	/* We need to save the interrupt status value for use in our
4538 	 * DPC. We will clear the software copy of that in that
4539 	 * routine.
4540 	 */
4541 	adapter->stats.interrupt_status = status;
4542 
4543 	/* Schedule the ISR handler as a bottom-half task in the
4544 	 * kernel's tq_immediate queue, and mark the queue for
4545 	 * execution
4546 	 */
4547 	schedule_work(&adapter->task);
4548 out:
4549 	return IRQ_RETVAL(handled);
4550 }
4551 
4552 /**
4553  * et131x_isr_handler - The ISR handler
4554  * @work: the work_struct embedded in our private adapter structure
4555  *
4556  * Scheduled to run in a deferred context by the ISR. This is where the ISR's
4557  * work actually gets done.
4558  */
4559 static void et131x_isr_handler(struct work_struct *work)
4560 {
4561 	struct et131x_adapter *adapter =
4562 		container_of(work, struct et131x_adapter, task);
4563 	u32 status = adapter->stats.interrupt_status;
4564 	struct address_map __iomem *iomem = adapter->regs;
4565 
4566 	/*
4567 	 * These first two are by far the most common.  Once handled, we clear
4568 	 * their two bits in the status word.  If the word is now zero, we
4569 	 * exit.
4570 	 */
4571 	/* Handle all the completed Transmit interrupts */
4572 	if (status & ET_INTR_TXDMA_ISR)
4573 		et131x_handle_send_interrupt(adapter);
4574 
4575 	/* Handle all the completed Receives interrupts */
4576 	if (status & ET_INTR_RXDMA_XFR_DONE)
4577 		et131x_handle_recv_interrupt(adapter);
4578 
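	/* Mask off the two bits handled above; 0xffffffd7 appears to clear
	 * the TXDMA_ISR and RXDMA_XFR_DONE bits, so anything left needs the
	 * more detailed handling below.
	 */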
4579 	status &= 0xffffffd7;
4580 
4581 	if (status) {
4582 		/* Handle the TXDMA Error interrupt */
4583 		if (status & ET_INTR_TXDMA_ERR) {
4584 			u32 txdma_err;
4585 
4586 			/* Following read also clears the register (COR) */
4587 			txdma_err = readl(&iomem->txdma.tx_dma_error);
4588 
4589 			dev_warn(&adapter->pdev->dev,
4590 				    "TXDMA_ERR interrupt, error = %d\n",
4591 				    txdma_err);
4592 		}
4593 
4594 		/* Handle Free Buffer Ring 0 and 1 Low interrupt */
4595 		if (status &
4596 		    (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4597 			/*
4598 			 * This indicates the number of unused buffers in
4599 			 * RXDMA free buffer ring 0 is <= the limit you
4600 			 * programmed. Free buffer resources need to be
4601 			 * returned.  Free buffers are consumed as packets
4602 			 * are passed from the network to the host. The host
4603 			 * becomes aware of the packets from the contents of
4604 			 * the packet status ring. This ring is queried when
4605 			 * the packet done interrupt occurs. Packets are then
4606 			 * passed to the OS. When the OS is done with the
4607 			 * packets the resources can be returned to the
4608 			 * ET1310 for re-use. This interrupt is one method of
4609 			 * returning resources.
4610 			 */
4611 
4612 			/* If the user has flow control on, then we will
4613 			 * send a pause packet, otherwise just exit
4614 			 */
4615 			if (adapter->flowcontrol == FLOW_TXONLY ||
4616 			    adapter->flowcontrol == FLOW_BOTH) {
4617 				u32 pm_csr;
4618 
4619 				/* Tell the device to send a pause packet via
4620 				 * the back pressure register (bp req  and
4621 				 * bp xon/xoff)
4622 				 */
4623 				pm_csr = readl(&iomem->global.pm_csr);
4624 				if (!et1310_in_phy_coma(adapter))
4625 					writel(3, &iomem->txmac.bp_ctrl);
4626 			}
4627 		}
4628 
4629 		/* Handle Packet Status Ring Low Interrupt */
4630 		if (status & ET_INTR_RXDMA_STAT_LOW) {
4631 
4632 			/*
4633 			 * Same idea as with the two Free Buffer Rings.
4634 			 * Packets going from the network to the host each
4635 			 * consume a free buffer resource and a packet status
4636 			 * resource.  These resources are passed to the OS.
4637 			 * When the OS is done with the resources, they need
4638 			 * to be returned to the ET1310. This is one method
4639 			 * of returning the resources.
4640 			 */
4641 		}
4642 
4643 		/* Handle RXDMA Error Interrupt */
4644 		if (status & ET_INTR_RXDMA_ERR) {
4645 			/*
4646 			 * The rxdma_error interrupt is sent when a time-out
4647 			 * on a request issued by the JAGCore has occurred or
4648 			 * a completion is returned with an unsuccessful
4649 			 * status.  In both cases the request is considered
4650 			 * complete. The JAGCore will automatically re-try the
4651 			 * request in question. Normally information on events
4652 			 * like these are sent to the host using the "Advanced
4653 			 * Error Reporting" capability. This interrupt is
4654 			 * another way of getting similar information. The
4655 			 * only thing required is to clear the interrupt by
4656 			 * reading the ISR in the global resources. The
4657 			 * JAGCore will do a re-try on the request.  Normally
4658 			 * you should never see this interrupt. If you start
4659 			 * to see this interrupt occurring frequently then
4660 			 * something bad has occurred. A reset might be the
4661 			 * thing to do.
4662 			 */
4663 			/* TRAP();*/
4664 
4665 			dev_warn(&adapter->pdev->dev,
4666 				    "RxDMA_ERR interrupt, error %x\n",
4667 				    readl(&iomem->txmac.tx_test));
4668 		}
4669 
4670 		/* Handle the Wake on LAN Event */
4671 		if (status & ET_INTR_WOL) {
4672 			/*
4673 			 * This is a secondary interrupt for wake on LAN.
4674 			 * The driver should never see this, if it does,
4675 			 * something serious is wrong. We will TRAP the
4676 			 * message when we are in DBG mode, otherwise we
4677 			 * will ignore it.
4678 			 */
4679 			dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4680 		}
4681 
4682 		/* Let's move on to the TxMac */
4683 		if (status & ET_INTR_TXMAC) {
4684 			u32 err = readl(&iomem->txmac.err);
4685 
4686 			/*
4687 			 * When any of the errors occur and TXMAC generates
4688 			 * an interrupt to report these errors, it usually
4689 			 * means that TXMAC has detected an error in the data
4690 			 * stream retrieved from the on-chip Tx Q. All of
4691 			 * these errors are catastrophic and TXMAC won't be
4692 			 * able to recover data when these errors occur.  In
4693 			 * a nutshell, the whole Tx path will have to be reset
4694 			 * and re-configured afterwards.
4695 			 */
4696 			dev_warn(&adapter->pdev->dev,
4697 				    "TXMAC interrupt, error 0x%08x\n",
4698 				    err);
4699 
4700 			/* If we are debugging, we want to see this error,
4701 			 * otherwise we just want the device to be reset and
4702 			 * continue
4703 			 */
4704 		}
4705 
4706 		/* Handle RXMAC Interrupt */
4707 		if (status & ET_INTR_RXMAC) {
4708 			/*
4709 			 * These interrupts are catastrophic to the device,
4710 			 * what we need to do is disable the interrupts and
4711 			 * set the flag to cause us to reset so we can solve
4712 			 * this issue.
4713 			 */
4714 			/* MP_SET_FLAG( adapter,
4715 						fMP_ADAPTER_HARDWARE_ERROR); */
4716 
4717 			dev_warn(&adapter->pdev->dev,
4718 			  "RXMAC interrupt, error 0x%08x.  Requesting reset\n",
4719 				    readl(&iomem->rxmac.err_reg));
4720 
4721 			dev_warn(&adapter->pdev->dev,
4722 				    "Enable 0x%08x, Diag 0x%08x\n",
4723 				    readl(&iomem->rxmac.ctrl),
4724 				    readl(&iomem->rxmac.rxq_diag));
4725 
4726 			/*
4727 			 * If we are debugging, we want to see this error,
4728 			 * otherwise we just want the device to be reset and
4729 			 * continue
4730 			 */
4731 		}
4732 
4733 		/* Handle MAC_STAT Interrupt */
4734 		if (status & ET_INTR_MAC_STAT) {
4735 			/*
4736 			 * This means at least one of the un-masked counters
4737 			 * in the MAC_STAT block has rolled over.  Use this
4738 			 * to maintain the top, software managed bits of the
4739 			 * counter(s).
4740 			 */
4741 			et1310_handle_macstat_interrupt(adapter);
4742 		}
4743 
4744 		/* Handle SLV Timeout Interrupt */
4745 		if (status & ET_INTR_SLV_TIMEOUT) {
4746 			/*
4747 			 * This means a timeout has occurred on a read or
4748 			 * write request to one of the JAGCore registers. The
4749 			 * Global Resources block has terminated the request
4750 			 * and on a read request, returned a "fake" value.
4751 			 * The most likely reasons are: Bad Address or the
4752 			 * addressed module is in a power-down state and
4753 			 * can't respond.
4754 			 */
4755 		}
4756 	}
4757 	et131x_enable_interrupts(adapter);
4758 }
4759 
4760 /**
4761  * et131x_stats - Return the current device statistics.
4762  * @netdev: device whose stats are being queried
4763  *
4764  * Returns a pointer to the adapter's updated net_device_stats structure.
4765  */
4766 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4767 {
4768 	struct et131x_adapter *adapter = netdev_priv(netdev);
4769 	struct net_device_stats *stats = &adapter->net_stats;
4770 	struct ce_stats *devstat = &adapter->stats;
4771 
4772 	stats->rx_errors = devstat->rx_length_errs +
4773 			   devstat->rx_align_errs +
4774 			   devstat->rx_crc_errs +
4775 			   devstat->rx_code_violations +
4776 			   devstat->rx_other_errs;
4777 	stats->tx_errors = devstat->tx_max_pkt_errs;
4778 	stats->multicast = devstat->multicast_pkts_rcvd;
4779 	stats->collisions = devstat->tx_collisions;
4780 
4781 	stats->rx_length_errors = devstat->rx_length_errs;
4782 	stats->rx_over_errors = devstat->rx_overflows;
4783 	stats->rx_crc_errors = devstat->rx_crc_errs;
4784 
4785 	/* NOTE: These stats don't have corresponding values in CE_STATS,
4786 	 * so we're going to have to update these directly from within the
4787 	 * TX/RX code
4788 	 */
4789 	/* stats->rx_bytes            = 20; devstat->; */
4790 	/* stats->tx_bytes            = 20;  devstat->; */
4791 	/* stats->rx_dropped          = devstat->; */
4792 	/* stats->tx_dropped          = devstat->; */
4793 
4794 	/*  NOTE: Not used, can't find analogous statistics */
4795 	/* stats->rx_frame_errors     = devstat->; */
4796 	/* stats->rx_fifo_errors      = devstat->; */
4797 	/* stats->rx_missed_errors    = devstat->; */
4798 
4799 	/* stats->tx_aborted_errors   = devstat->; */
4800 	/* stats->tx_carrier_errors   = devstat->; */
4801 	/* stats->tx_fifo_errors      = devstat->; */
4802 	/* stats->tx_heartbeat_errors = devstat->; */
4803 	/* stats->tx_window_errors    = devstat->; */
4804 	return stats;
4805 }
4806 
4807 /**
4808  * et131x_open - Open the device for use.
4809  * @netdev: device to be opened
4810  *
4811  * Returns 0 on success, errno on failure (as defined in errno.h)
4812  */
4813 static int et131x_open(struct net_device *netdev)
4814 {
4815 	struct et131x_adapter *adapter = netdev_priv(netdev);
4816 	struct pci_dev *pdev = adapter->pdev;
4817 	unsigned int irq = pdev->irq;
4818 	int result;
4819 
4820 	/* Start the timer to track NIC errors */
4821 	init_timer(&adapter->error_timer);
4822 	adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4823 	adapter->error_timer.function = et131x_error_timer_handler;
4824 	adapter->error_timer.data = (unsigned long)adapter;
4825 	add_timer(&adapter->error_timer);
4826 
4827 	result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev);
4828 	if (result) {
4829 		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4830 		return result;
4831 	}
4832 
4833 	adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4834 
4835 	et131x_up(netdev);
4836 
4837 	return result;
4838 }
4839 
4840 /**
4841  * et131x_close - Close the device
4842  * @netdev: device to be closed
4843  *
4844  * Returns the result of del_timer_sync() on the error timer (0 or 1); the network core ignores this value
4845  */
4846 static int et131x_close(struct net_device *netdev)
4847 {
4848 	struct et131x_adapter *adapter = netdev_priv(netdev);
4849 
4850 	et131x_down(netdev);
4851 
4852 	adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4853 	free_irq(adapter->pdev->irq, netdev);
4854 
4855 	/* Stop the error timer */
4856 	return del_timer_sync(&adapter->error_timer);
4857 }
4858 
4859 /**
4860  * et131x_ioctl - The I/O Control handler for the driver
4861  * @netdev: device on which the control request is being made
4862  * @reqbuf: a pointer to the IOCTL request buffer
4863  * @cmd: the IOCTL command code
4864  *
4865  * Returns 0 on success, errno on failure (as defined in errno.h)
4866  */
4867 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4868 			int cmd)
4869 {
4870 	struct et131x_adapter *adapter = netdev_priv(netdev);
4871 
4872 	if (!adapter->phydev)
4873 		return -EINVAL;
4874 
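	/* Standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are
	 * delegated to phylib via the attached PHY device.
	 */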
4875 	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4876 }
4877 
4878 /**
4879  * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
4880  * @adapter: pointer to our private adapter structure
4881  *
4882  * FIXME: lot of dups with MAC code
4883  *
4884  * Returns 0 on success, errno on failure
4885  */
4886 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4887 {
4888 	int filter = adapter->packet_filter;
4889 	int status = 0;
4890 	u32 ctrl;
4891 	u32 pf_ctrl;
4892 
4893 	ctrl = readl(&adapter->regs->rxmac.ctrl);
4894 	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
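	/* Bit usage as inferred from the code below (a hedged summary, not
	 * taken from a datasheet): pf_ctrl bit 0 = broadcast filter, bit 1 =
	 * multicast filter, bit 2 = unicast filter; rxmac ctrl bit 2 (0x04)
	 * disables packet filtering altogether.
	 */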
4895 
4896 	/* Default to disabled packet filtering.  Enable it in the individual
4897 	 * case statements that require the device to filter something
4898 	 */
4899 	ctrl |= 0x04;
4900 
4901 	/* Set us to be in promiscuous mode so we receive everything, this
4902 	 * is also true when we get a packet filter of 0
4903 	 */
4904 	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4905 		pf_ctrl &= ~7;	/* Clear filter bits */
4906 	else {
4907 		/*
4908 		 * Set us up with Multicast packet filtering.  Three cases are
4909 		 * possible - (1) we have a multi-cast list, (2) we receive ALL
4910 		 * multicast entries or (3) we receive none.
4911 		 */
4912 		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4913 			pf_ctrl &= ~2;	/* Multicast filter bit */
4914 		else {
4915 			et1310_setup_device_for_multicast(adapter);
4916 			pf_ctrl |= 2;
4917 			ctrl &= ~0x04;
4918 		}
4919 
4920 		/* Set us up with Unicast packet filtering */
4921 		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4922 			et1310_setup_device_for_unicast(adapter);
4923 			pf_ctrl |= 4;
4924 			ctrl &= ~0x04;
4925 		}
4926 
4927 		/* Set us up with Broadcast packet filtering */
4928 		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4929 			pf_ctrl |= 1;	/* Broadcast filter bit */
4930 			ctrl &= ~0x04;
4931 		} else
4932 			pf_ctrl &= ~1;
4933 
4934 		/* Setup the receive mac configuration registers - Packet
4935 		 * Filter control + the enable / disable for packet filter
4936 		 * in the control reg.
4937 		 */
4938 		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4939 		writel(ctrl, &adapter->regs->rxmac.ctrl);
4940 	}
4941 	return status;
4942 }
4943 
4944 /**
4945  * et131x_multicast - The handler to configure multicasting on the interface
4946  * @netdev: a pointer to a net_device struct representing the device
4947  */
4948 static void et131x_multicast(struct net_device *netdev)
4949 {
4950 	struct et131x_adapter *adapter = netdev_priv(netdev);
4951 	int packet_filter;
4952 	unsigned long flags;
4953 	struct netdev_hw_addr *ha;
4954 	int i;
4955 
4956 	spin_lock_irqsave(&adapter->lock, flags);
4957 
4958 	/* Before we modify the platform-independent filter flags, store them
4959 	 * locally. This allows us to determine if anything's changed and if
4960 	 * we even need to bother the hardware
4961 	 */
4962 	packet_filter = adapter->packet_filter;
4963 
4964 	/* Clear the 'multicast' flag locally; because we only have a single
4965 	 * flag to check multicast, and multiple multicast addresses can be
4966 	 * set, this is the easiest way to determine if more than one
4967 	 * multicast address is being set.
4968 	 */
4969 	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4970 
4971 	/* Check the net_device flags and set the device independent flags
4972 	 * accordingly
4973 	 */
4974 
4975 	if (netdev->flags & IFF_PROMISC)
4976 		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4977 	else
4978 		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4979 
4980 	if (netdev->flags & IFF_ALLMULTI)
4981 		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4982 
4983 	if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4984 		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4985 
4986 	if (netdev_mc_count(netdev) < 1) {
4987 		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4988 		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4989 	} else
4990 		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4991 
4992 	/* Set values in the private adapter struct */
4993 	i = 0;
4994 	netdev_for_each_mc_addr(ha, netdev) {
4995 		if (i == NIC_MAX_MCAST_LIST)
4996 			break;
4997 		memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4998 	}
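	/* Addresses beyond NIC_MAX_MCAST_LIST are simply not copied; that
	 * overflow case was already handled above by switching the filter to
	 * ET131X_PACKET_TYPE_ALL_MULTICAST.
	 */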
4999 	adapter->multicast_addr_count = i;
5000 
5001 	/* Are the new flags different from the previous ones? If not, then no
5002 	 * action is required
5003 	 *
5004 	 * NOTE - This block will always update the multicast_list with the
5005 	 *        hardware, even if the addresses aren't the same.
5006 	 */
5007 	if (packet_filter != adapter->packet_filter) {
5008 		/* Call the device's filter function */
5009 		et131x_set_packet_filter(adapter);
5010 	}
5011 	spin_unlock_irqrestore(&adapter->lock, flags);
5012 }
5013 
5014 /**
5015  * et131x_tx - The handler to tx a packet on the device
5016  * @skb: data to be Tx'd
5017  * @netdev: device on which data is to be Tx'd
5018  *
5019  * Returns NETDEV_TX_OK on success or when a packet is dropped, NETDEV_TX_BUSY if the TX ring is full
5020  */
5021 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5022 {
5023 	int status = 0;
5024 	struct et131x_adapter *adapter = netdev_priv(netdev);
5025 
5026 	/* stop the queue if it's getting full */
5027 	if (adapter->tx_ring.used >= NUM_TCB - 1 &&
5028 	    !netif_queue_stopped(netdev))
5029 		netif_stop_queue(netdev);
5030 
5031 	/* Save the timestamp for the TX timeout watchdog */
5032 	netdev->trans_start = jiffies;
5033 
5034 	/* Call the device-specific data Tx routine */
5035 	status = et131x_send_packets(skb, netdev);
5036 
5037 	/* Check status and manage the netif queue if necessary */
5038 	if (status != 0) {
5039 		if (status == -ENOMEM)
5040 			status = NETDEV_TX_BUSY;
5041 		else
5042 			status = NETDEV_TX_OK;
5043 	}
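	/* NETDEV_TX_BUSY asks the stack to requeue the skb and retry later
	 * (the queue was already stopped above as the ring filled). Other
	 * failures are reported as NETDEV_TX_OK on the assumption that
	 * et131x_send_packets() has already consumed or dropped the skb.
	 */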
5044 	return status;
5045 }
5046 
5047 /**
5048  * et131x_tx_timeout - Timeout handler
5049  * @netdev: a pointer to a net_device struct representing the device
5050  *
5051  * The handler called when a Tx request times out. The timeout period is
5052  * specified by the 'watchdog_timeo' element in the net_device structure (see
5053  * et131x_pci_setup() for where this value is set).
5054  */
5055 static void et131x_tx_timeout(struct net_device *netdev)
5056 {
5057 	struct et131x_adapter *adapter = netdev_priv(netdev);
5058 	struct tcb *tcb;
5059 	unsigned long flags;
5060 
5061 	/* If the device is closed, ignore the timeout */
5062 	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
5063 		return;
5064 
5065 	/* Any nonrecoverable hardware error?
5066 	 * Checks adapter->flags for any failure in phy reading
5067 	 */
5068 	if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5069 		return;
5070 
5071 	/* Hardware failure? */
5072 	if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5073 		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5074 		return;
5075 	}
5076 
5077 	/* Is send stuck? */
5078 	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5079 
5080 	tcb = adapter->tx_ring.send_head;
5081 
5082 	if (tcb != NULL) {
5083 		tcb->count++;
5084 
5085 		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5086 			spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5087 					       flags);
5088 
5089 			dev_warn(&adapter->pdev->dev,
5090 				"Send stuck - reset.  tcb->WrIndex %x, flags 0x%08x\n",
5091 				tcb->index,
5092 				tcb->flags);
5093 
5094 			adapter->net_stats.tx_errors++;
5095 
5096 			/* perform reset of tx/rx */
5097 			et131x_disable_txrx(netdev);
5098 			et131x_enable_txrx(netdev);
5099 			return;
5100 		}
5101 	}
5102 
5103 	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5104 }
5105 
5106 /**
5107  * et131x_change_mtu - The handler called to change the MTU for the device
5108  * @netdev: device whose MTU is to be changed
5109  * @new_mtu: the desired MTU
5110  *
5111  * Returns 0 on success, errno on failure (as defined in errno.h)
5112  */
5113 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5114 {
5115 	int result = 0;
5116 	struct et131x_adapter *adapter = netdev_priv(netdev);
5117 
5118 	/* Make sure the requested MTU is valid */
5119 	if (new_mtu < 64 || new_mtu > 9216)
5120 		return -EINVAL;
5121 
5122 	et131x_disable_txrx(netdev);
5123 	et131x_handle_send_interrupt(adapter);
5124 	et131x_handle_recv_interrupt(adapter);
5125 
5126 	/* Set the new MTU */
5127 	netdev->mtu = new_mtu;
5128 
5129 	/* Free Rx DMA memory */
5130 	et131x_adapter_memory_free(adapter);
5131 
5132 	/* Set the config parameter for Jumbo Packet support */
5133 	adapter->registry_jumbo_packet = new_mtu + 14;
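	/* The +14 accounts for the Ethernet header (ETH_HLEN); a VLAN tag,
	 * if present, is not included in this figure.
	 */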
5134 	et131x_soft_reset(adapter);
5135 
5136 	/* Alloc and init Rx DMA memory */
5137 	result = et131x_adapter_memory_alloc(adapter);
5138 	if (result != 0) {
5139 		dev_warn(&adapter->pdev->dev,
5140 			"Change MTU failed; couldn't re-alloc DMA memory\n");
5141 		return result;
5142 	}
5143 
5144 	et131x_init_send(adapter);
5145 
5146 	et131x_hwaddr_init(adapter);
5147 	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5148 
5149 	/* Init the device with the new settings */
5150 	et131x_adapter_setup(adapter);
5151 
5152 	et131x_enable_txrx(netdev);
5153 
5154 	return result;
5155 }
5156 
5157 /**
5158  * et131x_set_mac_addr - handler to change the MAC address for the device
5159  * @netdev: device whose MAC is to be changed
5160  * @new_mac: the desired MAC address
5161  *
5162  * Returns 0 on success, errno on failure (as defined in errno.h)
5163  *
5164  * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
5165  */
5166 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5167 {
5168 	int result = 0;
5169 	struct et131x_adapter *adapter = netdev_priv(netdev);
5170 	struct sockaddr *address = new_mac;
5171 
5172 	/* begin blux */
5173 
5174 	if (adapter == NULL)
5175 		return -ENODEV;
5176 
5177 	/* Make sure the requested MAC is valid */
5178 	if (!is_valid_ether_addr(address->sa_data))
5179 		return -EADDRNOTAVAIL;
5180 
5181 	et131x_disable_txrx(netdev);
5182 	et131x_handle_send_interrupt(adapter);
5183 	et131x_handle_recv_interrupt(adapter);
5184 
5185 	/* Set the new MAC */
5186 	/* netdev->set_mac_address  = &new_mac; */
5187 
5188 	memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5189 
5190 	printk(KERN_INFO "%s: Setting MAC address to %pM\n",
5191 			netdev->name, netdev->dev_addr);
5192 
5193 	/* Free Rx DMA memory */
5194 	et131x_adapter_memory_free(adapter);
5195 
5196 	et131x_soft_reset(adapter);
5197 
5198 	/* Alloc and init Rx DMA memory */
5199 	result = et131x_adapter_memory_alloc(adapter);
5200 	if (result != 0) {
5201 		dev_err(&adapter->pdev->dev,
5202 			"Change MAC failed; couldn't re-alloc DMA memory\n");
5203 		return result;
5204 	}
5205 
5206 	et131x_init_send(adapter);
5207 
5208 	et131x_hwaddr_init(adapter);
5209 
5210 	/* Init the device with the new settings */
5211 	et131x_adapter_setup(adapter);
5212 
5213 	et131x_enable_txrx(netdev);
5214 
5215 	return result;
5216 }
5217 
5218 static const struct net_device_ops et131x_netdev_ops = {
5219 	.ndo_open		= et131x_open,
5220 	.ndo_stop		= et131x_close,
5221 	.ndo_start_xmit		= et131x_tx,
5222 	.ndo_set_rx_mode	= et131x_multicast,
5223 	.ndo_tx_timeout		= et131x_tx_timeout,
5224 	.ndo_change_mtu		= et131x_change_mtu,
5225 	.ndo_set_mac_address	= et131x_set_mac_addr,
5226 	.ndo_validate_addr	= eth_validate_addr,
5227 	.ndo_get_stats		= et131x_stats,
5228 	.ndo_do_ioctl		= et131x_ioctl,
5229 };
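/* Note: .ndo_get_stats is the legacy statistics hook; newer drivers usually
 * implement .ndo_get_stats64 instead to avoid 32-bit counter wrap.
 */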
5230 
5231 /**
5232  * et131x_pci_setup - Perform device initialization
5233  * @pdev: a pointer to the device's pci_dev structure
5234  * @ent: this device's entry in the pci_device_id table
5235  *
5236  * Returns 0 on success, errno on failure (as defined in errno.h)
5237  *
5238  * Registered in the pci_driver structure, this function is called when the
5239  * PCI subsystem finds a new PCI device which matches the information
5240  * contained in the pci_device_id table. This routine is the equivalent to
5241  * a device insertion routine.
5242  */
5243 static int __devinit et131x_pci_setup(struct pci_dev *pdev,
5244 			       const struct pci_device_id *ent)
5245 {
5246 	struct net_device *netdev;
5247 	struct et131x_adapter *adapter;
5248 	int rc;
5249 	int ii;
5250 
5251 	rc = pci_enable_device(pdev);
5252 	if (rc < 0) {
5253 		dev_err(&pdev->dev, "pci_enable_device() failed\n");
5254 		goto out;
5255 	}
5256 
5257 	/* Perform some basic PCI checks */
5258 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5259 		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
5260 		rc = -ENODEV;
5261 		goto err_disable;
5262 	}
5263 
5264 	rc = pci_request_regions(pdev, DRIVER_NAME);
5265 	if (rc < 0) {
5266 		dev_err(&pdev->dev, "Can't get PCI resources\n");
5267 		goto err_disable;
5268 	}
5269 
5270 	pci_set_master(pdev);
5271 
5272 	/* Check the DMA addressing support of this device */
5273 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
5274 		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5275 		if (rc < 0) {
5276 			dev_err(&pdev->dev,
5277 			  "Unable to obtain 64 bit DMA for consistent allocations\n");
5278 			goto err_release_res;
5279 		}
5280 	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
5281 		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5282 		if (rc < 0) {
5283 			dev_err(&pdev->dev,
5284 			  "Unable to obtain 32 bit DMA for consistent allocations\n");
5285 			goto err_release_res;
5286 		}
5287 	} else {
5288 		dev_err(&pdev->dev, "No usable DMA addressing method\n");
5289 		rc = -EIO;
5290 		goto err_release_res;
5291 	}
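	/* dma_set_mask() returns 0 on success, hence the "!" branches above
	 * are the success paths. The coherent mask is set separately and is
	 * presumably what the coherent descriptor-ring allocations made later
	 * rely on.
	 */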
5292 
5293 	/* Allocate netdev and private adapter structs */
5294 	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5295 	if (!netdev) {
5296 		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
5297 		rc = -ENOMEM;
5298 		goto err_release_res;
5299 	}
5300 
5301 	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5302 	netdev->netdev_ops     = &et131x_netdev_ops;
5303 
5304 	SET_NETDEV_DEV(netdev, &pdev->dev);
5305 	et131x_set_ethtool_ops(netdev);
5306 
5307 	adapter = et131x_adapter_init(netdev, pdev);
5308 
5309 	rc = et131x_pci_init(adapter, pdev);
5310 	if (rc < 0)
5311 		goto err_free_dev;
5312 
5313 	/* Map the bus-relative registers to system virtual memory */
5314 	adapter->regs = pci_ioremap_bar(pdev, 0);
5315 	if (!adapter->regs) {
5316 		dev_err(&pdev->dev, "Cannot map device registers\n");
5317 		rc = -ENOMEM;
5318 		goto err_free_dev;
5319 	}
5320 
5321 	/* If Phy COMA mode was enabled when we went down, disable it here. */
5322 	writel(ET_PMCSR_INIT,  &adapter->regs->global.pm_csr);
5323 
5324 	/* Issue a global reset to the et1310 */
5325 	et131x_soft_reset(adapter);
5326 
5327 	/* Disable all interrupts (paranoid) */
5328 	et131x_disable_interrupts(adapter);
5329 
5330 	/* Allocate DMA memory */
5331 	rc = et131x_adapter_memory_alloc(adapter);
5332 	if (rc < 0) {
5333 		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
5334 		goto err_iounmap;
5335 	}
5336 
5337 	/* Init send data structures */
5338 	et131x_init_send(adapter);
5339 
5340 	/* Set up the task structure for the ISR's deferred handler */
5341 	INIT_WORK(&adapter->task, et131x_isr_handler);
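	/* The hard IRQ handler is expected to do only minimal work and defer
	 * the rest to this work item (assumption: et131x_isr() schedules
	 * adapter->task via schedule_work()).
	 */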
5342 
5343 	/* Copy address into the net_device struct */
5344 	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5345 
5346 	/* Init variable for counting how long we do not have link status */
5347 	adapter->boot_coma = 0;
5348 	et1310_disable_phy_coma(adapter);
5349 
5350 	rc = -ENOMEM;
5351 
5352 	/* Setup the mii_bus struct */
5353 	adapter->mii_bus = mdiobus_alloc();
5354 	if (!adapter->mii_bus) {
5355 		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
5356 		goto err_mem_free;
5357 	}
5358 
5359 	adapter->mii_bus->name = "et131x_eth_mii";
5360 	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
5361 		(adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
5362 	adapter->mii_bus->priv = netdev;
5363 	adapter->mii_bus->read = et131x_mdio_read;
5364 	adapter->mii_bus->write = et131x_mdio_write;
5365 	adapter->mii_bus->reset = et131x_mdio_reset;
5366 	adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
5367 	if (!adapter->mii_bus->irq) {
5368 		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
5369 		goto err_mdio_free;
5370 	}
5371 
5372 	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
5373 		adapter->mii_bus->irq[ii] = PHY_POLL;
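	/* PHY_POLL indicates that no dedicated PHY interrupt line is used;
	 * phylib will poll the link state periodically instead.
	 */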
5374 
5375 	rc = mdiobus_register(adapter->mii_bus);
5376 	if (rc < 0) {
5377 		dev_err(&pdev->dev, "failed to register MII bus\n");
5378 		goto err_mdio_free_irq;
5379 	}
5380 
5381 	rc = et131x_mii_probe(netdev);
5382 	if (rc < 0) {
5383 		dev_err(&pdev->dev, "failed to probe MII bus\n");
5384 		goto err_mdio_unregister;
5385 	}
5386 
5387 	/* Setup et1310 as per the documentation */
5388 	et131x_adapter_setup(adapter);
5389 
5390 	/* We can enable interrupts now
5391 	 *
5392 	 *  NOTE - Because registration of interrupt handler is done in the
5393 	 *         device's open(), defer enabling device interrupts to that
5394 	 *         point
5395 	 */
5396 
5397 	/* Register the net_device struct with the Linux network layer */
5398 	rc = register_netdev(netdev);
5399 	if (rc < 0) {
5400 		dev_err(&pdev->dev, "register_netdev() failed\n");
5401 		goto err_phy_disconnect;
5402 	}
5403 
5404 	/* Store the net_device in the pci_dev's driver data so that it can
5405 	 * be retrieved later (e.g. on device removal and in PM callbacks).
5406 	 */
5408 	pci_set_drvdata(pdev, netdev);
5409 out:
5410 	return rc;
5411 
5412 err_phy_disconnect:
5413 	phy_disconnect(adapter->phydev);
5414 err_mdio_unregister:
5415 	mdiobus_unregister(adapter->mii_bus);
5416 err_mdio_free_irq:
5417 	kfree(adapter->mii_bus->irq);
5418 err_mdio_free:
5419 	mdiobus_free(adapter->mii_bus);
5420 err_mem_free:
5421 	et131x_adapter_memory_free(adapter);
5422 err_iounmap:
5423 	iounmap(adapter->regs);
5424 err_free_dev:
5425 	pci_dev_put(pdev);
5426 	free_netdev(netdev);
5427 err_release_res:
5428 	pci_release_regions(pdev);
5429 err_disable:
5430 	pci_disable_device(pdev);
5431 	goto out;
5432 }
5433 
5434 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5435 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5436 	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5437 	{0,}
5438 };
5439 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5440 
5441 static struct pci_driver et131x_driver = {
5442 	.name		= DRIVER_NAME,
5443 	.id_table	= et131x_pci_table,
5444 	.probe		= et131x_pci_setup,
5445 	.remove		= __devexit_p(et131x_pci_remove),
5446 	.driver.pm	= ET131X_PM_OPS,
5447 };
5448 
5449 /**
5450  * et131x_init_module - The "main" entry point called on driver initialization
5451  *
5452  * Returns 0 on success, errno on failure (as defined in errno.h)
5453  */
5454 static int __init et131x_init_module(void)
5455 {
5456 	return pci_register_driver(&et131x_driver);
5457 }
5458 
5459 /**
5460  * et131x_cleanup_module - The entry point called on driver cleanup
5461  */
5462 static void __exit et131x_cleanup_module(void)
5463 {
5464 	pci_unregister_driver(&et131x_driver);
5465 }
5466 
5467 module_init(et131x_init_module);
5468 module_exit(et131x_cleanup_module);
5469 
5470