1 /*
2    sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3 
4    Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5    Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6    Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7 
8    Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9    genuine driver.
10 
11    This software may be used and distributed according to the terms of
12    the GNU General Public License (GPL), incorporated herein by reference.
13    Drivers based on or derived from this code fall under the GPL and must
14    retain the authorship, copyright and license notice.  This file is not
15    a complete program and may only be used when the entire operating
16    system is licensed under the GPL.
17 
18    See the file COPYING in this distribution for more information.
19 
20 */
21 
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/netdevice.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/pci.h>
32 #include <linux/mii.h>
33 #include <linux/delay.h>
34 #include <linux/crc32.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/slab.h>
37 #include <asm/irq.h>
38 
39 #define PHY_MAX_ADDR		32
40 #define PHY_ID_ANY		0x1f
41 #define MII_REG_ANY		0x1f
42 
43 #define DRV_VERSION		"1.4"
44 #define DRV_NAME		"sis190"
45 #define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
46 
47 #define sis190_rx_skb			netif_rx
48 #define sis190_rx_quota(count, quota)	count
49 
50 #define NUM_TX_DESC		64	/* [8..1024] */
51 #define NUM_RX_DESC		64	/* [8..8192] */
52 #define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
53 #define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
54 #define RX_BUF_SIZE		1536
55 #define RX_BUF_MASK		0xfff8
56 
57 #define SIS190_REGS_SIZE	0x80
58 #define SIS190_TX_TIMEOUT	(6*HZ)
59 #define SIS190_PHY_TIMEOUT	(10*HZ)
60 #define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
61 				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
62 				 NETIF_MSG_IFDOWN)
63 
64 /* Enhanced PHY access register bit definitions */
65 #define EhnMIIread		0x0000
66 #define EhnMIIwrite		0x0020
67 #define EhnMIIdataShift		16
68 #define EhnMIIpmdShift		6	/* 7016 only */
69 #define EhnMIIregShift		11
70 #define EhnMIIreq		0x0010
71 #define EhnMIInotDone		0x0010
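
/*
 * A GMIIControl command word packs the 16-bit data into bits 31..16,
 * the register number at EhnMIIregShift, the PHY address at
 * EhnMIIpmdShift, plus the request and read/write bits -- see
 * mdio_read()/mdio_write() below. The chip clears EhnMIInotDone once
 * the transaction has completed.
 */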
72 
73 /* Write/read MMIO register */
74 #define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
75 #define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
76 #define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
77 #define SIS_R8(reg)		readb (ioaddr + (reg))
78 #define SIS_R16(reg)		readw (ioaddr + (reg))
79 #define SIS_R32(reg)		readl (ioaddr + (reg))
80 
81 #define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
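
/*
 * Reading IntrControl back is the usual posted-write flush: it forces
 * any MMIO writes still sitting in PCI bridges out to the chip before
 * the driver proceeds.
 */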
82 
83 enum sis190_registers {
84 	TxControl		= 0x00,
85 	TxDescStartAddr		= 0x04,
86 	rsv0			= 0x08,	// reserved
87 	TxSts			= 0x0c,	// unused (Control/Status)
88 	RxControl		= 0x10,
89 	RxDescStartAddr		= 0x14,
90 	rsv1			= 0x18,	// reserved
91 	RxSts			= 0x1c,	// unused
92 	IntrStatus		= 0x20,
93 	IntrMask		= 0x24,
94 	IntrControl		= 0x28,
95 	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
96 	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
97 	rsv2			= 0x34,	// reserved
98 	ROMControl		= 0x38,
99 	ROMInterface		= 0x3c,
100 	StationControl		= 0x40,
101 	GMIIControl		= 0x44,
102 	GIoCR			= 0x48, // unused (GMAC IO Compensation)
103 	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
104 	TxMacControl		= 0x50,
105 	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
106 	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
107 	rsv3			= 0x5c, // reserved
108 	RxMacControl		= 0x60,
109 	RxMacAddr		= 0x62,
110 	RxHashTable		= 0x68,
111 	// Undocumented		= 0x6c,
112 	RxWolCtrl		= 0x70,
113 	RxWolData		= 0x74, // unused (Rx WOL Data Access)
114 	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
115 	rsv4			= 0x7c, // reserved
116 };
117 
118 enum sis190_register_content {
119 	/* IntrStatus */
120 	SoftInt			= 0x40000000,	// unused
121 	Timeup			= 0x20000000,	// unused
122 	PauseFrame		= 0x00080000,	// unused
123 	MagicPacket		= 0x00040000,	// unused
124 	WakeupFrame		= 0x00020000,	// unused
125 	LinkChange		= 0x00010000,
126 	RxQEmpty		= 0x00000080,
127 	RxQInt			= 0x00000040,
128 	TxQ1Empty		= 0x00000020,	// unused
129 	TxQ1Int			= 0x00000010,
130 	TxQ0Empty		= 0x00000008,	// unused
131 	TxQ0Int			= 0x00000004,
132 	RxHalt			= 0x00000002,
133 	TxHalt			= 0x00000001,
134 
135 	/* {Rx/Tx}CmdBits */
136 	CmdReset		= 0x10,
137 	CmdRxEnb		= 0x08,		// unused
138 	CmdTxEnb		= 0x01,
139 	RxBufEmpty		= 0x01,		// unused
140 
141 	/* Cfg9346Bits */
142 	Cfg9346_Lock		= 0x00,		// unused
143 	Cfg9346_Unlock		= 0xc0,		// unused
144 
145 	/* RxMacControl */
146 	AcceptErr		= 0x20,		// unused
147 	AcceptRunt		= 0x10,		// unused
148 	AcceptBroadcast		= 0x0800,
149 	AcceptMulticast		= 0x0400,
150 	AcceptMyPhys		= 0x0200,
151 	AcceptAllPhys		= 0x0100,
152 
153 	/* RxConfigBits */
154 	RxCfgFIFOShift		= 13,
155 	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?
156 
157 	/* TxConfigBits */
158 	TxInterFrameGapShift	= 24,
159 	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */
160 
161 	LinkStatus		= 0x02,		// unused
162 	FullDup			= 0x01,		// unused
163 
164 	/* TBICSRBit */
165 	TBILinkOK		= 0x02000000,	// unused
166 };
167 
168 struct TxDesc {
169 	__le32 PSize;
170 	__le32 status;
171 	__le32 addr;
172 	__le32 size;
173 };
174 
175 struct RxDesc {
176 	__le32 PSize;
177 	__le32 status;
178 	__le32 addr;
179 	__le32 size;
180 };
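
/*
 * Rx and Tx each use a ring of 16-byte little-endian descriptors
 * (NUM_RX_DESC/NUM_TX_DESC entries). The last descriptor of a ring
 * carries the RingEnd flag (below) in its size field so the chip wraps
 * back to the start.
 */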
181 
182 enum _DescStatusBit {
183 	/* _Desc.status */
184 	OWNbit		= 0x80000000, // RXOWN/TXOWN
185 	INTbit		= 0x40000000, // RXINT/TXINT
186 	CRCbit		= 0x00020000, // CRCOFF/CRCEN
187 	PADbit		= 0x00010000, // PREADD/PADEN
188 	/* _Desc.size */
189 	RingEnd		= 0x80000000,
190 	/* TxDesc.status */
191 	LSEN		= 0x08000000, // TSO ? -- FR
192 	IPCS		= 0x04000000,
193 	TCPCS		= 0x02000000,
194 	UDPCS		= 0x01000000,
195 	BSTEN		= 0x00800000,
196 	EXTEN		= 0x00400000,
197 	DEFEN		= 0x00200000,
198 	BKFEN		= 0x00100000,
199 	CRSEN		= 0x00080000,
200 	COLEN		= 0x00040000,
201 	THOL3		= 0x30000000,
202 	THOL2		= 0x20000000,
203 	THOL1		= 0x10000000,
204 	THOL0		= 0x00000000,
205 
206 	WND		= 0x00080000,
207 	TABRT		= 0x00040000,
208 	FIFO		= 0x00020000,
209 	LINK		= 0x00010000,
210 	ColCountMask	= 0x0000ffff,
211 	/* RxDesc.status */
212 	IPON		= 0x20000000,
213 	TCPON		= 0x10000000,
214 	UDPON		= 0x08000000,
215 	Wakup		= 0x00400000,
216 	Magic		= 0x00200000,
217 	Pause		= 0x00100000,
218 	DEFbit		= 0x00200000,
219 	BCAST		= 0x000c0000,
220 	MCAST		= 0x00080000,
221 	UCAST		= 0x00040000,
222 	/* RxDesc.PSize */
223 	TAGON		= 0x80000000,
224 	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
225 	ABORT		= 0x00800000,
226 	SHORT		= 0x00400000,
227 	LIMIT		= 0x00200000,
228 	MIIER		= 0x00100000,
229 	OVRUN		= 0x00080000,
230 	NIBON		= 0x00040000,
231 	COLON		= 0x00020000,
232 	CRCOK		= 0x00010000,
233 	RxSizeMask	= 0x0000ffff
234 	/*
235 	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
236 	 * provide two (unused with Linux) Tx queues. No publicly
237 	 * available documentation alas.
238 	 */
239 };
240 
241 enum sis190_eeprom_access_register_bits {
242 	EECS	= 0x00000001,	// unused
243 	EECLK	= 0x00000002,	// unused
244 	EEDO	= 0x00000008,	// unused
245 	EEDI	= 0x00000004,	// unused
246 	EEREQ	= 0x00000080,
247 	EEROP	= 0x00000200,
248 	EEWOP	= 0x00000100	// unused
249 };
250 
251 /* EEPROM Addresses */
252 enum sis190_eeprom_address {
253 	EEPROMSignature	= 0x00,
254 	EEPROMCLK	= 0x01,	// unused
255 	EEPROMInfo	= 0x02,
256 	EEPROMMACAddr	= 0x03
257 };
258 
259 enum sis190_feature {
260 	F_HAS_RGMII	= 1,
261 	F_PHY_88E1111	= 2,
262 	F_PHY_BCM5461	= 4
263 };
264 
265 struct sis190_private {
266 	void __iomem *mmio_addr;
267 	struct pci_dev *pci_dev;
268 	struct net_device *dev;
269 	spinlock_t lock;
270 	u32 rx_buf_sz;
271 	u32 cur_rx;
272 	u32 cur_tx;
273 	u32 dirty_rx;
274 	u32 dirty_tx;
275 	dma_addr_t rx_dma;
276 	dma_addr_t tx_dma;
277 	struct RxDesc *RxDescRing;
278 	struct TxDesc *TxDescRing;
279 	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
280 	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
281 	struct work_struct phy_task;
282 	struct timer_list timer;
283 	u32 msg_enable;
284 	struct mii_if_info mii_if;
285 	struct list_head first_phy;
286 	u32 features;
287 	u32 negotiated_lpa;
288 	enum {
289 		LNK_OFF,
290 		LNK_ON,
291 		LNK_AUTONEG,
292 	} link_status;
293 };
294 
295 struct sis190_phy {
296 	struct list_head list;
297 	int phy_id;
298 	u16 id[2];
299 	u16 status;
300 	u8  type;
301 };
302 
303 enum sis190_phy_type {
304 	UNKNOWN	= 0x00,
305 	HOME	= 0x01,
306 	LAN	= 0x02,
307 	MIX	= 0x03
308 };
309 
310 static struct mii_chip_info {
311 	const char *name;
312 	u16 id[2];
313 	unsigned int type;
314 	u32 feature;
315 } mii_chip_table[] = {
316 	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
317 	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
318 	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
319 	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
320 	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
321 	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
322 	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
323 	{ NULL, }
324 };
325 
326 static const struct {
327 	const char *name;
328 } sis_chip_info[] = {
329 	{ "SiS 190 PCI Fast Ethernet adapter" },
330 	{ "SiS 191 PCI Gigabit Ethernet adapter" },
331 };
332 
333 static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
334 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
335 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
336 	{ 0, },
337 };
338 
339 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
340 
341 static int rx_copybreak = 200;
342 
343 static struct {
344 	u32 msg_enable;
345 } debug = { -1 };
346 
347 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
348 module_param(rx_copybreak, int, 0);
349 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
350 module_param_named(debug, debug.msg_enable, int, 0);
351 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
352 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
353 MODULE_VERSION(DRV_VERSION);
354 MODULE_LICENSE("GPL");
355 
356 static const u32 sis190_intr_mask =
357 	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
358 
359 /*
360  * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
361  * The chips use a 64 element hash table based on the Ethernet CRC.
362  */
363 static const int multicast_filter_limit = 32;
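
/*
 * Each multicast address selects one of the 64 hash bits: the low six
 * bits of ether_crc() over the address pick a bit in the RxHashTable
 * pair, i.e.
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) & 0x3f;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 *
 * as done in sis190_set_rx_mode() below.
 */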
364 
365 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
366 {
367 	unsigned int i;
368 
369 	SIS_W32(GMIIControl, ctl);
370 
371 	msleep(1);
372 
373 	for (i = 0; i < 100; i++) {
374 		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
375 			break;
376 		msleep(1);
377 	}
378 
379 	if (i > 99)
380 		pr_err("PHY command failed !\n");
381 }
382 
383 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
384 {
385 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
386 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
387 		(((u32) val) << EhnMIIdataShift));
388 }
389 
390 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
391 {
392 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
393 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
394 
395 	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
396 }
397 
398 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
399 {
400 	struct sis190_private *tp = netdev_priv(dev);
401 
402 	mdio_write(tp->mmio_addr, phy_id, reg, val);
403 }
404 
405 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
406 {
407 	struct sis190_private *tp = netdev_priv(dev);
408 
409 	return mdio_read(tp->mmio_addr, phy_id, reg);
410 }
411 
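/*
 * Some MII status bits (e.g. the BMSR link bit) are latched: the first
 * read returns the latched value, so read twice and report the second,
 * current one.
 */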
412 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
413 {
414 	mdio_read(ioaddr, phy_id, reg);
415 	return mdio_read(ioaddr, phy_id, reg);
416 }
417 
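/*
 * EEPROM read: request the word at address 'reg' (shifted into bits
 * 10 and up of ROMInterface), poll until the chip clears EEREQ, then
 * fetch the 16-bit result from the upper half of the register.
 * Returns 0 when no EEPROM is present (ROMControl bit 1 clear) and
 * 0xffff on timeout.
 */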
418 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419 {
420 	u16 data = 0xffff;
421 	unsigned int i;
422 
423 	if (!(SIS_R32(ROMControl) & 0x0002))
424 		return 0;
425 
426 	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
427 
428 	for (i = 0; i < 200; i++) {
429 		if (!(SIS_R32(ROMInterface) & EEREQ)) {
430 			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
431 			break;
432 		}
433 		msleep(1);
434 	}
435 
436 	return data;
437 }
438 
439 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
440 {
441 	SIS_W32(IntrMask, 0x00);
442 	SIS_W32(IntrStatus, 0xffffffff);
443 	SIS_PCI_COMMIT();
444 }
445 
446 static void sis190_asic_down(void __iomem *ioaddr)
447 {
448 	/* Stop the chip's Tx and Rx DMA processes. */
449 
450 	SIS_W32(TxControl, 0x1a00);
451 	SIS_W32(RxControl, 0x1a00);
452 
453 	sis190_irq_mask_and_ack(ioaddr);
454 }
455 
456 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
457 {
458 	desc->size |= cpu_to_le32(RingEnd);
459 }
460 
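/*
 * Hand a descriptor (back) to the chip: clear PSize, restore the buffer
 * size while preserving the RingEnd marker, and only then set OWNbit.
 * The wmb() makes the size update visible to the device before
 * ownership is transferred.
 */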
461 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
462 {
463 	u32 eor = le32_to_cpu(desc->size) & RingEnd;
464 
465 	desc->PSize = 0x0;
466 	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
467 	wmb();
468 	desc->status = cpu_to_le32(OWNbit | INTbit);
469 }
470 
471 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
472 				      u32 rx_buf_sz)
473 {
474 	desc->addr = cpu_to_le32(mapping);
475 	sis190_give_to_asic(desc, rx_buf_sz);
476 }
477 
478 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
479 {
480 	desc->PSize = 0x0;
481 	desc->addr = cpu_to_le32(0xdeadbeef);
482 	desc->size &= cpu_to_le32(RingEnd);
483 	wmb();
484 	desc->status = 0x0;
485 }
486 
487 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
488 					   struct RxDesc *desc)
489 {
490 	u32 rx_buf_sz = tp->rx_buf_sz;
491 	struct sk_buff *skb;
492 	dma_addr_t mapping;
493 
494 	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
495 	if (unlikely(!skb))
496 		goto skb_alloc_failed;
497 	mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
498 			PCI_DMA_FROMDEVICE);
499 	if (pci_dma_mapping_error(tp->pci_dev, mapping))
500 		goto out;
501 	sis190_map_to_asic(desc, mapping, rx_buf_sz);
502 
503 	return skb;
504 
505 out:
506 	dev_kfree_skb_any(skb);
507 skb_alloc_failed:
508 	sis190_make_unusable_by_asic(desc);
509 	return NULL;
510 }
511 
512 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
513 			  u32 start, u32 end)
514 {
515 	u32 cur;
516 
517 	for (cur = start; cur < end; cur++) {
518 		unsigned int i = cur % NUM_RX_DESC;
519 
520 		if (tp->Rx_skbuff[i])
521 			continue;
522 
523 		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
524 
525 		if (!tp->Rx_skbuff[i])
526 			break;
527 	}
528 	return cur - start;
529 }
530 
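/*
 * Copybreak: packets smaller than the rx_copybreak module parameter are
 * copied into a freshly allocated skb so the original full-sized Rx
 * buffer can be handed straight back to the chip; larger packets are
 * passed up as-is and their buffer is unmapped and replaced instead.
 */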
531 static bool sis190_try_rx_copy(struct sis190_private *tp,
532 			       struct sk_buff **sk_buff, int pkt_size,
533 			       dma_addr_t addr)
534 {
535 	struct sk_buff *skb;
536 	bool done = false;
537 
538 	if (pkt_size >= rx_copybreak)
539 		goto out;
540 
541 	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
542 	if (!skb)
543 		goto out;
544 
545 	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
546 				PCI_DMA_FROMDEVICE);
547 	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
548 	*sk_buff = skb;
549 	done = true;
550 out:
551 	return done;
552 }
553 
554 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 {
556 #define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 
558 	if ((status & CRCOK) && !(status & ErrMask))
559 		return 0;
560 
561 	if (!(status & CRCOK))
562 		stats->rx_crc_errors++;
563 	else if (status & OVRUN)
564 		stats->rx_over_errors++;
565 	else if (status & (SHORT | LIMIT))
566 		stats->rx_length_errors++;
567 	else if (status & (MIIER | NIBON | COLON))
568 		stats->rx_frame_errors++;
569 
570 	stats->rx_errors++;
571 	return -1;
572 }
573 
574 static int sis190_rx_interrupt(struct net_device *dev,
575 			       struct sis190_private *tp, void __iomem *ioaddr)
576 {
577 	struct net_device_stats *stats = &dev->stats;
578 	u32 rx_left, cur_rx = tp->cur_rx;
579 	u32 delta, count;
580 
581 	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
582 	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
583 
584 	for (; rx_left > 0; rx_left--, cur_rx++) {
585 		unsigned int entry = cur_rx % NUM_RX_DESC;
586 		struct RxDesc *desc = tp->RxDescRing + entry;
587 		u32 status;
588 
589 		if (le32_to_cpu(desc->status) & OWNbit)
590 			break;
591 
592 		status = le32_to_cpu(desc->PSize);
593 
594 		//netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
595 
596 		if (sis190_rx_pkt_err(status, stats) < 0)
597 			sis190_give_to_asic(desc, tp->rx_buf_sz);
598 		else {
599 			struct sk_buff *skb = tp->Rx_skbuff[entry];
600 			dma_addr_t addr = le32_to_cpu(desc->addr);
601 			int pkt_size = (status & RxSizeMask) - 4;
602 			struct pci_dev *pdev = tp->pci_dev;
603 
604 			if (unlikely(pkt_size > tp->rx_buf_sz)) {
605 				netif_info(tp, intr, dev,
606 					   "(frag) status = %08x\n", status);
607 				stats->rx_dropped++;
608 				stats->rx_length_errors++;
609 				sis190_give_to_asic(desc, tp->rx_buf_sz);
610 				continue;
611 			}
612 
613 
614 			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
615 				pci_dma_sync_single_for_device(pdev, addr,
616 					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
617 				sis190_give_to_asic(desc, tp->rx_buf_sz);
618 			} else {
619 				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
620 						 PCI_DMA_FROMDEVICE);
621 				tp->Rx_skbuff[entry] = NULL;
622 				sis190_make_unusable_by_asic(desc);
623 			}
624 
625 			skb_put(skb, pkt_size);
626 			skb->protocol = eth_type_trans(skb, dev);
627 
628 			sis190_rx_skb(skb);
629 
630 			stats->rx_packets++;
631 			stats->rx_bytes += pkt_size;
632 			if ((status & BCAST) == MCAST)
633 				stats->multicast++;
634 		}
635 	}
636 	count = cur_rx - tp->cur_rx;
637 	tp->cur_rx = cur_rx;
638 
639 	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
640 	if (!delta && count)
641 		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
642 	tp->dirty_rx += delta;
643 
644 	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
645 		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
646 
647 	return count;
648 }
649 
650 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
651 				struct TxDesc *desc)
652 {
653 	unsigned int len;
654 
655 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
656 
657 	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
658 
659 	memset(desc, 0x00, sizeof(*desc));
660 }
661 
662 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
663 {
664 #define TxErrMask	(WND | TABRT | FIFO | LINK)
665 
666 	if (likely(!(status & TxErrMask)))
667 		return 0;
668 
669 	if (status & WND)
670 		stats->tx_window_errors++;
671 	if (status & TABRT)
672 		stats->tx_aborted_errors++;
673 	if (status & FIFO)
674 		stats->tx_fifo_errors++;
675 	if (status & LINK)
676 		stats->tx_carrier_errors++;
677 
678 	stats->tx_errors++;
679 
680 	return -1;
681 }
682 
683 static void sis190_tx_interrupt(struct net_device *dev,
684 				struct sis190_private *tp, void __iomem *ioaddr)
685 {
686 	struct net_device_stats *stats = &dev->stats;
687 	u32 pending, dirty_tx = tp->dirty_tx;
688 	/*
689 	 * It would not be needed if queueing was allowed to be enabled
690 	 * again too early (hint: think preempt and unclocked smp systems).
691 	 */
692 	unsigned int queue_stopped;
693 
694 	smp_rmb();
695 	pending = tp->cur_tx - dirty_tx;
696 	queue_stopped = (pending == NUM_TX_DESC);
697 
698 	for (; pending; pending--, dirty_tx++) {
699 		unsigned int entry = dirty_tx % NUM_TX_DESC;
700 		struct TxDesc *txd = tp->TxDescRing + entry;
701 		u32 status = le32_to_cpu(txd->status);
702 		struct sk_buff *skb;
703 
704 		if (status & OWNbit)
705 			break;
706 
707 		skb = tp->Tx_skbuff[entry];
708 
709 		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
710 			stats->tx_packets++;
711 			stats->tx_bytes += skb->len;
712 			stats->collisions += ((status & ColCountMask) - 1);
713 		}
714 
715 		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
716 		tp->Tx_skbuff[entry] = NULL;
717 		dev_kfree_skb_irq(skb);
718 	}
719 
720 	if (tp->dirty_tx != dirty_tx) {
721 		tp->dirty_tx = dirty_tx;
722 		smp_wmb();
723 		if (queue_stopped)
724 			netif_wake_queue(dev);
725 	}
726 }
727 
728 /*
729  * The interrupt handler does all of the Rx thread work and cleans up after
730  * the Tx thread.
731  */
732 static irqreturn_t sis190_interrupt(int irq, void *__dev)
733 {
734 	struct net_device *dev = __dev;
735 	struct sis190_private *tp = netdev_priv(dev);
736 	void __iomem *ioaddr = tp->mmio_addr;
737 	unsigned int handled = 0;
738 	u32 status;
739 
740 	status = SIS_R32(IntrStatus);
741 
742 	if ((status == 0xffffffff) || !status)
743 		goto out;
744 
745 	handled = 1;
746 
747 	if (unlikely(!netif_running(dev))) {
748 		sis190_asic_down(ioaddr);
749 		goto out;
750 	}
751 
752 	SIS_W32(IntrStatus, status);
753 
754 //	netif_info(tp, intr, dev, "status = %08x\n", status);
755 
756 	if (status & LinkChange) {
757 		netif_info(tp, intr, dev, "link change\n");
758 		del_timer(&tp->timer);
759 		schedule_work(&tp->phy_task);
760 	}
761 
762 	if (status & RxQInt)
763 		sis190_rx_interrupt(dev, tp, ioaddr);
764 
765 	if (status & TxQ0Int)
766 		sis190_tx_interrupt(dev, tp, ioaddr);
767 out:
768 	return IRQ_RETVAL(handled);
769 }
770 
771 #ifdef CONFIG_NET_POLL_CONTROLLER
772 static void sis190_netpoll(struct net_device *dev)
773 {
774 	struct sis190_private *tp = netdev_priv(dev);
775 	struct pci_dev *pdev = tp->pci_dev;
776 
777 	disable_irq(pdev->irq);
778 	sis190_interrupt(pdev->irq, dev);
779 	enable_irq(pdev->irq);
780 }
781 #endif
782 
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784 			       struct sk_buff **sk_buff, struct RxDesc *desc)
785 {
786 	struct pci_dev *pdev = tp->pci_dev;
787 
788 	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789 			 PCI_DMA_FROMDEVICE);
790 	dev_kfree_skb(*sk_buff);
791 	*sk_buff = NULL;
792 	sis190_make_unusable_by_asic(desc);
793 }
794 
795 static void sis190_rx_clear(struct sis190_private *tp)
796 {
797 	unsigned int i;
798 
799 	for (i = 0; i < NUM_RX_DESC; i++) {
800 		if (!tp->Rx_skbuff[i])
801 			continue;
802 		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
803 	}
804 }
805 
806 static void sis190_init_ring_indexes(struct sis190_private *tp)
807 {
808 	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
809 }
810 
811 static int sis190_init_ring(struct net_device *dev)
812 {
813 	struct sis190_private *tp = netdev_priv(dev);
814 
815 	sis190_init_ring_indexes(tp);
816 
817 	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
819 
820 	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821 		goto err_rx_clear;
822 
823 	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
824 
825 	return 0;
826 
827 err_rx_clear:
828 	sis190_rx_clear(tp);
829 	return -ENOMEM;
830 }
831 
832 static void sis190_set_rx_mode(struct net_device *dev)
833 {
834 	struct sis190_private *tp = netdev_priv(dev);
835 	void __iomem *ioaddr = tp->mmio_addr;
836 	unsigned long flags;
837 	u32 mc_filter[2];	/* Multicast hash filter */
838 	u16 rx_mode;
839 
840 	if (dev->flags & IFF_PROMISC) {
841 		rx_mode =
842 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
843 			AcceptAllPhys;
844 		mc_filter[1] = mc_filter[0] = 0xffffffff;
845 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
846 		   (dev->flags & IFF_ALLMULTI)) {
847 		/* Too many to filter perfectly -- accept all multicasts. */
848 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
849 		mc_filter[1] = mc_filter[0] = 0xffffffff;
850 	} else {
851 		struct netdev_hw_addr *ha;
852 
853 		rx_mode = AcceptBroadcast | AcceptMyPhys;
854 		mc_filter[1] = mc_filter[0] = 0;
855 		netdev_for_each_mc_addr(ha, dev) {
856 			int bit_nr =
857 				ether_crc(ETH_ALEN, ha->addr) & 0x3f;
858 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
859 			rx_mode |= AcceptMulticast;
860 		}
861 	}
862 
863 	spin_lock_irqsave(&tp->lock, flags);
864 
865 	SIS_W16(RxMacControl, rx_mode | 0x2);
866 	SIS_W32(RxHashTable, mc_filter[0]);
867 	SIS_W32(RxHashTable + 4, mc_filter[1]);
868 
869 	spin_unlock_irqrestore(&tp->lock, flags);
870 }
871 
872 static void sis190_soft_reset(void __iomem *ioaddr)
873 {
874 	SIS_W32(IntrControl, 0x8000);
875 	SIS_PCI_COMMIT();
876 	SIS_W32(IntrControl, 0x0);
877 	sis190_asic_down(ioaddr);
878 }
879 
880 static void sis190_hw_start(struct net_device *dev)
881 {
882 	struct sis190_private *tp = netdev_priv(dev);
883 	void __iomem *ioaddr = tp->mmio_addr;
884 
885 	sis190_soft_reset(ioaddr);
886 
887 	SIS_W32(TxDescStartAddr, tp->tx_dma);
888 	SIS_W32(RxDescStartAddr, tp->rx_dma);
889 
890 	SIS_W32(IntrStatus, 0xffffffff);
891 	SIS_W32(IntrMask, 0x0);
892 	SIS_W32(GMIIControl, 0x0);
893 	SIS_W32(TxMacControl, 0x60);
894 	SIS_W16(RxMacControl, 0x02);
895 	SIS_W32(RxHashTable, 0x0);
896 	SIS_W32(0x6c, 0x0);
897 	SIS_W32(RxWolCtrl, 0x0);
898 	SIS_W32(RxWolData, 0x0);
899 
900 	SIS_PCI_COMMIT();
901 
902 	sis190_set_rx_mode(dev);
903 
904 	/* Enable all known interrupts by setting the interrupt mask. */
905 	SIS_W32(IntrMask, sis190_intr_mask);
906 
907 	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
908 	SIS_W32(RxControl, 0x1a1d);
909 
910 	netif_start_queue(dev);
911 }
912 
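/*
 * Deferred PHY work, scheduled from the link-change interrupt and from
 * the periodic timer. It waits out a pending BMCR reset, tracks
 * auto-negotiation and, once the link is up, translates the negotiated
 * LPA bits into a StationControl (speed/duplex) value via the reg31
 * table below.
 */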
913 static void sis190_phy_task(struct work_struct *work)
914 {
915 	struct sis190_private *tp =
916 		container_of(work, struct sis190_private, phy_task);
917 	struct net_device *dev = tp->dev;
918 	void __iomem *ioaddr = tp->mmio_addr;
919 	int phy_id = tp->mii_if.phy_id;
920 	u16 val;
921 
922 	rtnl_lock();
923 
924 	if (!netif_running(dev))
925 		goto out_unlock;
926 
927 	val = mdio_read(ioaddr, phy_id, MII_BMCR);
928 	if (val & BMCR_RESET) {
929 		// FIXME: needlessly high ?  -- FR 02/07/2005
930 		mod_timer(&tp->timer, jiffies + HZ/10);
931 		goto out_unlock;
932 	}
933 
934 	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
935 	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
936 		netif_carrier_off(dev);
937 		netif_warn(tp, link, dev, "auto-negotiating...\n");
938 		tp->link_status = LNK_AUTONEG;
939 	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
940 		/* Rejoice ! */
941 		struct {
942 			int val;
943 			u32 ctl;
944 			const char *msg;
945 		} reg31[] = {
946 			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
947 				"1000 Mbps Full Duplex" },
948 			{ LPA_1000HALF, 0x07000c00,
949 				"1000 Mbps Half Duplex" },
950 			{ LPA_100FULL, 0x04000800 | 0x00001000,
951 				"100 Mbps Full Duplex" },
952 			{ LPA_100HALF, 0x04000800,
953 				"100 Mbps Half Duplex" },
954 			{ LPA_10FULL, 0x04000400 | 0x00001000,
955 				"10 Mbps Full Duplex" },
956 			{ LPA_10HALF, 0x04000400,
957 				"10 Mbps Half Duplex" },
958 			{ 0, 0x04000400, "unknown" }
959 		}, *p = NULL;
960 		u16 adv, autoexp, gigadv, gigrec;
961 
962 		val = mdio_read(ioaddr, phy_id, 0x1f);
963 		netif_info(tp, link, dev, "mii ext = %04x\n", val);
964 
965 		val = mdio_read(ioaddr, phy_id, MII_LPA);
966 		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
967 		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
968 		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
969 			   val, adv, autoexp);
970 
971 		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
972 			/* check for gigabit speed */
973 			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
974 			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
975 			val = (gigadv & (gigrec >> 2));
976 			if (val & ADVERTISE_1000FULL)
977 				p = reg31;
978 			else if (val & ADVERTISE_1000HALF)
979 				p = reg31 + 1;
980 		}
981 		if (!p) {
982 			val &= adv;
983 
984 			for (p = reg31; p->val; p++) {
985 				if ((val & p->val) == p->val)
986 					break;
987 			}
988 		}
989 
990 		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
991 
992 		if ((tp->features & F_HAS_RGMII) &&
993 		    (tp->features & F_PHY_BCM5461)) {
994 			// Set Tx Delay in RGMII mode.
995 			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
996 			udelay(200);
997 			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
998 			p->ctl |= 0x03000000;
999 		}
1000 
1001 		SIS_W32(StationControl, p->ctl);
1002 
1003 		if (tp->features & F_HAS_RGMII) {
1004 			SIS_W32(RGDelay, 0x0441);
1005 			SIS_W32(RGDelay, 0x0440);
1006 		}
1007 
1008 		tp->negotiated_lpa = p->val;
1009 
1010 		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1011 		netif_carrier_on(dev);
1012 		tp->link_status = LNK_ON;
1013 	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
1014 		tp->link_status = LNK_OFF;
1015 	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
1016 
1017 out_unlock:
1018 	rtnl_unlock();
1019 }
1020 
1021 static void sis190_phy_timer(unsigned long __opaque)
1022 {
1023 	struct net_device *dev = (struct net_device *)__opaque;
1024 	struct sis190_private *tp = netdev_priv(dev);
1025 
1026 	if (likely(netif_running(dev)))
1027 		schedule_work(&tp->phy_task);
1028 }
1029 
1030 static inline void sis190_delete_timer(struct net_device *dev)
1031 {
1032 	struct sis190_private *tp = netdev_priv(dev);
1033 
1034 	del_timer_sync(&tp->timer);
1035 }
1036 
1037 static inline void sis190_request_timer(struct net_device *dev)
1038 {
1039 	struct sis190_private *tp = netdev_priv(dev);
1040 	struct timer_list *timer = &tp->timer;
1041 
1042 	init_timer(timer);
1043 	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1044 	timer->data = (unsigned long)dev;
1045 	timer->function = sis190_phy_timer;
1046 	add_timer(timer);
1047 }
1048 
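/*
 * The chip apparently ignores the low three bits of RxDesc->size (cf.
 * RX_BUF_MASK), so round the MTU-derived buffer size up to a multiple
 * of 8 bytes.
 */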
1049 static void sis190_set_rxbufsize(struct sis190_private *tp,
1050 				 struct net_device *dev)
1051 {
1052 	unsigned int mtu = dev->mtu;
1053 
1054 	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1055 	/* RxDesc->size has a licence to kill the lower bits */
1056 	if (tp->rx_buf_sz & 0x07) {
1057 		tp->rx_buf_sz += 8;
1058 		tp->rx_buf_sz &= RX_BUF_MASK;
1059 	}
1060 }
1061 
1062 static int sis190_open(struct net_device *dev)
1063 {
1064 	struct sis190_private *tp = netdev_priv(dev);
1065 	struct pci_dev *pdev = tp->pci_dev;
1066 	int rc = -ENOMEM;
1067 
1068 	sis190_set_rxbufsize(tp, dev);
1069 
1070 	/*
1071 	 * Rx and Tx descriptors need 256 bytes alignment.
1072 	 * pci_alloc_consistent() guarantees a stronger alignment.
1073 	 */
1074 	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1075 	if (!tp->TxDescRing)
1076 		goto out;
1077 
1078 	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1079 	if (!tp->RxDescRing)
1080 		goto err_free_tx_0;
1081 
1082 	rc = sis190_init_ring(dev);
1083 	if (rc < 0)
1084 		goto err_free_rx_1;
1085 
1086 	sis190_request_timer(dev);
1087 
1088 	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1089 	if (rc < 0)
1090 		goto err_release_timer_2;
1091 
1092 	sis190_hw_start(dev);
1093 out:
1094 	return rc;
1095 
1096 err_release_timer_2:
1097 	sis190_delete_timer(dev);
1098 	sis190_rx_clear(tp);
1099 err_free_rx_1:
1100 	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1101 		tp->rx_dma);
1102 err_free_tx_0:
1103 	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
1104 		tp->tx_dma);
1105 	goto out;
1106 }
1107 
1108 static void sis190_tx_clear(struct sis190_private *tp)
1109 {
1110 	unsigned int i;
1111 
1112 	for (i = 0; i < NUM_TX_DESC; i++) {
1113 		struct sk_buff *skb = tp->Tx_skbuff[i];
1114 
1115 		if (!skb)
1116 			continue;
1117 
1118 		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1119 		tp->Tx_skbuff[i] = NULL;
1120 		dev_kfree_skb(skb);
1121 
1122 		tp->dev->stats.tx_dropped++;
1123 	}
1124 	tp->cur_tx = tp->dirty_tx = 0;
1125 }
1126 
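/*
 * Bring the adapter down: stop the queue, halt DMA and mask interrupts
 * under the lock, then wait for any in-flight interrupt handler. The
 * loop re-checks IntrMask and repeats in case interrupts were
 * re-enabled behind our back.
 */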
1127 static void sis190_down(struct net_device *dev)
1128 {
1129 	struct sis190_private *tp = netdev_priv(dev);
1130 	void __iomem *ioaddr = tp->mmio_addr;
1131 	unsigned int poll_locked = 0;
1132 
1133 	sis190_delete_timer(dev);
1134 
1135 	netif_stop_queue(dev);
1136 
1137 	do {
1138 		spin_lock_irq(&tp->lock);
1139 
1140 		sis190_asic_down(ioaddr);
1141 
1142 		spin_unlock_irq(&tp->lock);
1143 
1144 		synchronize_irq(dev->irq);
1145 
1146 		if (!poll_locked)
1147 			poll_locked++;
1148 
1149 		synchronize_sched();
1150 
1151 	} while (SIS_R32(IntrMask));
1152 
1153 	sis190_tx_clear(tp);
1154 	sis190_rx_clear(tp);
1155 }
1156 
1157 static int sis190_close(struct net_device *dev)
1158 {
1159 	struct sis190_private *tp = netdev_priv(dev);
1160 	struct pci_dev *pdev = tp->pci_dev;
1161 
1162 	sis190_down(dev);
1163 
1164 	free_irq(dev->irq, dev);
1165 
1166 	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1167 	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1168 
1169 	tp->TxDescRing = NULL;
1170 	tp->RxDescRing = NULL;
1171 
1172 	return 0;
1173 }
1174 
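/*
 * Transmit path: pad short frames to ETH_ZLEN, map the skb for DMA,
 * fill in the descriptor (size, address, RingEnd on the last slot),
 * then set OWNbit after a write barrier and kick TxControl. The queue
 * is stopped once all NUM_TX_DESC slots are in flight.
 */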
1175 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1176 				     struct net_device *dev)
1177 {
1178 	struct sis190_private *tp = netdev_priv(dev);
1179 	void __iomem *ioaddr = tp->mmio_addr;
1180 	u32 len, entry, dirty_tx;
1181 	struct TxDesc *desc;
1182 	dma_addr_t mapping;
1183 
1184 	if (unlikely(skb->len < ETH_ZLEN)) {
1185 		if (skb_padto(skb, ETH_ZLEN)) {
1186 			dev->stats.tx_dropped++;
1187 			goto out;
1188 		}
1189 		len = ETH_ZLEN;
1190 	} else {
1191 		len = skb->len;
1192 	}
1193 
1194 	entry = tp->cur_tx % NUM_TX_DESC;
1195 	desc = tp->TxDescRing + entry;
1196 
1197 	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1198 		netif_stop_queue(dev);
1199 		netif_err(tp, tx_err, dev,
1200 			  "BUG! Tx Ring full when queue awake!\n");
1201 		return NETDEV_TX_BUSY;
1202 	}
1203 
1204 	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1205 	if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
1206 		netif_err(tp, tx_err, dev,
1207 				"PCI mapping failed, dropping packet");
1208 		return NETDEV_TX_BUSY;
1209 	}
1210 
1211 	tp->Tx_skbuff[entry] = skb;
1212 
1213 	desc->PSize = cpu_to_le32(len);
1214 	desc->addr = cpu_to_le32(mapping);
1215 
1216 	desc->size = cpu_to_le32(len);
1217 	if (entry == (NUM_TX_DESC - 1))
1218 		desc->size |= cpu_to_le32(RingEnd);
1219 
1220 	wmb();
1221 
1222 	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1223 	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1224 		/* Half Duplex */
1225 		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1226 		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1227 			desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1228 	}
1229 
1230 	tp->cur_tx++;
1231 
1232 	smp_wmb();
1233 
1234 	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1235 
1236 	dirty_tx = tp->dirty_tx;
1237 	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1238 		netif_stop_queue(dev);
1239 		smp_rmb();
1240 		if (dirty_tx != tp->dirty_tx)
1241 			netif_wake_queue(dev);
1242 	}
1243 out:
1244 	return NETDEV_TX_OK;
1245 }
1246 
1247 static void sis190_free_phy(struct list_head *first_phy)
1248 {
1249 	struct sis190_phy *cur, *next;
1250 
1251 	list_for_each_entry_safe(cur, next, first_phy, list) {
1252 		kfree(cur);
1253 	}
1254 }
1255 
1256 /**
1257  *	sis190_default_phy - Select default PHY for sis190 mac.
1258  *	@dev: the net device to probe for
1259  *
1260  *	Select first detected PHY with link as default.
1261  *	If none has a link, select the PHY whose type is HOME as default.
1262  *	If HOME doesn't exist, select LAN.
1263  */
1264 static u16 sis190_default_phy(struct net_device *dev)
1265 {
1266 	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1267 	struct sis190_private *tp = netdev_priv(dev);
1268 	struct mii_if_info *mii_if = &tp->mii_if;
1269 	void __iomem *ioaddr = tp->mmio_addr;
1270 	u16 status;
1271 
1272 	phy_home = phy_default = phy_lan = NULL;
1273 
1274 	list_for_each_entry(phy, &tp->first_phy, list) {
1275 		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1276 
1277 		// Link ON & Not select default PHY & not ghost PHY.
1278 		if ((status & BMSR_LSTATUS) &&
1279 		    !phy_default &&
1280 		    (phy->type != UNKNOWN)) {
1281 			phy_default = phy;
1282 		} else {
1283 			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1284 			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1285 				   status | BMCR_ANENABLE | BMCR_ISOLATE);
1286 			if (phy->type == HOME)
1287 				phy_home = phy;
1288 			else if (phy->type == LAN)
1289 				phy_lan = phy;
1290 		}
1291 	}
1292 
1293 	if (!phy_default) {
1294 		if (phy_home)
1295 			phy_default = phy_home;
1296 		else if (phy_lan)
1297 			phy_default = phy_lan;
1298 		else
1299 			phy_default = list_first_entry(&tp->first_phy,
1300 						 struct sis190_phy, list);
1301 	}
1302 
1303 	if (mii_if->phy_id != phy_default->phy_id) {
1304 		mii_if->phy_id = phy_default->phy_id;
1305 		if (netif_msg_probe(tp))
1306 			pr_info("%s: Using transceiver at address %d as default\n",
1307 				pci_name(tp->pci_dev), mii_if->phy_id);
1308 	}
1309 
1310 	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1311 	status &= (~BMCR_ISOLATE);
1312 
1313 	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1314 	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1315 
1316 	return status;
1317 }
1318 
1319 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1320 			    struct sis190_phy *phy, unsigned int phy_id,
1321 			    u16 mii_status)
1322 {
1323 	void __iomem *ioaddr = tp->mmio_addr;
1324 	struct mii_chip_info *p;
1325 
1326 	INIT_LIST_HEAD(&phy->list);
1327 	phy->status = mii_status;
1328 	phy->phy_id = phy_id;
1329 
1330 	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1331 	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1332 
1333 	for (p = mii_chip_table; p->type; p++) {
1334 		if ((p->id[0] == phy->id[0]) &&
1335 		    (p->id[1] == (phy->id[1] & 0xfff0))) {
1336 			break;
1337 		}
1338 	}
1339 
1340 	if (p->id[1]) {
1341 		phy->type = (p->type == MIX) ?
1342 			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1343 				LAN : HOME) : p->type;
1344 		tp->features |= p->feature;
1345 		if (netif_msg_probe(tp))
1346 			pr_info("%s: %s transceiver at address %d\n",
1347 				pci_name(tp->pci_dev), p->name, phy_id);
1348 	} else {
1349 		phy->type = UNKNOWN;
1350 		if (netif_msg_probe(tp))
1351 			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1352 				pci_name(tp->pci_dev),
1353 				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1354 	}
1355 }
1356 
1357 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1358 {
1359 	if (tp->features & F_PHY_88E1111) {
1360 		void __iomem *ioaddr = tp->mmio_addr;
1361 		int phy_id = tp->mii_if.phy_id;
1362 		u16 reg[2][2] = {
1363 			{ 0x808b, 0x0ce1 },
1364 			{ 0x808f, 0x0c60 }
1365 		}, *p;
1366 
1367 		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1368 
1369 		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1370 		udelay(200);
1371 		mdio_write(ioaddr, phy_id, 0x14, p[1]);
1372 		udelay(200);
1373 	}
1374 }
1375 
1376 /**
1377  *	sis190_mii_probe - Probe MII PHY for sis190
1378  *	@dev: the net device to probe for
1379  *
1380  *	Search all 32 possible MII PHY addresses.
1381  *	Identify and set the current PHY if one is found,
1382  *	return an error if none is found.
1383  */
1384 static int __devinit sis190_mii_probe(struct net_device *dev)
1385 {
1386 	struct sis190_private *tp = netdev_priv(dev);
1387 	struct mii_if_info *mii_if = &tp->mii_if;
1388 	void __iomem *ioaddr = tp->mmio_addr;
1389 	int phy_id;
1390 	int rc = 0;
1391 
1392 	INIT_LIST_HEAD(&tp->first_phy);
1393 
1394 	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1395 		struct sis190_phy *phy;
1396 		u16 status;
1397 
1398 		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1399 
1400 		// Try next mii if the current one is not accessible.
1401 		if (status == 0xffff || status == 0x0000)
1402 			continue;
1403 
1404 		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1405 		if (!phy) {
1406 			sis190_free_phy(&tp->first_phy);
1407 			rc = -ENOMEM;
1408 			goto out;
1409 		}
1410 
1411 		sis190_init_phy(dev, tp, phy, phy_id, status);
1412 
1413 		list_add(&tp->first_phy, &phy->list);
1414 	}
1415 
1416 	if (list_empty(&tp->first_phy)) {
1417 		if (netif_msg_probe(tp))
1418 			pr_info("%s: No MII transceivers found!\n",
1419 				pci_name(tp->pci_dev));
1420 		rc = -EIO;
1421 		goto out;
1422 	}
1423 
1424 	/* Select default PHY for mac */
1425 	sis190_default_phy(dev);
1426 
1427 	sis190_mii_probe_88e1111_fixup(tp);
1428 
1429 	mii_if->dev = dev;
1430 	mii_if->mdio_read = __mdio_read;
1431 	mii_if->mdio_write = __mdio_write;
1432 	mii_if->phy_id_mask = PHY_ID_ANY;
1433 	mii_if->reg_num_mask = MII_REG_ANY;
1434 out:
1435 	return rc;
1436 }
1437 
1438 static void sis190_mii_remove(struct net_device *dev)
1439 {
1440 	struct sis190_private *tp = netdev_priv(dev);
1441 
1442 	sis190_free_phy(&tp->first_phy);
1443 }
1444 
1445 static void sis190_release_board(struct pci_dev *pdev)
1446 {
1447 	struct net_device *dev = pci_get_drvdata(pdev);
1448 	struct sis190_private *tp = netdev_priv(dev);
1449 
1450 	iounmap(tp->mmio_addr);
1451 	pci_release_regions(pdev);
1452 	pci_disable_device(pdev);
1453 	free_netdev(dev);
1454 }
1455 
1456 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1457 {
1458 	struct sis190_private *tp;
1459 	struct net_device *dev;
1460 	void __iomem *ioaddr;
1461 	int rc;
1462 
1463 	dev = alloc_etherdev(sizeof(*tp));
1464 	if (!dev) {
1465 		rc = -ENOMEM;
1466 		goto err_out_0;
1467 	}
1468 
1469 	SET_NETDEV_DEV(dev, &pdev->dev);
1470 
1471 	tp = netdev_priv(dev);
1472 	tp->dev = dev;
1473 	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1474 
1475 	rc = pci_enable_device(pdev);
1476 	if (rc < 0) {
1477 		if (netif_msg_probe(tp))
1478 			pr_err("%s: enable failure\n", pci_name(pdev));
1479 		goto err_free_dev_1;
1480 	}
1481 
1482 	rc = -ENODEV;
1483 
1484 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1485 		if (netif_msg_probe(tp))
1486 			pr_err("%s: region #0 is no MMIO resource\n",
1487 			       pci_name(pdev));
1488 		goto err_pci_disable_2;
1489 	}
1490 	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1491 		if (netif_msg_probe(tp))
1492 			pr_err("%s: invalid PCI region size(s)\n",
1493 			       pci_name(pdev));
1494 		goto err_pci_disable_2;
1495 	}
1496 
1497 	rc = pci_request_regions(pdev, DRV_NAME);
1498 	if (rc < 0) {
1499 		if (netif_msg_probe(tp))
1500 			pr_err("%s: could not request regions\n",
1501 			       pci_name(pdev));
1502 		goto err_pci_disable_2;
1503 	}
1504 
1505 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1506 	if (rc < 0) {
1507 		if (netif_msg_probe(tp))
1508 			pr_err("%s: DMA configuration failed\n",
1509 			       pci_name(pdev));
1510 		goto err_free_res_3;
1511 	}
1512 
1513 	pci_set_master(pdev);
1514 
1515 	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1516 	if (!ioaddr) {
1517 		if (netif_msg_probe(tp))
1518 			pr_err("%s: cannot remap MMIO, aborting\n",
1519 			       pci_name(pdev));
1520 		rc = -EIO;
1521 		goto err_free_res_3;
1522 	}
1523 
1524 	tp->pci_dev = pdev;
1525 	tp->mmio_addr = ioaddr;
1526 	tp->link_status = LNK_OFF;
1527 
1528 	sis190_irq_mask_and_ack(ioaddr);
1529 
1530 	sis190_soft_reset(ioaddr);
1531 out:
1532 	return dev;
1533 
1534 err_free_res_3:
1535 	pci_release_regions(pdev);
1536 err_pci_disable_2:
1537 	pci_disable_device(pdev);
1538 err_free_dev_1:
1539 	free_netdev(dev);
1540 err_out_0:
1541 	dev = ERR_PTR(rc);
1542 	goto out;
1543 }
1544 
1545 static void sis190_tx_timeout(struct net_device *dev)
1546 {
1547 	struct sis190_private *tp = netdev_priv(dev);
1548 	void __iomem *ioaddr = tp->mmio_addr;
1549 	u8 tmp8;
1550 
1551 	/* Disable Tx, if not already */
1552 	tmp8 = SIS_R8(TxControl);
1553 	if (tmp8 & CmdTxEnb)
1554 		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1555 
1556 	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1557 		   SIS_R32(TxControl), SIS_R32(TxSts));
1558 
1559 	/* Disable interrupts by clearing the interrupt mask. */
1560 	SIS_W32(IntrMask, 0x0000);
1561 
1562 	/* Stop a shared interrupt from scavenging while we are. */
1563 	spin_lock_irq(&tp->lock);
1564 	sis190_tx_clear(tp);
1565 	spin_unlock_irq(&tp->lock);
1566 
1567 	/* ...and finally, reset everything. */
1568 	sis190_hw_start(dev);
1569 
1570 	netif_wake_queue(dev);
1571 }
1572 
1573 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1574 {
1575 	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1576 }
1577 
1578 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1579 						     struct net_device *dev)
1580 {
1581 	struct sis190_private *tp = netdev_priv(dev);
1582 	void __iomem *ioaddr = tp->mmio_addr;
1583 	u16 sig;
1584 	int i;
1585 
1586 	if (netif_msg_probe(tp))
1587 		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
1588 
1589 	/* Check to see if there is a sane EEPROM */
1590 	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1591 
1592 	if ((sig == 0xffff) || (sig == 0x0000)) {
1593 		if (netif_msg_probe(tp))
1594 			pr_info("%s: Error EEPROM read %x\n",
1595 				pci_name(pdev), sig);
1596 		return -EIO;
1597 	}
1598 
1599 	/* Get MAC address from EEPROM */
1600 	for (i = 0; i < ETH_ALEN / 2; i++) {
1601 		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1602 
1603 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1604 	}
1605 
1606 	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1607 
1608 	return 0;
1609 }
1610 
1611 /**
1612  *	sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1613  *	@pdev: PCI device
1614  *	@dev:  network device to get address for
1615  *
1616  *	SiS96x models store the MAC address in APC CMOS RAM, which is
1617  *	accessed through the ISA bridge.
1618  *	The MAC address is read into @dev->dev_addr.
1619  */
1620 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1621 						  struct net_device *dev)
1622 {
1623 	static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1624 	struct sis190_private *tp = netdev_priv(dev);
1625 	struct pci_dev *isa_bridge;
1626 	u8 reg, tmp8;
1627 	unsigned int i;
1628 
1629 	if (netif_msg_probe(tp))
1630 		pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1631 
1632 	for (i = 0; i < ARRAY_SIZE(ids); i++) {
1633 		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1634 		if (isa_bridge)
1635 			break;
1636 	}
1637 
1638 	if (!isa_bridge) {
1639 		if (netif_msg_probe(tp))
1640 			pr_info("%s: Can not find ISA bridge\n",
1641 				pci_name(pdev));
1642 		return -EIO;
1643 	}
1644 
1645 	/* Enable port 78h & 79h to access APC Registers. */
1646 	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1647 	reg = (tmp8 & ~0x02);
1648 	pci_write_config_byte(isa_bridge, 0x48, reg);
1649 	udelay(50);
1650 	pci_read_config_byte(isa_bridge, 0x48, &reg);
1651 
1652 	for (i = 0; i < ETH_ALEN; i++) {
1653 		outb(0x9 + i, 0x78);
1654 		dev->dev_addr[i] = inb(0x79);
1655 	}
1656 
1657 	outb(0x12, 0x78);
1658 	reg = inb(0x79);
1659 
1660 	sis190_set_rgmii(tp, reg);
1661 
1662 	/* Restore the value to ISA Bridge */
1663 	pci_write_config_byte(isa_bridge, 0x48, tmp8);
1664 	pci_dev_put(isa_bridge);
1665 
1666 	return 0;
1667 }
1668 
1669 /**
1670  *      sis190_init_rxfilter - Initialize the Rx filter
1671  *      @dev: network device to initialize
1672  *
1673  *      Set receive filter address to our MAC address
1674  *      and enable packet filtering.
1675  */
1676 static inline void sis190_init_rxfilter(struct net_device *dev)
1677 {
1678 	struct sis190_private *tp = netdev_priv(dev);
1679 	void __iomem *ioaddr = tp->mmio_addr;
1680 	u16 ctl;
1681 	int i;
1682 
1683 	ctl = SIS_R16(RxMacControl);
1684 	/*
1685 	 * Disable packet filtering before setting filter.
1686 	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1687 	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1688 	 */
1689 	SIS_W16(RxMacControl, ctl & ~0x0f00);
1690 
1691 	for (i = 0; i < ETH_ALEN; i++)
1692 		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1693 
1694 	SIS_W16(RxMacControl, ctl);
1695 	SIS_PCI_COMMIT();
1696 }
1697 
1698 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1699 					 struct net_device *dev)
1700 {
1701 	int rc;
1702 
1703 	rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1704 	if (rc < 0) {
1705 		u8 reg;
1706 
1707 		pci_read_config_byte(pdev, 0x73, &reg);
1708 
1709 		if (reg & 0x00000001)
1710 			rc = sis190_get_mac_addr_from_apc(pdev, dev);
1711 	}
1712 	return rc;
1713 }
1714 
1715 static void sis190_set_speed_auto(struct net_device *dev)
1716 {
1717 	struct sis190_private *tp = netdev_priv(dev);
1718 	void __iomem *ioaddr = tp->mmio_addr;
1719 	int phy_id = tp->mii_if.phy_id;
1720 	int val;
1721 
1722 	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1723 
1724 	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1725 
1726 	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1727 	// unchanged.
1728 	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1729 		   ADVERTISE_100FULL | ADVERTISE_10FULL |
1730 		   ADVERTISE_100HALF | ADVERTISE_10HALF);
1731 
1732 	// Enable 1000 Full Mode.
1733 	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1734 
1735 	// Enable auto-negotiation and restart auto-negotiation.
1736 	mdio_write(ioaddr, phy_id, MII_BMCR,
1737 		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1738 }
1739 
1740 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1741 {
1742 	struct sis190_private *tp = netdev_priv(dev);
1743 
1744 	return mii_ethtool_gset(&tp->mii_if, cmd);
1745 }
1746 
1747 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1748 {
1749 	struct sis190_private *tp = netdev_priv(dev);
1750 
1751 	return mii_ethtool_sset(&tp->mii_if, cmd);
1752 }
1753 
1754 static void sis190_get_drvinfo(struct net_device *dev,
1755 			       struct ethtool_drvinfo *info)
1756 {
1757 	struct sis190_private *tp = netdev_priv(dev);
1758 
1759 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1760 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1761 	strlcpy(info->bus_info, pci_name(tp->pci_dev),
1762 		sizeof(info->bus_info));
1763 }
1764 
1765 static int sis190_get_regs_len(struct net_device *dev)
1766 {
1767 	return SIS190_REGS_SIZE;
1768 }
1769 
1770 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1771 			    void *p)
1772 {
1773 	struct sis190_private *tp = netdev_priv(dev);
1774 	unsigned long flags;
1775 
1776 	if (regs->len > SIS190_REGS_SIZE)
1777 		regs->len = SIS190_REGS_SIZE;
1778 
1779 	spin_lock_irqsave(&tp->lock, flags);
1780 	memcpy_fromio(p, tp->mmio_addr, regs->len);
1781 	spin_unlock_irqrestore(&tp->lock, flags);
1782 }
1783 
1784 static int sis190_nway_reset(struct net_device *dev)
1785 {
1786 	struct sis190_private *tp = netdev_priv(dev);
1787 
1788 	return mii_nway_restart(&tp->mii_if);
1789 }
1790 
1791 static u32 sis190_get_msglevel(struct net_device *dev)
1792 {
1793 	struct sis190_private *tp = netdev_priv(dev);
1794 
1795 	return tp->msg_enable;
1796 }
1797 
1798 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1799 {
1800 	struct sis190_private *tp = netdev_priv(dev);
1801 
1802 	tp->msg_enable = value;
1803 }
1804 
1805 static const struct ethtool_ops sis190_ethtool_ops = {
1806 	.get_settings	= sis190_get_settings,
1807 	.set_settings	= sis190_set_settings,
1808 	.get_drvinfo	= sis190_get_drvinfo,
1809 	.get_regs_len	= sis190_get_regs_len,
1810 	.get_regs	= sis190_get_regs,
1811 	.get_link	= ethtool_op_get_link,
1812 	.get_msglevel	= sis190_get_msglevel,
1813 	.set_msglevel	= sis190_set_msglevel,
1814 	.nway_reset	= sis190_nway_reset,
1815 };
1816 
1817 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1818 {
1819 	struct sis190_private *tp = netdev_priv(dev);
1820 
1821 	return !netif_running(dev) ? -EINVAL :
1822 		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1823 }
1824 
1825 static int sis190_mac_addr(struct net_device  *dev, void *p)
1826 {
1827 	int rc;
1828 
1829 	rc = eth_mac_addr(dev, p);
1830 	if (!rc)
1831 		sis190_init_rxfilter(dev);
1832 	return rc;
1833 }
1834 
1835 static const struct net_device_ops sis190_netdev_ops = {
1836 	.ndo_open		= sis190_open,
1837 	.ndo_stop		= sis190_close,
1838 	.ndo_do_ioctl		= sis190_ioctl,
1839 	.ndo_start_xmit		= sis190_start_xmit,
1840 	.ndo_tx_timeout		= sis190_tx_timeout,
1841 	.ndo_set_rx_mode	= sis190_set_rx_mode,
1842 	.ndo_change_mtu		= eth_change_mtu,
1843 	.ndo_set_mac_address	= sis190_mac_addr,
1844 	.ndo_validate_addr	= eth_validate_addr,
1845 #ifdef CONFIG_NET_POLL_CONTROLLER
1846 	.ndo_poll_controller	 = sis190_netpoll,
1847 #endif
1848 };
1849 
1850 static int __devinit sis190_init_one(struct pci_dev *pdev,
1851 				     const struct pci_device_id *ent)
1852 {
1853 	static int printed_version = 0;
1854 	struct sis190_private *tp;
1855 	struct net_device *dev;
1856 	void __iomem *ioaddr;
1857 	int rc;
1858 
1859 	if (!printed_version) {
1860 		if (netif_msg_drv(&debug))
1861 			pr_info(SIS190_DRIVER_NAME " loaded\n");
1862 		printed_version = 1;
1863 	}
1864 
1865 	dev = sis190_init_board(pdev);
1866 	if (IS_ERR(dev)) {
1867 		rc = PTR_ERR(dev);
1868 		goto out;
1869 	}
1870 
1871 	pci_set_drvdata(pdev, dev);
1872 
1873 	tp = netdev_priv(dev);
1874 	ioaddr = tp->mmio_addr;
1875 
1876 	rc = sis190_get_mac_addr(pdev, dev);
1877 	if (rc < 0)
1878 		goto err_release_board;
1879 
1880 	sis190_init_rxfilter(dev);
1881 
1882 	INIT_WORK(&tp->phy_task, sis190_phy_task);
1883 
1884 	dev->netdev_ops = &sis190_netdev_ops;
1885 
1886 	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1887 	dev->irq = pdev->irq;
1888 	dev->base_addr = (unsigned long) 0xdead;
1889 	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1890 
1891 	spin_lock_init(&tp->lock);
1892 
1893 	rc = sis190_mii_probe(dev);
1894 	if (rc < 0)
1895 		goto err_release_board;
1896 
1897 	rc = register_netdev(dev);
1898 	if (rc < 0)
1899 		goto err_remove_mii;
1900 
1901 	if (netif_msg_probe(tp)) {
1902 		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1903 			    pci_name(pdev),
1904 			    sis_chip_info[ent->driver_data].name,
1905 			    ioaddr, dev->irq, dev->dev_addr);
1906 		netdev_info(dev, "%s mode.\n",
1907 			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1908 	}
1909 
1910 	netif_carrier_off(dev);
1911 
1912 	sis190_set_speed_auto(dev);
1913 out:
1914 	return rc;
1915 
1916 err_remove_mii:
1917 	sis190_mii_remove(dev);
1918 err_release_board:
1919 	sis190_release_board(pdev);
1920 	goto out;
1921 }
1922 
1923 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1924 {
1925 	struct net_device *dev = pci_get_drvdata(pdev);
1926 	struct sis190_private *tp = netdev_priv(dev);
1927 
1928 	sis190_mii_remove(dev);
1929 	cancel_work_sync(&tp->phy_task);
1930 	unregister_netdev(dev);
1931 	sis190_release_board(pdev);
1932 	pci_set_drvdata(pdev, NULL);
1933 }
1934 
1935 static struct pci_driver sis190_pci_driver = {
1936 	.name		= DRV_NAME,
1937 	.id_table	= sis190_pci_tbl,
1938 	.probe		= sis190_init_one,
1939 	.remove		= __devexit_p(sis190_remove_one),
1940 };
1941 
1942 static int __init sis190_init_module(void)
1943 {
1944 	return pci_register_driver(&sis190_pci_driver);
1945 }
1946 
1947 static void __exit sis190_cleanup_module(void)
1948 {
1949 	pci_unregister_driver(&sis190_pci_driver);
1950 }
1951 
1952 module_init(sis190_init_module);
1953 module_exit(sis190_cleanup_module);
1954