1 /*
2    sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3 
4    Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5    Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6    Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7 
8    Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9    genuine driver.
10 
11    This software may be used and distributed according to the terms of
12    the GNU General Public License (GPL), incorporated herein by reference.
13    Drivers based on or derived from this code fall under the GPL and must
14    retain the authorship, copyright and license notice.  This file is not
15    a complete program and may only be used when the entire operating
16    system is licensed under the GPL.
17 
18    See the file COPYING in this distribution for more information.
19 
20 */
21 
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/netdevice.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/pci.h>
32 #include <linux/mii.h>
33 #include <linux/delay.h>
34 #include <linux/crc32.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/slab.h>
37 #include <asm/irq.h>
38 
39 #define PHY_MAX_ADDR		32
40 #define PHY_ID_ANY		0x1f
41 #define MII_REG_ANY		0x1f
42 
43 #define DRV_VERSION		"1.4"
44 #define DRV_NAME		"sis190"
45 #define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
46 
47 #define sis190_rx_skb			netif_rx
48 #define sis190_rx_quota(count, quota)	count
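/*
 * No NAPI: received skbs are handed straight to netif_rx() and the Rx
 * quota argument is ignored, i.e. effectively unlimited.
 */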
49 
50 #define NUM_TX_DESC		64	/* [8..1024] */
51 #define NUM_RX_DESC		64	/* [8..8192] */
52 #define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
53 #define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
54 #define RX_BUF_SIZE		1536
55 #define RX_BUF_MASK		0xfff8
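/*
 * RxDesc.size apparently clobbers its low three bits (see the comment in
 * sis190_set_rxbufsize()), so Rx buffer sizes are kept a multiple of 8
 * via this mask.
 */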
56 
57 #define SIS190_REGS_SIZE	0x80
58 #define SIS190_TX_TIMEOUT	(6*HZ)
59 #define SIS190_PHY_TIMEOUT	(10*HZ)
60 #define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
61 				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
62 				 NETIF_MSG_IFDOWN)
63 
64 /* Enhanced PHY access register bit definitions */
65 #define EhnMIIread		0x0000
66 #define EhnMIIwrite		0x0020
67 #define EhnMIIdataShift		16
68 #define EhnMIIpmdShift		6	/* 7016 only */
69 #define EhnMIIregShift		11
70 #define EhnMIIreq		0x0010
71 #define EhnMIInotDone		0x0010
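/*
 * A GMIIControl command word is built from these fields, roughly (see
 * mdio_write() below):
 *   EhnMIIreq | EhnMIIwrite | (reg << EhnMIIregShift) |
 *       (phy_id << EhnMIIpmdShift) | (val << EhnMIIdataShift)
 * EhnMIInotDone remains set until the access completes.
 */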
72 
73 /* Write/read MMIO register */
74 #define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
75 #define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
76 #define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
77 #define SIS_R8(reg)		readb (ioaddr + (reg))
78 #define SIS_R16(reg)		readw (ioaddr + (reg))
79 #define SIS_R32(reg)		readl (ioaddr + (reg))
80 
81 #define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
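/* Reading IntrControl back flushes posted MMIO writes out to the chip. */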
82 
83 enum sis190_registers {
84 	TxControl		= 0x00,
85 	TxDescStartAddr		= 0x04,
86 	rsv0			= 0x08,	// reserved
87 	TxSts			= 0x0c,	// unused (Control/Status)
88 	RxControl		= 0x10,
89 	RxDescStartAddr		= 0x14,
90 	rsv1			= 0x18,	// reserved
91 	RxSts			= 0x1c,	// unused
92 	IntrStatus		= 0x20,
93 	IntrMask		= 0x24,
94 	IntrControl		= 0x28,
95 	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
96 	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
97 	rsv2			= 0x34,	// reserved
98 	ROMControl		= 0x38,
99 	ROMInterface		= 0x3c,
100 	StationControl		= 0x40,
101 	GMIIControl		= 0x44,
102 	GIoCR			= 0x48, // unused (GMAC IO Compensation)
103 	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
104 	TxMacControl		= 0x50,
105 	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
106 	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
107 	rsv3			= 0x5c, // reserved
108 	RxMacControl		= 0x60,
109 	RxMacAddr		= 0x62,
110 	RxHashTable		= 0x68,
111 	// Undocumented		= 0x6c,
112 	RxWolCtrl		= 0x70,
113 	RxWolData		= 0x74, // unused (Rx WOL Data Access)
114 	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
115 	rsv4			= 0x7c, // reserved
116 };
117 
118 enum sis190_register_content {
119 	/* IntrStatus */
120 	SoftInt			= 0x40000000,	// unused
121 	Timeup			= 0x20000000,	// unused
122 	PauseFrame		= 0x00080000,	// unused
123 	MagicPacket		= 0x00040000,	// unused
124 	WakeupFrame		= 0x00020000,	// unused
125 	LinkChange		= 0x00010000,
126 	RxQEmpty		= 0x00000080,
127 	RxQInt			= 0x00000040,
128 	TxQ1Empty		= 0x00000020,	// unused
129 	TxQ1Int			= 0x00000010,
130 	TxQ0Empty		= 0x00000008,	// unused
131 	TxQ0Int			= 0x00000004,
132 	RxHalt			= 0x00000002,
133 	TxHalt			= 0x00000001,
134 
135 	/* {Rx/Tx}CmdBits */
136 	CmdReset		= 0x10,
137 	CmdRxEnb		= 0x08,		// unused
138 	CmdTxEnb		= 0x01,
139 	RxBufEmpty		= 0x01,		// unused
140 
141 	/* Cfg9346Bits */
142 	Cfg9346_Lock		= 0x00,		// unused
143 	Cfg9346_Unlock		= 0xc0,		// unused
144 
145 	/* RxMacControl */
146 	AcceptErr		= 0x20,		// unused
147 	AcceptRunt		= 0x10,		// unused
148 	AcceptBroadcast		= 0x0800,
149 	AcceptMulticast		= 0x0400,
150 	AcceptMyPhys		= 0x0200,
151 	AcceptAllPhys		= 0x0100,
152 
153 	/* RxConfigBits */
154 	RxCfgFIFOShift		= 13,
155 	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?
156 
157 	/* TxConfigBits */
158 	TxInterFrameGapShift	= 24,
159 	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */
160 
161 	LinkStatus		= 0x02,		// unused
162 	FullDup			= 0x01,		// unused
163 
164 	/* TBICSRBit */
165 	TBILinkOK		= 0x02000000,	// unused
166 };
167 
168 struct TxDesc {
169 	__le32 PSize;
170 	__le32 status;
171 	__le32 addr;
172 	__le32 size;
173 };
174 
175 struct RxDesc {
176 	__le32 PSize;
177 	__le32 status;
178 	__le32 addr;
179 	__le32 size;
180 };
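/*
 * Both descriptor formats are four little-endian 32-bit words (16 bytes);
 * the rings themselves need 256-byte alignment (see sis190_open()).
 */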
181 
182 enum _DescStatusBit {
183 	/* _Desc.status */
184 	OWNbit		= 0x80000000, // RXOWN/TXOWN
185 	INTbit		= 0x40000000, // RXINT/TXINT
186 	CRCbit		= 0x00020000, // CRCOFF/CRCEN
187 	PADbit		= 0x00010000, // PREADD/PADEN
188 	/* _Desc.size */
189 	RingEnd		= 0x80000000,
190 	/* TxDesc.status */
191 	LSEN		= 0x08000000, // TSO ? -- FR
192 	IPCS		= 0x04000000,
193 	TCPCS		= 0x02000000,
194 	UDPCS		= 0x01000000,
195 	BSTEN		= 0x00800000,
196 	EXTEN		= 0x00400000,
197 	DEFEN		= 0x00200000,
198 	BKFEN		= 0x00100000,
199 	CRSEN		= 0x00080000,
200 	COLEN		= 0x00040000,
201 	THOL3		= 0x30000000,
202 	THOL2		= 0x20000000,
203 	THOL1		= 0x10000000,
204 	THOL0		= 0x00000000,
205 
206 	WND		= 0x00080000,
207 	TABRT		= 0x00040000,
208 	FIFO		= 0x00020000,
209 	LINK		= 0x00010000,
210 	ColCountMask	= 0x0000ffff,
211 	/* RxDesc.status */
212 	IPON		= 0x20000000,
213 	TCPON		= 0x10000000,
214 	UDPON		= 0x08000000,
215 	Wakup		= 0x00400000,
216 	Magic		= 0x00200000,
217 	Pause		= 0x00100000,
218 	DEFbit		= 0x00200000,
219 	BCAST		= 0x000c0000,
220 	MCAST		= 0x00080000,
221 	UCAST		= 0x00040000,
222 	/* RxDesc.PSize */
223 	TAGON		= 0x80000000,
224 	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
225 	ABORT		= 0x00800000,
226 	SHORT		= 0x00400000,
227 	LIMIT		= 0x00200000,
228 	MIIER		= 0x00100000,
229 	OVRUN		= 0x00080000,
230 	NIBON		= 0x00040000,
231 	COLON		= 0x00020000,
232 	CRCOK		= 0x00010000,
233 	RxSizeMask	= 0x0000ffff
234 	/*
235 	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
236 	 * provide two (unused with Linux) Tx queues. No publicly
237 	 * available documentation alas.
238 	 */
239 };
240 
241 enum sis190_eeprom_access_register_bits {
242 	EECS	= 0x00000001,	// unused
243 	EECLK	= 0x00000002,	// unused
244 	EEDO	= 0x00000008,	// unused
245 	EEDI	= 0x00000004,	// unused
246 	EEREQ	= 0x00000080,
247 	EEROP	= 0x00000200,
248 	EEWOP	= 0x00000100	// unused
249 };
250 
251 /* EEPROM Addresses */
252 enum sis190_eeprom_address {
253 	EEPROMSignature	= 0x00,
254 	EEPROMCLK	= 0x01,	// unused
255 	EEPROMInfo	= 0x02,
256 	EEPROMMACAddr	= 0x03
257 };
258 
259 enum sis190_feature {
260 	F_HAS_RGMII	= 1,
261 	F_PHY_88E1111	= 2,
262 	F_PHY_BCM5461	= 4
263 };
264 
265 struct sis190_private {
266 	void __iomem *mmio_addr;
267 	struct pci_dev *pci_dev;
268 	struct net_device *dev;
269 	spinlock_t lock;
270 	u32 rx_buf_sz;
271 	u32 cur_rx;
272 	u32 cur_tx;
273 	u32 dirty_rx;
274 	u32 dirty_tx;
275 	dma_addr_t rx_dma;
276 	dma_addr_t tx_dma;
277 	struct RxDesc *RxDescRing;
278 	struct TxDesc *TxDescRing;
279 	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
280 	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
281 	struct work_struct phy_task;
282 	struct timer_list timer;
283 	u32 msg_enable;
284 	struct mii_if_info mii_if;
285 	struct list_head first_phy;
286 	u32 features;
287 	u32 negotiated_lpa;
288 	enum {
289 		LNK_OFF,
290 		LNK_ON,
291 		LNK_AUTONEG,
292 	} link_status;
293 };
294 
295 struct sis190_phy {
296 	struct list_head list;
297 	int phy_id;
298 	u16 id[2];
299 	u16 status;
300 	u8  type;
301 };
302 
303 enum sis190_phy_type {
304 	UNKNOWN	= 0x00,
305 	HOME	= 0x01,
306 	LAN	= 0x02,
307 	MIX	= 0x03
308 };
309 
310 static struct mii_chip_info {
311 	const char *name;
312 	u16 id[2];
313 	unsigned int type;
314 	u32 feature;
315 } mii_chip_table[] = {
316 	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
317 	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
318 	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
319 	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
320 	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
321 	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
322 	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
323 	{ NULL, }
324 };
325 
326 static const struct {
327 	const char *name;
328 } sis_chip_info[] = {
329 	{ "SiS 190 PCI Fast Ethernet adapter" },
330 	{ "SiS 191 PCI Gigabit Ethernet adapter" },
331 };
332 
333 static const struct pci_device_id sis190_pci_tbl[] = {
334 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
335 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
336 	{ 0, },
337 };
338 
339 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
340 
341 static int rx_copybreak = 200;
342 
343 static struct {
344 	u32 msg_enable;
345 } debug = { -1 };
346 
347 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
348 module_param(rx_copybreak, int, 0);
349 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
350 module_param_named(debug, debug.msg_enable, int, 0);
351 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
352 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
353 MODULE_VERSION(DRV_VERSION);
354 MODULE_LICENSE("GPL");
355 
356 static const u32 sis190_intr_mask =
357 	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
358 
359 /*
360  * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
361  * The chips use a 64 element hash table based on the Ethernet CRC.
362  */
363 static const int multicast_filter_limit = 32;
364 
365 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
366 {
367 	unsigned int i;
368 
369 	SIS_W32(GMIIControl, ctl);
370 
371 	msleep(1);
372 
373 	for (i = 0; i < 100; i++) {
374 		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
375 			break;
376 		msleep(1);
377 	}
378 
379 	if (i > 99)
380 		pr_err("PHY command failed !\n");
381 }
382 
383 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
384 {
385 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
386 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
387 		(((u32) val) << EhnMIIdataShift));
388 }
389 
390 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
391 {
392 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
393 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
394 
395 	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
396 }
397 
398 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
399 {
400 	struct sis190_private *tp = netdev_priv(dev);
401 
402 	mdio_write(tp->mmio_addr, phy_id, reg, val);
403 }
404 
405 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
406 {
407 	struct sis190_private *tp = netdev_priv(dev);
408 
409 	return mdio_read(tp->mmio_addr, phy_id, reg);
410 }
411 
412 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
413 {
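	/*
	 * MII status bits such as BMSR_LSTATUS latch link-down events: read
	 * twice so the second read reports the current state.
	 */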
414 	mdio_read(ioaddr, phy_id, reg);
415 	return mdio_read(ioaddr, phy_id, reg);
416 }
417 
418 static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419 {
420 	u16 data = 0xffff;
421 	unsigned int i;
422 
423 	if (!(SIS_R32(ROMControl) & 0x0002))
424 		return 0;
425 
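	/*
	 * Start a read: EEREQ requests EEPROM access, EEROP selects a read
	 * operation and the word address is shifted into place.  Poll until
	 * EEREQ clears, then the data sits in the upper 16 bits of
	 * ROMInterface.
	 */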
426 	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
427 
428 	for (i = 0; i < 200; i++) {
429 		if (!(SIS_R32(ROMInterface) & EEREQ)) {
430 			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
431 			break;
432 		}
433 		msleep(1);
434 	}
435 
436 	return data;
437 }
438 
439 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
440 {
441 	SIS_W32(IntrMask, 0x00);
442 	SIS_W32(IntrStatus, 0xffffffff);
443 	SIS_PCI_COMMIT();
444 }
445 
446 static void sis190_asic_down(void __iomem *ioaddr)
447 {
448 	/* Stop the chip's Tx and Rx DMA processes. */
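	/*
	 * 0x1a00 presumably preserves the DMA burst configuration (cf. the
	 * RxCfgDMAShift comment above) while leaving the enable bits clear.
	 */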
449 
450 	SIS_W32(TxControl, 0x1a00);
451 	SIS_W32(RxControl, 0x1a00);
452 
453 	sis190_irq_mask_and_ack(ioaddr);
454 }
455 
456 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
457 {
458 	desc->size |= cpu_to_le32(RingEnd);
459 }
460 
461 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
462 {
463 	u32 eor = le32_to_cpu(desc->size) & RingEnd;
464 
465 	desc->PSize = 0x0;
466 	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
467 	wmb();
468 	desc->status = cpu_to_le32(OWNbit | INTbit);
469 }
470 
471 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
472 				      u32 rx_buf_sz)
473 {
474 	desc->addr = cpu_to_le32(mapping);
475 	sis190_give_to_asic(desc, rx_buf_sz);
476 }
477 
478 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
479 {
480 	desc->PSize = 0x0;
481 	desc->addr = cpu_to_le32(0xdeadbeef);
482 	desc->size &= cpu_to_le32(RingEnd);
483 	wmb();
484 	desc->status = 0x0;
485 }
486 
487 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
488 					   struct RxDesc *desc)
489 {
490 	u32 rx_buf_sz = tp->rx_buf_sz;
491 	struct sk_buff *skb;
492 	dma_addr_t mapping;
493 
494 	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
495 	if (unlikely(!skb))
496 		goto skb_alloc_failed;
497 	mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz,
498 				 DMA_FROM_DEVICE);
499 	if (dma_mapping_error(&tp->pci_dev->dev, mapping))
500 		goto out;
501 	sis190_map_to_asic(desc, mapping, rx_buf_sz);
502 
503 	return skb;
504 
505 out:
506 	dev_kfree_skb_any(skb);
507 skb_alloc_failed:
508 	sis190_make_unusable_by_asic(desc);
509 	return NULL;
510 }
511 
512 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
513 			  u32 start, u32 end)
514 {
515 	u32 cur;
516 
517 	for (cur = start; cur < end; cur++) {
518 		unsigned int i = cur % NUM_RX_DESC;
519 
520 		if (tp->Rx_skbuff[i])
521 			continue;
522 
523 		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
524 
525 		if (!tp->Rx_skbuff[i])
526 			break;
527 	}
528 	return cur - start;
529 }
530 
531 static bool sis190_try_rx_copy(struct sis190_private *tp,
532 			       struct sk_buff **sk_buff, int pkt_size,
533 			       dma_addr_t addr)
534 {
535 	struct sk_buff *skb;
536 	bool done = false;
537 
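	/*
	 * Copybreak: packets smaller than rx_copybreak are copied into a
	 * freshly allocated skb so the original full-sized Rx buffer can be
	 * handed straight back to the hardware without remapping.
	 */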
538 	if (pkt_size >= rx_copybreak)
539 		goto out;
540 
541 	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
542 	if (!skb)
543 		goto out;
544 
545 	dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, tp->rx_buf_sz,
546 				DMA_FROM_DEVICE);
547 	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
548 	*sk_buff = skb;
549 	done = true;
550 out:
551 	return done;
552 }
553 
554 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 {
556 #define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 
558 	if ((status & CRCOK) && !(status & ErrMask))
559 		return 0;
560 
561 	if (!(status & CRCOK))
562 		stats->rx_crc_errors++;
563 	else if (status & OVRUN)
564 		stats->rx_over_errors++;
565 	else if (status & (SHORT | LIMIT))
566 		stats->rx_length_errors++;
567 	else if (status & (MIIER | NIBON | COLON))
568 		stats->rx_frame_errors++;
569 
570 	stats->rx_errors++;
571 	return -1;
572 }
573 
574 static int sis190_rx_interrupt(struct net_device *dev,
575 			       struct sis190_private *tp, void __iomem *ioaddr)
576 {
577 	struct net_device_stats *stats = &dev->stats;
578 	u32 rx_left, cur_rx = tp->cur_rx;
579 	u32 delta, count;
580 
581 	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
582 	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
583 
584 	for (; rx_left > 0; rx_left--, cur_rx++) {
585 		unsigned int entry = cur_rx % NUM_RX_DESC;
586 		struct RxDesc *desc = tp->RxDescRing + entry;
587 		u32 status;
588 
589 		if (le32_to_cpu(desc->status) & OWNbit)
590 			break;
591 
592 		status = le32_to_cpu(desc->PSize);
593 
594 		//netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
595 
596 		if (sis190_rx_pkt_err(status, stats) < 0)
597 			sis190_give_to_asic(desc, tp->rx_buf_sz);
598 		else {
599 			struct sk_buff *skb = tp->Rx_skbuff[entry];
600 			dma_addr_t addr = le32_to_cpu(desc->addr);
601 			int pkt_size = (status & RxSizeMask) - 4;
602 			struct pci_dev *pdev = tp->pci_dev;
603 
604 			if (unlikely(pkt_size > tp->rx_buf_sz)) {
605 				netif_info(tp, intr, dev,
606 					   "(frag) status = %08x\n", status);
607 				stats->rx_dropped++;
608 				stats->rx_length_errors++;
609 				sis190_give_to_asic(desc, tp->rx_buf_sz);
610 				continue;
611 			}
612 
613 
614 			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
615 				dma_sync_single_for_device(&pdev->dev, addr,
616 							   tp->rx_buf_sz,
617 							   DMA_FROM_DEVICE);
618 				sis190_give_to_asic(desc, tp->rx_buf_sz);
619 			} else {
620 				dma_unmap_single(&pdev->dev, addr,
621 						 tp->rx_buf_sz,
622 						 DMA_FROM_DEVICE);
623 				tp->Rx_skbuff[entry] = NULL;
624 				sis190_make_unusable_by_asic(desc);
625 			}
626 
627 			skb_put(skb, pkt_size);
628 			skb->protocol = eth_type_trans(skb, dev);
629 
630 			sis190_rx_skb(skb);
631 
632 			stats->rx_packets++;
633 			stats->rx_bytes += pkt_size;
634 			if ((status & BCAST) == MCAST)
635 				stats->multicast++;
636 		}
637 	}
638 	count = cur_rx - tp->cur_rx;
639 	tp->cur_rx = cur_rx;
640 
641 	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
642 	if (!delta && count)
643 		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
644 	tp->dirty_rx += delta;
645 
646 	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
647 		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
648 
649 	return count;
650 }
651 
652 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
653 				struct TxDesc *desc)
654 {
655 	unsigned int len;
656 
657 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
658 
659 	dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), len,
660 			 DMA_TO_DEVICE);
661 
662 	memset(desc, 0x00, sizeof(*desc));
663 }
664 
665 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
666 {
667 #define TxErrMask	(WND | TABRT | FIFO | LINK)
668 
669 	if (likely(!(status & TxErrMask)))
670 		return 0;
671 
672 	if (status & WND)
673 		stats->tx_window_errors++;
674 	if (status & TABRT)
675 		stats->tx_aborted_errors++;
676 	if (status & FIFO)
677 		stats->tx_fifo_errors++;
678 	if (status & LINK)
679 		stats->tx_carrier_errors++;
680 
681 	stats->tx_errors++;
682 
683 	return -1;
684 }
685 
686 static void sis190_tx_interrupt(struct net_device *dev,
687 				struct sis190_private *tp, void __iomem *ioaddr)
688 {
689 	struct net_device_stats *stats = &dev->stats;
690 	u32 pending, dirty_tx = tp->dirty_tx;
691 	/*
692 	 * Records whether the queue was full before reclaiming; it would not
693 	 * be needed if it were safe to re-enable queueing too early
	 * (hint: think preempt and unclocked SMP systems).
694 	 */
695 	unsigned int queue_stopped;
696 
697 	smp_rmb();
698 	pending = tp->cur_tx - dirty_tx;
699 	queue_stopped = (pending == NUM_TX_DESC);
700 
701 	for (; pending; pending--, dirty_tx++) {
702 		unsigned int entry = dirty_tx % NUM_TX_DESC;
703 		struct TxDesc *txd = tp->TxDescRing + entry;
704 		u32 status = le32_to_cpu(txd->status);
705 		struct sk_buff *skb;
706 
707 		if (status & OWNbit)
708 			break;
709 
710 		skb = tp->Tx_skbuff[entry];
711 
712 		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
713 			stats->tx_packets++;
714 			stats->tx_bytes += skb->len;
715 			stats->collisions += ((status & ColCountMask) - 1);
716 		}
717 
718 		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
719 		tp->Tx_skbuff[entry] = NULL;
720 		dev_consume_skb_irq(skb);
721 	}
722 
723 	if (tp->dirty_tx != dirty_tx) {
724 		tp->dirty_tx = dirty_tx;
725 		smp_wmb();
726 		if (queue_stopped)
727 			netif_wake_queue(dev);
728 	}
729 }
730 
731 /*
732  * The interrupt handler does all of the Rx thread work and cleans up after
733  * the Tx thread.
734  */
735 static irqreturn_t sis190_irq(int irq, void *__dev)
736 {
737 	struct net_device *dev = __dev;
738 	struct sis190_private *tp = netdev_priv(dev);
739 	void __iomem *ioaddr = tp->mmio_addr;
740 	unsigned int handled = 0;
741 	u32 status;
742 
743 	status = SIS_R32(IntrStatus);
744 
745 	if ((status == 0xffffffff) || !status)
746 		goto out;
747 
748 	handled = 1;
749 
750 	if (unlikely(!netif_running(dev))) {
751 		sis190_asic_down(ioaddr);
752 		goto out;
753 	}
754 
755 	SIS_W32(IntrStatus, status);
756 
757 //	netif_info(tp, intr, dev, "status = %08x\n", status);
758 
759 	if (status & LinkChange) {
760 		netif_info(tp, intr, dev, "link change\n");
761 		del_timer(&tp->timer);
762 		schedule_work(&tp->phy_task);
763 	}
764 
765 	if (status & RxQInt)
766 		sis190_rx_interrupt(dev, tp, ioaddr);
767 
768 	if (status & TxQ0Int)
769 		sis190_tx_interrupt(dev, tp, ioaddr);
770 out:
771 	return IRQ_RETVAL(handled);
772 }
773 
774 #ifdef CONFIG_NET_POLL_CONTROLLER
775 static void sis190_netpoll(struct net_device *dev)
776 {
777 	struct sis190_private *tp = netdev_priv(dev);
778 	const int irq = tp->pci_dev->irq;
779 
780 	disable_irq(irq);
781 	sis190_irq(irq, dev);
782 	enable_irq(irq);
783 }
784 #endif
785 
786 static void sis190_free_rx_skb(struct sis190_private *tp,
787 			       struct sk_buff **sk_buff, struct RxDesc *desc)
788 {
789 	struct pci_dev *pdev = tp->pci_dev;
790 
791 	dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
792 			 DMA_FROM_DEVICE);
793 	dev_kfree_skb(*sk_buff);
794 	*sk_buff = NULL;
795 	sis190_make_unusable_by_asic(desc);
796 }
797 
798 static void sis190_rx_clear(struct sis190_private *tp)
799 {
800 	unsigned int i;
801 
802 	for (i = 0; i < NUM_RX_DESC; i++) {
803 		if (!tp->Rx_skbuff[i])
804 			continue;
805 		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
806 	}
807 }
808 
809 static void sis190_init_ring_indexes(struct sis190_private *tp)
810 {
811 	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
812 }
813 
814 static int sis190_init_ring(struct net_device *dev)
815 {
816 	struct sis190_private *tp = netdev_priv(dev);
817 
818 	sis190_init_ring_indexes(tp);
819 
820 	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
821 	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
822 
823 	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
824 		goto err_rx_clear;
825 
826 	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
827 
828 	return 0;
829 
830 err_rx_clear:
831 	sis190_rx_clear(tp);
832 	return -ENOMEM;
833 }
834 
835 static void sis190_set_rx_mode(struct net_device *dev)
836 {
837 	struct sis190_private *tp = netdev_priv(dev);
838 	void __iomem *ioaddr = tp->mmio_addr;
839 	unsigned long flags;
840 	u32 mc_filter[2];	/* Multicast hash filter */
841 	u16 rx_mode;
842 
843 	if (dev->flags & IFF_PROMISC) {
844 		rx_mode =
845 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
846 			AcceptAllPhys;
847 		mc_filter[1] = mc_filter[0] = 0xffffffff;
848 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
849 		   (dev->flags & IFF_ALLMULTI)) {
850 		/* Too many to filter perfectly -- accept all multicasts. */
851 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
852 		mc_filter[1] = mc_filter[0] = 0xffffffff;
853 	} else {
854 		struct netdev_hw_addr *ha;
855 
856 		rx_mode = AcceptBroadcast | AcceptMyPhys;
857 		mc_filter[1] = mc_filter[0] = 0;
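		/*
		 * One bit per multicast address in the 64-bit hash filter:
		 * the low six bits of the Ethernet CRC pick the bit, and
		 * bit 5 of that value selects the 32-bit RxHashTable word.
		 */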
858 		netdev_for_each_mc_addr(ha, dev) {
859 			int bit_nr =
860 				ether_crc(ETH_ALEN, ha->addr) & 0x3f;
861 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
862 			rx_mode |= AcceptMulticast;
863 		}
864 	}
865 
866 	spin_lock_irqsave(&tp->lock, flags);
867 
868 	SIS_W16(RxMacControl, rx_mode | 0x2);
869 	SIS_W32(RxHashTable, mc_filter[0]);
870 	SIS_W32(RxHashTable + 4, mc_filter[1]);
871 
872 	spin_unlock_irqrestore(&tp->lock, flags);
873 }
874 
875 static void sis190_soft_reset(void __iomem *ioaddr)
876 {
877 	SIS_W32(IntrControl, 0x8000);
878 	SIS_PCI_COMMIT();
879 	SIS_W32(IntrControl, 0x0);
880 	sis190_asic_down(ioaddr);
881 }
882 
883 static void sis190_hw_start(struct net_device *dev)
884 {
885 	struct sis190_private *tp = netdev_priv(dev);
886 	void __iomem *ioaddr = tp->mmio_addr;
887 
888 	sis190_soft_reset(ioaddr);
889 
890 	SIS_W32(TxDescStartAddr, tp->tx_dma);
891 	SIS_W32(RxDescStartAddr, tp->rx_dma);
892 
893 	SIS_W32(IntrStatus, 0xffffffff);
894 	SIS_W32(IntrMask, 0x0);
895 	SIS_W32(GMIIControl, 0x0);
896 	SIS_W32(TxMacControl, 0x60);
897 	SIS_W16(RxMacControl, 0x02);
898 	SIS_W32(RxHashTable, 0x0);
899 	SIS_W32(0x6c, 0x0);
900 	SIS_W32(RxWolCtrl, 0x0);
901 	SIS_W32(RxWolData, 0x0);
902 
903 	SIS_PCI_COMMIT();
904 
905 	sis190_set_rx_mode(dev);
906 
907 	/* Enable all known interrupts by setting the interrupt mask. */
908 	SIS_W32(IntrMask, sis190_intr_mask);
909 
910 	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
911 	SIS_W32(RxControl, 0x1a1d);
912 
913 	netif_start_queue(dev);
914 }
915 
916 static void sis190_phy_task(struct work_struct *work)
917 {
918 	struct sis190_private *tp =
919 		container_of(work, struct sis190_private, phy_task);
920 	struct net_device *dev = tp->dev;
921 	void __iomem *ioaddr = tp->mmio_addr;
922 	int phy_id = tp->mii_if.phy_id;
923 	u16 val;
924 
925 	rtnl_lock();
926 
927 	if (!netif_running(dev))
928 		goto out_unlock;
929 
930 	val = mdio_read(ioaddr, phy_id, MII_BMCR);
931 	if (val & BMCR_RESET) {
932 		// FIXME: needlessly high ?  -- FR 02/07/2005
933 		mod_timer(&tp->timer, jiffies + HZ/10);
934 		goto out_unlock;
935 	}
936 
937 	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
938 	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
939 		netif_carrier_off(dev);
940 		netif_warn(tp, link, dev, "auto-negotiating...\n");
941 		tp->link_status = LNK_AUTONEG;
942 	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
943 		/* Rejoice ! */
944 		struct {
945 			int val;
946 			u32 ctl;
947 			const char *msg;
948 		} reg31[] = {
949 			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
950 				"1000 Mbps Full Duplex" },
951 			{ LPA_1000HALF, 0x07000c00,
952 				"1000 Mbps Half Duplex" },
953 			{ LPA_100FULL, 0x04000800 | 0x00001000,
954 				"100 Mbps Full Duplex" },
955 			{ LPA_100HALF, 0x04000800,
956 				"100 Mbps Half Duplex" },
957 			{ LPA_10FULL, 0x04000400 | 0x00001000,
958 				"10 Mbps Full Duplex" },
959 			{ LPA_10HALF, 0x04000400,
960 				"10 Mbps Half Duplex" },
961 			{ 0, 0x04000400, "unknown" }
962 		}, *p = NULL;
963 		u16 adv, autoexp, gigadv, gigrec;
964 
965 		val = mdio_read(ioaddr, phy_id, 0x1f);
966 		netif_info(tp, link, dev, "mii ext = %04x\n", val);
967 
968 		val = mdio_read(ioaddr, phy_id, MII_LPA);
969 		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
970 		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
971 		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
972 			   val, adv, autoexp);
973 
974 		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
975 			/* check for gigabit speed */
976 			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
977 			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
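			/*
			 * MII_STAT1000 reports the partner's 1000BASE-T
			 * abilities two bits above where MII_CTRL1000 holds
			 * our advertisement; shifting right by two aligns the
			 * fields so the AND below yields the common modes.
			 */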
978 			val = (gigadv & (gigrec >> 2));
979 			if (val & ADVERTISE_1000FULL)
980 				p = reg31;
981 			else if (val & ADVERTISE_1000HALF)
982 				p = reg31 + 1;
983 		}
984 		if (!p) {
985 			val &= adv;
986 
987 			for (p = reg31; p->val; p++) {
988 				if ((val & p->val) == p->val)
989 					break;
990 			}
991 		}
992 
993 		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
994 
995 		if ((tp->features & F_HAS_RGMII) &&
996 		    (tp->features & F_PHY_BCM5461)) {
997 			// Set Tx Delay in RGMII mode.
998 			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
999 			udelay(200);
1000 			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
1001 			p->ctl |= 0x03000000;
1002 		}
1003 
1004 		SIS_W32(StationControl, p->ctl);
1005 
1006 		if (tp->features & F_HAS_RGMII) {
1007 			SIS_W32(RGDelay, 0x0441);
1008 			SIS_W32(RGDelay, 0x0440);
1009 		}
1010 
1011 		tp->negotiated_lpa = p->val;
1012 
1013 		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1014 		netif_carrier_on(dev);
1015 		tp->link_status = LNK_ON;
1016 	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
1017 		tp->link_status = LNK_OFF;
1018 	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
1019 
1020 out_unlock:
1021 	rtnl_unlock();
1022 }
1023 
1024 static void sis190_phy_timer(struct timer_list *t)
1025 {
1026 	struct sis190_private *tp = from_timer(tp, t, timer);
1027 	struct net_device *dev = tp->dev;
1028 
1029 	if (likely(netif_running(dev)))
1030 		schedule_work(&tp->phy_task);
1031 }
1032 
1033 static inline void sis190_delete_timer(struct net_device *dev)
1034 {
1035 	struct sis190_private *tp = netdev_priv(dev);
1036 
1037 	del_timer_sync(&tp->timer);
1038 }
1039 
1040 static inline void sis190_request_timer(struct net_device *dev)
1041 {
1042 	struct sis190_private *tp = netdev_priv(dev);
1043 	struct timer_list *timer = &tp->timer;
1044 
1045 	timer_setup(timer, sis190_phy_timer, 0);
1046 	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1047 	add_timer(timer);
1048 }
1049 
1050 static void sis190_set_rxbufsize(struct sis190_private *tp,
1051 				 struct net_device *dev)
1052 {
1053 	unsigned int mtu = dev->mtu;
1054 
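	/*
	 * Worked example: the default MTU of 1500 keeps RX_BUF_SIZE (1536);
	 * an MTU of 1600 would give 1600 + ETH_HLEN + 8 = 1622 bytes,
	 * rounded up to 1624 by the masking below.
	 */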
1055 	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1056 	/* RxDesc->size has a licence to kill the lower bits */
1057 	if (tp->rx_buf_sz & 0x07) {
1058 		tp->rx_buf_sz += 8;
1059 		tp->rx_buf_sz &= RX_BUF_MASK;
1060 	}
1061 }
1062 
1063 static int sis190_open(struct net_device *dev)
1064 {
1065 	struct sis190_private *tp = netdev_priv(dev);
1066 	struct pci_dev *pdev = tp->pci_dev;
1067 	int rc = -ENOMEM;
1068 
1069 	sis190_set_rxbufsize(tp, dev);
1070 
1071 	/*
1072 	 * Rx and Tx descriptors need 256 bytes alignment.
1073 	 * dma_alloc_coherent() guarantees a stronger alignment.
1074 	 */
1075 	tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
1076 					    &tp->tx_dma, GFP_KERNEL);
1077 	if (!tp->TxDescRing)
1078 		goto out;
1079 
1080 	tp->RxDescRing = dma_alloc_coherent(&pdev->dev, RX_RING_BYTES,
1081 					    &tp->rx_dma, GFP_KERNEL);
1082 	if (!tp->RxDescRing)
1083 		goto err_free_tx_0;
1084 
1085 	rc = sis190_init_ring(dev);
1086 	if (rc < 0)
1087 		goto err_free_rx_1;
1088 
1089 	sis190_request_timer(dev);
1090 
1091 	rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
1092 	if (rc < 0)
1093 		goto err_release_timer_2;
1094 
1095 	sis190_hw_start(dev);
1096 out:
1097 	return rc;
1098 
1099 err_release_timer_2:
1100 	sis190_delete_timer(dev);
1101 	sis190_rx_clear(tp);
1102 err_free_rx_1:
1103 	dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
1104 			  tp->rx_dma);
1105 err_free_tx_0:
1106 	dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
1107 			  tp->tx_dma);
1108 	goto out;
1109 }
1110 
1111 static void sis190_tx_clear(struct sis190_private *tp)
1112 {
1113 	unsigned int i;
1114 
1115 	for (i = 0; i < NUM_TX_DESC; i++) {
1116 		struct sk_buff *skb = tp->Tx_skbuff[i];
1117 
1118 		if (!skb)
1119 			continue;
1120 
1121 		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1122 		tp->Tx_skbuff[i] = NULL;
1123 		dev_kfree_skb(skb);
1124 
1125 		tp->dev->stats.tx_dropped++;
1126 	}
1127 	tp->cur_tx = tp->dirty_tx = 0;
1128 }
1129 
1130 static void sis190_down(struct net_device *dev)
1131 {
1132 	struct sis190_private *tp = netdev_priv(dev);
1133 	void __iomem *ioaddr = tp->mmio_addr;
1134 	unsigned int poll_locked = 0;
1135 
1136 	sis190_delete_timer(dev);
1137 
1138 	netif_stop_queue(dev);
1139 
1140 	do {
1141 		spin_lock_irq(&tp->lock);
1142 
1143 		sis190_asic_down(ioaddr);
1144 
1145 		spin_unlock_irq(&tp->lock);
1146 
1147 		synchronize_irq(tp->pci_dev->irq);
1148 
1149 		if (!poll_locked)
1150 			poll_locked++;
1151 
1152 		synchronize_rcu();
1153 
1154 	} while (SIS_R32(IntrMask));
1155 
1156 	sis190_tx_clear(tp);
1157 	sis190_rx_clear(tp);
1158 }
1159 
1160 static int sis190_close(struct net_device *dev)
1161 {
1162 	struct sis190_private *tp = netdev_priv(dev);
1163 	struct pci_dev *pdev = tp->pci_dev;
1164 
1165 	sis190_down(dev);
1166 
1167 	free_irq(pdev->irq, dev);
1168 
1169 	dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
1170 			  tp->tx_dma);
1171 	dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
1172 			  tp->rx_dma);
1173 
1174 	tp->TxDescRing = NULL;
1175 	tp->RxDescRing = NULL;
1176 
1177 	return 0;
1178 }
1179 
1180 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1181 				     struct net_device *dev)
1182 {
1183 	struct sis190_private *tp = netdev_priv(dev);
1184 	void __iomem *ioaddr = tp->mmio_addr;
1185 	u32 len, entry, dirty_tx;
1186 	struct TxDesc *desc;
1187 	dma_addr_t mapping;
1188 
1189 	if (unlikely(skb->len < ETH_ZLEN)) {
1190 		if (skb_padto(skb, ETH_ZLEN)) {
1191 			dev->stats.tx_dropped++;
1192 			goto out;
1193 		}
1194 		len = ETH_ZLEN;
1195 	} else {
1196 		len = skb->len;
1197 	}
1198 
1199 	entry = tp->cur_tx % NUM_TX_DESC;
1200 	desc = tp->TxDescRing + entry;
1201 
1202 	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1203 		netif_stop_queue(dev);
1204 		netif_err(tp, tx_err, dev,
1205 			  "BUG! Tx Ring full when queue awake!\n");
1206 		return NETDEV_TX_BUSY;
1207 	}
1208 
1209 	mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
1210 				 DMA_TO_DEVICE);
1211 	if (dma_mapping_error(&tp->pci_dev->dev, mapping)) {
1212 		netif_err(tp, tx_err, dev,
1213 				"PCI mapping failed, dropping packet");
1214 		return NETDEV_TX_BUSY;
1215 	}
1216 
1217 	tp->Tx_skbuff[entry] = skb;
1218 
1219 	desc->PSize = cpu_to_le32(len);
1220 	desc->addr = cpu_to_le32(mapping);
1221 
1222 	desc->size = cpu_to_le32(len);
1223 	if (entry == (NUM_TX_DESC - 1))
1224 		desc->size |= cpu_to_le32(RingEnd);
1225 
1226 	wmb();
1227 
1228 	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1229 	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1230 		/* Half Duplex */
1231 		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1232 		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1233 			desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1234 	}
1235 
1236 	tp->cur_tx++;
1237 
1238 	smp_wmb();
1239 
1240 	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1241 
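	/*
	 * If the ring just became full, stop the queue, then re-check
	 * dirty_tx: a completion racing in sis190_tx_interrupt() may already
	 * have freed descriptors, in which case the queue is woken again.
	 */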
1242 	dirty_tx = tp->dirty_tx;
1243 	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1244 		netif_stop_queue(dev);
1245 		smp_rmb();
1246 		if (dirty_tx != tp->dirty_tx)
1247 			netif_wake_queue(dev);
1248 	}
1249 out:
1250 	return NETDEV_TX_OK;
1251 }
1252 
1253 static void sis190_free_phy(struct list_head *first_phy)
1254 {
1255 	struct sis190_phy *cur, *next;
1256 
1257 	list_for_each_entry_safe(cur, next, first_phy, list) {
1258 		kfree(cur);
1259 	}
1260 }
1261 
1262 /**
1263  *	sis190_default_phy - Select default PHY for sis190 mac.
1264  *	@dev: the net device to probe for
1265  *
1266  *	Select the first detected PHY with link up as the default.
1267  *	If none has link up, select a PHY of type HOME as the default.
1268  *	If no HOME PHY exists, fall back to a LAN PHY.
1269  */
1270 static u16 sis190_default_phy(struct net_device *dev)
1271 {
1272 	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1273 	struct sis190_private *tp = netdev_priv(dev);
1274 	struct mii_if_info *mii_if = &tp->mii_if;
1275 	void __iomem *ioaddr = tp->mmio_addr;
1276 	u16 status;
1277 
1278 	phy_home = phy_default = phy_lan = NULL;
1279 
1280 	list_for_each_entry(phy, &tp->first_phy, list) {
1281 		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1282 
1283 		// Link is up, no default PHY selected yet, and not a ghost PHY.
1284 		if ((status & BMSR_LSTATUS) &&
1285 		    !phy_default &&
1286 		    (phy->type != UNKNOWN)) {
1287 			phy_default = phy;
1288 		} else {
1289 			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1290 			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1291 				   status | BMCR_ANENABLE | BMCR_ISOLATE);
1292 			if (phy->type == HOME)
1293 				phy_home = phy;
1294 			else if (phy->type == LAN)
1295 				phy_lan = phy;
1296 		}
1297 	}
1298 
1299 	if (!phy_default) {
1300 		if (phy_home)
1301 			phy_default = phy_home;
1302 		else if (phy_lan)
1303 			phy_default = phy_lan;
1304 		else
1305 			phy_default = list_first_entry(&tp->first_phy,
1306 						 struct sis190_phy, list);
1307 	}
1308 
1309 	if (mii_if->phy_id != phy_default->phy_id) {
1310 		mii_if->phy_id = phy_default->phy_id;
1311 		if (netif_msg_probe(tp))
1312 			pr_info("%s: Using transceiver at address %d as default\n",
1313 				pci_name(tp->pci_dev), mii_if->phy_id);
1314 	}
1315 
1316 	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1317 	status &= (~BMCR_ISOLATE);
1318 
1319 	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1320 	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1321 
1322 	return status;
1323 }
1324 
1325 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1326 			    struct sis190_phy *phy, unsigned int phy_id,
1327 			    u16 mii_status)
1328 {
1329 	void __iomem *ioaddr = tp->mmio_addr;
1330 	struct mii_chip_info *p;
1331 
1332 	INIT_LIST_HEAD(&phy->list);
1333 	phy->status = mii_status;
1334 	phy->phy_id = phy_id;
1335 
1336 	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1337 	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1338 
1339 	for (p = mii_chip_table; p->type; p++) {
1340 		if ((p->id[0] == phy->id[0]) &&
1341 		    (p->id[1] == (phy->id[1] & 0xfff0))) {
1342 			break;
1343 		}
1344 	}
1345 
1346 	if (p->id[1]) {
1347 		phy->type = (p->type == MIX) ?
1348 			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1349 				LAN : HOME) : p->type;
1350 		tp->features |= p->feature;
1351 		if (netif_msg_probe(tp))
1352 			pr_info("%s: %s transceiver at address %d\n",
1353 				pci_name(tp->pci_dev), p->name, phy_id);
1354 	} else {
1355 		phy->type = UNKNOWN;
1356 		if (netif_msg_probe(tp))
1357 			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1358 				pci_name(tp->pci_dev),
1359 				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1360 	}
1361 }
1362 
1363 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1364 {
1365 	if (tp->features & F_PHY_88E1111) {
1366 		void __iomem *ioaddr = tp->mmio_addr;
1367 		int phy_id = tp->mii_if.phy_id;
1368 		u16 reg[2][2] = {
1369 			{ 0x808b, 0x0ce1 },
1370 			{ 0x808f, 0x0c60 }
1371 		}, *p;
1372 
1373 		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1374 
1375 		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1376 		udelay(200);
1377 		mdio_write(ioaddr, phy_id, 0x14, p[1]);
1378 		udelay(200);
1379 	}
1380 }
1381 
1382 /**
1383  *	sis190_mii_probe - Probe MII PHY for sis190
1384  *	@dev: the net device to probe for
1385  *
1386  *	Search all 32 possible MII PHY addresses.
1387  *	Identify and set the current PHY if one is found,
1388  *	return an error if none is found.
1389  */
1390 static int sis190_mii_probe(struct net_device *dev)
1391 {
1392 	struct sis190_private *tp = netdev_priv(dev);
1393 	struct mii_if_info *mii_if = &tp->mii_if;
1394 	void __iomem *ioaddr = tp->mmio_addr;
1395 	int phy_id;
1396 	int rc = 0;
1397 
1398 	INIT_LIST_HEAD(&tp->first_phy);
1399 
1400 	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1401 		struct sis190_phy *phy;
1402 		u16 status;
1403 
1404 		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1405 
1406 		// Try next mii if the current one is not accessible.
1407 		if (status == 0xffff || status == 0x0000)
1408 			continue;
1409 
1410 		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1411 		if (!phy) {
1412 			sis190_free_phy(&tp->first_phy);
1413 			rc = -ENOMEM;
1414 			goto out;
1415 		}
1416 
1417 		sis190_init_phy(dev, tp, phy, phy_id, status);
1418 
1419 		list_add(&tp->first_phy, &phy->list);
1420 	}
1421 
1422 	if (list_empty(&tp->first_phy)) {
1423 		if (netif_msg_probe(tp))
1424 			pr_info("%s: No MII transceivers found!\n",
1425 				pci_name(tp->pci_dev));
1426 		rc = -EIO;
1427 		goto out;
1428 	}
1429 
1430 	/* Select default PHY for mac */
1431 	sis190_default_phy(dev);
1432 
1433 	sis190_mii_probe_88e1111_fixup(tp);
1434 
1435 	mii_if->dev = dev;
1436 	mii_if->mdio_read = __mdio_read;
1437 	mii_if->mdio_write = __mdio_write;
1438 	mii_if->phy_id_mask = PHY_ID_ANY;
1439 	mii_if->reg_num_mask = MII_REG_ANY;
1440 out:
1441 	return rc;
1442 }
1443 
1444 static void sis190_mii_remove(struct net_device *dev)
1445 {
1446 	struct sis190_private *tp = netdev_priv(dev);
1447 
1448 	sis190_free_phy(&tp->first_phy);
1449 }
1450 
1451 static void sis190_release_board(struct pci_dev *pdev)
1452 {
1453 	struct net_device *dev = pci_get_drvdata(pdev);
1454 	struct sis190_private *tp = netdev_priv(dev);
1455 
1456 	iounmap(tp->mmio_addr);
1457 	pci_release_regions(pdev);
1458 	pci_disable_device(pdev);
1459 	free_netdev(dev);
1460 }
1461 
1462 static struct net_device *sis190_init_board(struct pci_dev *pdev)
1463 {
1464 	struct sis190_private *tp;
1465 	struct net_device *dev;
1466 	void __iomem *ioaddr;
1467 	int rc;
1468 
1469 	dev = alloc_etherdev(sizeof(*tp));
1470 	if (!dev) {
1471 		rc = -ENOMEM;
1472 		goto err_out_0;
1473 	}
1474 
1475 	SET_NETDEV_DEV(dev, &pdev->dev);
1476 
1477 	tp = netdev_priv(dev);
1478 	tp->dev = dev;
1479 	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1480 
1481 	rc = pci_enable_device(pdev);
1482 	if (rc < 0) {
1483 		if (netif_msg_probe(tp))
1484 			pr_err("%s: enable failure\n", pci_name(pdev));
1485 		goto err_free_dev_1;
1486 	}
1487 
1488 	rc = -ENODEV;
1489 
1490 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1491 		if (netif_msg_probe(tp))
1492 			pr_err("%s: region #0 is no MMIO resource\n",
1493 			       pci_name(pdev));
1494 		goto err_pci_disable_2;
1495 	}
1496 	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1497 		if (netif_msg_probe(tp))
1498 			pr_err("%s: invalid PCI region size(s)\n",
1499 			       pci_name(pdev));
1500 		goto err_pci_disable_2;
1501 	}
1502 
1503 	rc = pci_request_regions(pdev, DRV_NAME);
1504 	if (rc < 0) {
1505 		if (netif_msg_probe(tp))
1506 			pr_err("%s: could not request regions\n",
1507 			       pci_name(pdev));
1508 		goto err_pci_disable_2;
1509 	}
1510 
1511 	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1512 	if (rc < 0) {
1513 		if (netif_msg_probe(tp))
1514 			pr_err("%s: DMA configuration failed\n",
1515 			       pci_name(pdev));
1516 		goto err_free_res_3;
1517 	}
1518 
1519 	pci_set_master(pdev);
1520 
1521 	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1522 	if (!ioaddr) {
1523 		if (netif_msg_probe(tp))
1524 			pr_err("%s: cannot remap MMIO, aborting\n",
1525 			       pci_name(pdev));
1526 		rc = -EIO;
1527 		goto err_free_res_3;
1528 	}
1529 
1530 	tp->pci_dev = pdev;
1531 	tp->mmio_addr = ioaddr;
1532 	tp->link_status = LNK_OFF;
1533 
1534 	sis190_irq_mask_and_ack(ioaddr);
1535 
1536 	sis190_soft_reset(ioaddr);
1537 out:
1538 	return dev;
1539 
1540 err_free_res_3:
1541 	pci_release_regions(pdev);
1542 err_pci_disable_2:
1543 	pci_disable_device(pdev);
1544 err_free_dev_1:
1545 	free_netdev(dev);
1546 err_out_0:
1547 	dev = ERR_PTR(rc);
1548 	goto out;
1549 }
1550 
1551 static void sis190_tx_timeout(struct net_device *dev, unsigned int txqueue)
1552 {
1553 	struct sis190_private *tp = netdev_priv(dev);
1554 	void __iomem *ioaddr = tp->mmio_addr;
1555 	u8 tmp8;
1556 
1557 	/* Disable Tx, if not already */
1558 	tmp8 = SIS_R8(TxControl);
1559 	if (tmp8 & CmdTxEnb)
1560 		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1561 
1562 	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1563 		   SIS_R32(TxControl), SIS_R32(TxSts));
1564 
1565 	/* Disable interrupts by clearing the interrupt mask. */
1566 	SIS_W32(IntrMask, 0x0000);
1567 
1568 	/* Stop a shared interrupt from scavenging while we are. */
1569 	spin_lock_irq(&tp->lock);
1570 	sis190_tx_clear(tp);
1571 	spin_unlock_irq(&tp->lock);
1572 
1573 	/* ...and finally, reset everything. */
1574 	sis190_hw_start(dev);
1575 
1576 	netif_wake_queue(dev);
1577 }
1578 
1579 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1580 {
1581 	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1582 }
1583 
1584 static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1585 					   struct net_device *dev)
1586 {
1587 	struct sis190_private *tp = netdev_priv(dev);
1588 	void __iomem *ioaddr = tp->mmio_addr;
1589 	__le16 addr[ETH_ALEN / 2];
1590 	u16 sig;
1591 	int i;
1592 
1593 	if (netif_msg_probe(tp))
1594 		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
1595 
1596 	/* Check to see if there is a sane EEPROM */
1597 	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1598 
1599 	if ((sig == 0xffff) || (sig == 0x0000)) {
1600 		if (netif_msg_probe(tp))
1601 			pr_info("%s: Error EEPROM read %x\n",
1602 				pci_name(pdev), sig);
1603 		return -EIO;
1604 	}
1605 
1606 	/* Get MAC address from EEPROM */
1607 	for (i = 0; i < ETH_ALEN / 2; i++) {
1608 		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1609 
1610 		addr[i] = cpu_to_le16(w);
1611 	}
1612 	eth_hw_addr_set(dev, (u8 *)addr);
1613 
1614 	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1615 
1616 	return 0;
1617 }
1618 
1619 /**
1620  *	sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1621  *	@pdev: PCI device
1622  *	@dev:  network device to get address for
1623  *
1624  *	SiS96x models store the MAC address in APC CMOS RAM, which is
1625  *	accessed through the ISA bridge.
1626  *	The MAC address is read into @dev->dev_addr.
1627  */
1628 static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1629 					struct net_device *dev)
1630 {
1631 	static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
1632 	struct sis190_private *tp = netdev_priv(dev);
1633 	struct pci_dev *isa_bridge;
1634 	u8 addr[ETH_ALEN];
1635 	u8 reg, tmp8;
1636 	unsigned int i;
1637 
1638 	if (netif_msg_probe(tp))
1639 		pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1640 
1641 	for (i = 0; i < ARRAY_SIZE(ids); i++) {
1642 		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1643 		if (isa_bridge)
1644 			break;
1645 	}
1646 
1647 	if (!isa_bridge) {
1648 		if (netif_msg_probe(tp))
1649 			pr_info("%s: Can not find ISA bridge\n",
1650 				pci_name(pdev));
1651 		return -EIO;
1652 	}
1653 
1654 	/* Enable port 78h & 79h to access APC Registers. */
1655 	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1656 	reg = (tmp8 & ~0x02);
1657 	pci_write_config_byte(isa_bridge, 0x48, reg);
1658 	udelay(50);
1659 	pci_read_config_byte(isa_bridge, 0x48, &reg);
1660 
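	/*
	 * APC CMOS RAM is read indirectly: write the index to port 0x78 and
	 * read the value back from port 0x79; the MAC address starts at
	 * index 0x9.
	 */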
1661 	for (i = 0; i < ETH_ALEN; i++) {
1662 		outb(0x9 + i, 0x78);
1663 		addr[i] = inb(0x79);
1664 	}
1665 	eth_hw_addr_set(dev, addr);
1666 
1667 	outb(0x12, 0x78);
1668 	reg = inb(0x79);
1669 
1670 	sis190_set_rgmii(tp, reg);
1671 
1672 	/* Restore the value to ISA Bridge */
1673 	pci_write_config_byte(isa_bridge, 0x48, tmp8);
1674 	pci_dev_put(isa_bridge);
1675 
1676 	return 0;
1677 }
1678 
1679 /**
1680  *      sis190_init_rxfilter - Initialize the Rx filter
1681  *      @dev: network device to initialize
1682  *
1683  *      Set receive filter address to our MAC address
1684  *      and enable packet filtering.
1685  */
1686 static inline void sis190_init_rxfilter(struct net_device *dev)
1687 {
1688 	struct sis190_private *tp = netdev_priv(dev);
1689 	void __iomem *ioaddr = tp->mmio_addr;
1690 	u16 ctl;
1691 	int i;
1692 
1693 	ctl = SIS_R16(RxMacControl);
1694 	/*
1695 	 * Disable packet filtering before setting filter.
1696 	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1697 	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1698 	 */
1699 	SIS_W16(RxMacControl, ctl & ~0x0f00);
1700 
1701 	for (i = 0; i < ETH_ALEN; i++)
1702 		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1703 
1704 	SIS_W16(RxMacControl, ctl);
1705 	SIS_PCI_COMMIT();
1706 }
1707 
1708 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1709 {
1710 	int rc;
1711 
1712 	rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1713 	if (rc < 0) {
1714 		u8 reg;
1715 
1716 		pci_read_config_byte(pdev, 0x73, &reg);
1717 
1718 		if (reg & 0x00000001)
1719 			rc = sis190_get_mac_addr_from_apc(pdev, dev);
1720 	}
1721 	return rc;
1722 }
1723 
1724 static void sis190_set_speed_auto(struct net_device *dev)
1725 {
1726 	struct sis190_private *tp = netdev_priv(dev);
1727 	void __iomem *ioaddr = tp->mmio_addr;
1728 	int phy_id = tp->mii_if.phy_id;
1729 	int val;
1730 
1731 	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1732 
1733 	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1734 
1735 	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1736 	// unchanged.
1737 	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1738 		   ADVERTISE_100FULL | ADVERTISE_10FULL |
1739 		   ADVERTISE_100HALF | ADVERTISE_10HALF);
1740 
1741 	// Enable 1000 Full Mode.
1742 	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1743 
1744 	// Enable auto-negotiation and restart auto-negotiation.
1745 	mdio_write(ioaddr, phy_id, MII_BMCR,
1746 		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1747 }
1748 
1749 static int sis190_get_link_ksettings(struct net_device *dev,
1750 				     struct ethtool_link_ksettings *cmd)
1751 {
1752 	struct sis190_private *tp = netdev_priv(dev);
1753 
1754 	mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
1755 
1756 	return 0;
1757 }
1758 
1759 static int sis190_set_link_ksettings(struct net_device *dev,
1760 				     const struct ethtool_link_ksettings *cmd)
1761 {
1762 	struct sis190_private *tp = netdev_priv(dev);
1763 
1764 	return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
1765 }
1766 
1767 static void sis190_get_drvinfo(struct net_device *dev,
1768 			       struct ethtool_drvinfo *info)
1769 {
1770 	struct sis190_private *tp = netdev_priv(dev);
1771 
1772 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1773 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
1774 	strscpy(info->bus_info, pci_name(tp->pci_dev),
1775 		sizeof(info->bus_info));
1776 }
1777 
1778 static int sis190_get_regs_len(struct net_device *dev)
1779 {
1780 	return SIS190_REGS_SIZE;
1781 }
1782 
1783 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1784 			    void *p)
1785 {
1786 	struct sis190_private *tp = netdev_priv(dev);
1787 	unsigned long flags;
1788 
1789 	spin_lock_irqsave(&tp->lock, flags);
1790 	memcpy_fromio(p, tp->mmio_addr, regs->len);
1791 	spin_unlock_irqrestore(&tp->lock, flags);
1792 }
1793 
1794 static int sis190_nway_reset(struct net_device *dev)
1795 {
1796 	struct sis190_private *tp = netdev_priv(dev);
1797 
1798 	return mii_nway_restart(&tp->mii_if);
1799 }
1800 
1801 static u32 sis190_get_msglevel(struct net_device *dev)
1802 {
1803 	struct sis190_private *tp = netdev_priv(dev);
1804 
1805 	return tp->msg_enable;
1806 }
1807 
1808 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1809 {
1810 	struct sis190_private *tp = netdev_priv(dev);
1811 
1812 	tp->msg_enable = value;
1813 }
1814 
1815 static const struct ethtool_ops sis190_ethtool_ops = {
1816 	.get_drvinfo	= sis190_get_drvinfo,
1817 	.get_regs_len	= sis190_get_regs_len,
1818 	.get_regs	= sis190_get_regs,
1819 	.get_link	= ethtool_op_get_link,
1820 	.get_msglevel	= sis190_get_msglevel,
1821 	.set_msglevel	= sis190_set_msglevel,
1822 	.nway_reset	= sis190_nway_reset,
1823 	.get_link_ksettings = sis190_get_link_ksettings,
1824 	.set_link_ksettings = sis190_set_link_ksettings,
1825 };
1826 
1827 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1828 {
1829 	struct sis190_private *tp = netdev_priv(dev);
1830 
1831 	return !netif_running(dev) ? -EINVAL :
1832 		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1833 }
1834 
1835 static int sis190_mac_addr(struct net_device *dev, void *p)
1836 {
1837 	int rc;
1838 
1839 	rc = eth_mac_addr(dev, p);
1840 	if (!rc)
1841 		sis190_init_rxfilter(dev);
1842 	return rc;
1843 }
1844 
1845 static const struct net_device_ops sis190_netdev_ops = {
1846 	.ndo_open		= sis190_open,
1847 	.ndo_stop		= sis190_close,
1848 	.ndo_eth_ioctl		= sis190_ioctl,
1849 	.ndo_start_xmit		= sis190_start_xmit,
1850 	.ndo_tx_timeout		= sis190_tx_timeout,
1851 	.ndo_set_rx_mode	= sis190_set_rx_mode,
1852 	.ndo_set_mac_address	= sis190_mac_addr,
1853 	.ndo_validate_addr	= eth_validate_addr,
1854 #ifdef CONFIG_NET_POLL_CONTROLLER
1855 	.ndo_poll_controller	 = sis190_netpoll,
1856 #endif
1857 };
1858 
1859 static int sis190_init_one(struct pci_dev *pdev,
1860 			   const struct pci_device_id *ent)
1861 {
1862 	static int printed_version = 0;
1863 	struct sis190_private *tp;
1864 	struct net_device *dev;
1865 	void __iomem *ioaddr;
1866 	int rc;
1867 
1868 	if (!printed_version) {
1869 		if (netif_msg_drv(&debug))
1870 			pr_info(SIS190_DRIVER_NAME " loaded\n");
1871 		printed_version = 1;
1872 	}
1873 
1874 	dev = sis190_init_board(pdev);
1875 	if (IS_ERR(dev)) {
1876 		rc = PTR_ERR(dev);
1877 		goto out;
1878 	}
1879 
1880 	pci_set_drvdata(pdev, dev);
1881 
1882 	tp = netdev_priv(dev);
1883 	ioaddr = tp->mmio_addr;
1884 
1885 	rc = sis190_get_mac_addr(pdev, dev);
1886 	if (rc < 0)
1887 		goto err_release_board;
1888 
1889 	sis190_init_rxfilter(dev);
1890 
1891 	INIT_WORK(&tp->phy_task, sis190_phy_task);
1892 
1893 	dev->netdev_ops = &sis190_netdev_ops;
1894 
1895 	dev->ethtool_ops = &sis190_ethtool_ops;
1896 	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1897 
1898 	spin_lock_init(&tp->lock);
1899 
1900 	rc = sis190_mii_probe(dev);
1901 	if (rc < 0)
1902 		goto err_release_board;
1903 
1904 	rc = register_netdev(dev);
1905 	if (rc < 0)
1906 		goto err_remove_mii;
1907 
1908 	if (netif_msg_probe(tp)) {
1909 		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1910 			    pci_name(pdev),
1911 			    sis_chip_info[ent->driver_data].name,
1912 			    ioaddr, pdev->irq, dev->dev_addr);
1913 		netdev_info(dev, "%s mode.\n",
1914 			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1915 	}
1916 
1917 	netif_carrier_off(dev);
1918 
1919 	sis190_set_speed_auto(dev);
1920 out:
1921 	return rc;
1922 
1923 err_remove_mii:
1924 	sis190_mii_remove(dev);
1925 err_release_board:
1926 	sis190_release_board(pdev);
1927 	goto out;
1928 }
1929 
1930 static void sis190_remove_one(struct pci_dev *pdev)
1931 {
1932 	struct net_device *dev = pci_get_drvdata(pdev);
1933 	struct sis190_private *tp = netdev_priv(dev);
1934 
1935 	sis190_mii_remove(dev);
1936 	cancel_work_sync(&tp->phy_task);
1937 	unregister_netdev(dev);
1938 	sis190_release_board(pdev);
1939 }
1940 
1941 static struct pci_driver sis190_pci_driver = {
1942 	.name		= DRV_NAME,
1943 	.id_table	= sis190_pci_tbl,
1944 	.probe		= sis190_init_one,
1945 	.remove		= sis190_remove_one,
1946 };
1947 
1948 module_pci_driver(sis190_pci_driver);
1949