1 /*
2 =========================================================================
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
4 --------------------------------------------------------------------
5
6 History:
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
8 May 20 2002 - Add link status force-mode and TBI mode support.
9 =========================================================================
10 1. The media can be forced in 5 modes.
11 Command: 'insmod r8169 media = SET_MEDIA'
12 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mbps Half-duplex.
13
14 SET_MEDIA can be:
15 _10_Half = 0x01
16 _10_Full = 0x02
17 _100_Half = 0x04
18 _100_Full = 0x08
19 _1000_Full = 0x10
20
21 2. Support TBI mode.
22 =========================================================================
23 VERSION 1.1 <2002/10/4>
24
25 Bits 4:0 of MII register 4 are called the "selector field", and have to be
26 00001b to indicate support of IEEE std 802.3 during the NWay process of
27 exchanging Link Code Words (FLP).
28
29 VERSION 1.2 <2002/11/30>
30
31 - Large style cleanup
32 - Use ether_crc in stock kernel (linux/crc32.h)
33 - Copy mc_filter setup code from 8139cp
34 (includes an optimization, and avoids set_bit use)
35
36 <2003/11/30>
37
38 - Add new rtl8169_{suspend/resume}() support
39 */
40
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/delay.h>
46 #include <linux/ethtool.h>
47 #include <linux/crc32.h>
48 #include <linux/init.h>
49 #include <linux/tqueue.h>
50 #include <linux/rtnetlink.h>
51
52 #include <asm/io.h>
53
54 #define DMA_64BIT_MASK 0xffffffffffffffffULL
55 #define DMA_32BIT_MASK 0x00000000ffffffffULL
56
57 #define RTL8169_VERSION "1.2"
58 #define MODULENAME "r8169"
59 #define RTL8169_DRIVER_NAME MODULENAME " Gigabit Ethernet driver " RTL8169_VERSION
60 #define PFX MODULENAME ": "
61
62 #ifdef RTL8169_DEBUG
63 #define assert(expr) \
64 if(!(expr)) { \
65 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
66 #expr,__FILE__,__FUNCTION__,__LINE__); \
67 }
68 #define dprintk(fmt, args...) \
69 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
70 #else
71 #define assert(expr) do {} while (0)
72 #define dprintk(fmt, args...) do {} while (0)
73 #endif /* RTL8169_DEBUG */
74
75 /* media options */
76 #define MAX_UNITS 8
77 static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
78
79 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
80 static int max_interrupt_work = 20;
81
82 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
83 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
84 static int multicast_filter_limit = 32;
85
86 /* MAC address length*/
87 #define MAC_ADDR_LEN 6
88
89 /* max supported gigabit ethernet frame size -- must be at least (dev->mtu+14+4).*/
90 #define MAX_ETH_FRAME_SIZE 1536
91
92 #define TX_FIFO_THRESH 256 /* In bytes */
93
94 #define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
95 #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
96 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
97 #define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
98 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
99
100 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
101 #define NUM_RX_DESC 64 /* Number of Rx descriptor registers */
102 #define RX_BUF_SIZE 1536 /* Rx Buffer size */
103 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
104 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
105
106 #define RTL_MIN_IO_SIZE 0x80
107 #define RTL8169_TX_TIMEOUT (6*HZ)
108 #define RTL8169_PHY_TIMEOUT (HZ)
109
110 /* write/read MMIO register */
111 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
112 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
113 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
114 #define RTL_R8(reg) readb (ioaddr + (reg))
115 #define RTL_R16(reg) readw (ioaddr + (reg))
116 #define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
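/* Note: these accessors assume a local variable named 'ioaddr' (the ioremap'ed MMIO base) is in scope at the call site. */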
117
118 enum mac_version {
119 RTL_GIGA_MAC_VER_B = 0x00,
120 /* RTL_GIGA_MAC_VER_C = 0x03, */
121 RTL_GIGA_MAC_VER_D = 0x01,
122 RTL_GIGA_MAC_VER_E = 0x02
123 };
124
125 enum phy_version {
126 RTL_GIGA_PHY_VER_C = 0x03, /* PHY Reg 0x03 bit0-3 == 0x0000 */
127 RTL_GIGA_PHY_VER_D = 0x04, /* PHY Reg 0x03 bit0-3 == 0x0000 */
128 RTL_GIGA_PHY_VER_E = 0x05, /* PHY Reg 0x03 bit0-3 == 0x0000 */
129 RTL_GIGA_PHY_VER_F = 0x06, /* PHY Reg 0x03 bit0-3 == 0x0001 */
130 RTL_GIGA_PHY_VER_G = 0x07, /* PHY Reg 0x03 bit0-3 == 0x0002 */
131 };
132
133
134 #define _R(NAME,MAC,MASK) \
135 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
136
137 static const struct {
138 const char *name;
139 u8 mac_version;
140 u32 RxConfigMask; /* Clears the bits supported by this chip */
141 } rtl_chip_info[] = {
142 _R("RTL8169", RTL_GIGA_MAC_VER_B, 0xff7e1880),
143 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_D, 0xff7e1880),
144 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_E, 0xff7e1880)
145 };
146 #undef _R
147
148 static struct pci_device_id rtl8169_pci_tbl[] = {
149 {0x10ec, 0x8169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
150 {0,},
151 };
152
153 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
154
155 static int rx_copybreak = 200;
156
157 enum RTL8169_registers {
158 MAC0 = 0, /* Ethernet hardware address. */
159 MAR0 = 8, /* Multicast filter. */
160 TxDescStartAddrLow = 0x20,
161 TxDescStartAddrHigh = 0x24,
162 TxHDescStartAddrLow = 0x28,
163 TxHDescStartAddrHigh = 0x2c,
164 FLASH = 0x30,
165 ERSR = 0x36,
166 ChipCmd = 0x37,
167 TxPoll = 0x38,
168 IntrMask = 0x3C,
169 IntrStatus = 0x3E,
170 TxConfig = 0x40,
171 RxConfig = 0x44,
172 RxMissed = 0x4C,
173 Cfg9346 = 0x50,
174 Config0 = 0x51,
175 Config1 = 0x52,
176 Config2 = 0x53,
177 Config3 = 0x54,
178 Config4 = 0x55,
179 Config5 = 0x56,
180 MultiIntr = 0x5C,
181 PHYAR = 0x60,
182 TBICSR = 0x64,
183 TBI_ANAR = 0x68,
184 TBI_LPAR = 0x6A,
185 PHYstatus = 0x6C,
186 RxMaxSize = 0xDA,
187 CPlusCmd = 0xE0,
188 RxDescAddrLow = 0xE4,
189 RxDescAddrHigh = 0xE8,
190 EarlyTxThres = 0xEC,
191 FuncEvent = 0xF0,
192 FuncEventMask = 0xF4,
193 FuncPresetState = 0xF8,
194 FuncForceEvent = 0xFC,
195 };
196
197 enum RTL8169_register_content {
198 /*InterruptStatusBits */
199 SYSErr = 0x8000,
200 PCSTimeout = 0x4000,
201 SWInt = 0x0100,
202 TxDescUnavail = 0x80,
203 RxFIFOOver = 0x40,
204 RxUnderrun = 0x20,
205 RxOverflow = 0x10,
206 TxErr = 0x08,
207 TxOK = 0x04,
208 RxErr = 0x02,
209 RxOK = 0x01,
210
211 /*RxStatusDesc */
212 RxRES = 0x00200000,
213 RxCRC = 0x00080000,
214 RxRUNT = 0x00100000,
215 RxRWT = 0x00400000,
216 RxOVF = 0x00800000,
217
218 /*ChipCmdBits */
219 CmdReset = 0x10,
220 CmdRxEnb = 0x08,
221 CmdTxEnb = 0x04,
222 RxBufEmpty = 0x01,
223
224 /*Cfg9346Bits */
225 Cfg9346_Lock = 0x00,
226 Cfg9346_Unlock = 0xC0,
227
228 /*rx_mode_bits */
229 AcceptErr = 0x20,
230 AcceptRunt = 0x10,
231 AcceptBroadcast = 0x08,
232 AcceptMulticast = 0x04,
233 AcceptMyPhys = 0x02,
234 AcceptAllPhys = 0x01,
235
236 /*RxConfigBits */
237 RxCfgFIFOShift = 13,
238 RxCfgDMAShift = 8,
239
240 /*TxConfigBits */
241 TxInterFrameGapShift = 24,
242 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
243
244 /* CPlusCmd p.31 */
245 RxVlan = (1 << 6),
246 RxChkSum = (1 << 5),
247 PCIDAC = (1 << 4),
248 PCIMulRW = (1 << 3),
249
250 /*rtl8169_PHYstatus */
251 TBI_Enable = 0x80,
252 TxFlowCtrl = 0x40,
253 RxFlowCtrl = 0x20,
254 _1000bpsF = 0x10,
255 _100bps = 0x08,
256 _10bps = 0x04,
257 LinkStatus = 0x02,
258 FullDup = 0x01,
259
260 /*GIGABIT_PHY_registers */
261 PHY_CTRL_REG = 0,
262 PHY_STAT_REG = 1,
263 PHY_AUTO_NEGO_REG = 4,
264 PHY_1000_CTRL_REG = 9,
265
266 /*GIGABIT_PHY_REG_BIT */
267 PHY_Restart_Auto_Nego = 0x0200,
268 PHY_Enable_Auto_Nego = 0x1000,
269
270 //PHY_STAT_REG = 1;
271 PHY_Auto_Neco_Comp = 0x0020,
272
273 //PHY_AUTO_NEGO_REG = 4;
274 PHY_Cap_10_Half = 0x0020,
275 PHY_Cap_10_Full = 0x0040,
276 PHY_Cap_100_Half = 0x0080,
277 PHY_Cap_100_Full = 0x0100,
278
279 //PHY_1000_CTRL_REG = 9;
280 PHY_Cap_1000_Full = 0x0200,
281
282 PHY_Cap_Null = 0x0,
283
284 /*_MediaType*/
285 _10_Half = 0x01,
286 _10_Full = 0x02,
287 _100_Half = 0x04,
288 _100_Full = 0x08,
289 _1000_Full = 0x10,
290
291 /*_TBICSRBit*/
292 TBILinkOK = 0x02000000,
293 };
294
295 enum _DescStatusBit {
296 OWNbit = 0x80000000,
297 EORbit = 0x40000000,
298 FSbit = 0x20000000,
299 LSbit = 0x10000000,
300 };
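/*
 * Descriptor handshake: setting OWNbit in 'status' hands a descriptor to the
 * NIC, which clears it on completion.  EORbit marks the last descriptor so
 * the chip wraps back to the ring base.  FSbit/LSbit flag the first/last
 * segment of a frame; this driver only uses single-segment transmits and
 * drops fragmented receives.
 */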
301
302 #define RsvdMask 0x3fffc000
303
304 struct TxDesc {
305 u32 status;
306 u32 vlan_tag;
307 u64 addr;
308 };
309
310 struct RxDesc {
311 u32 status;
312 u32 vlan_tag;
313 u64 addr;
314 };
315
316 struct rtl8169_private {
317 void *mmio_addr; /* memory mapped I/O address (from ioremap) */
318 struct pci_dev *pci_dev; /* PCI device this interface is attached to */
319 struct net_device_stats stats; /* statistics of net device */
320 spinlock_t lock; /* spin lock flag */
321 int chipset;
322 int mac_version;
323 int phy_version;
324 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
325 u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
326 u32 dirty_rx;
327 u32 dirty_tx;
328 struct TxDesc *TxDescArray; /* 256-byte-aligned Tx descriptor ring */
329 struct RxDesc *RxDescArray; /* 256-byte-aligned Rx descriptor ring */
330 dma_addr_t TxPhyAddr;
331 dma_addr_t RxPhyAddr;
332 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
333 struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Tx data buffers */
334 struct timer_list timer;
335 unsigned long phy_link_down_cnt;
336 u16 cp_cmd;
337 struct tq_struct reset_task;
338 };
339
340 MODULE_AUTHOR("Realtek");
341 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
342 MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "i");
343 MODULE_PARM(rx_copybreak, "i");
344 MODULE_LICENSE("GPL");
345
346 static int rtl8169_open(struct net_device *dev);
347 static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
348 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance,
349 struct pt_regs *regs);
350 static int rtl8169_init_ring(struct net_device *dev);
351 static void rtl8169_hw_start(struct net_device *dev);
352 static int rtl8169_close(struct net_device *dev);
353 static void rtl8169_set_rx_mode(struct net_device *dev);
354 static void rtl8169_tx_timeout(struct net_device *dev);
355 static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
356 static void rtl8169_reset_task(struct net_device *dev);
357 static void rtl8169_rx_interrupt(struct net_device *dev,
358 struct rtl8169_private *tp, void *ioaddr);
359
360 static const u16 rtl8169_intr_mask =
361 RxUnderrun | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
362 static const unsigned int rtl8169_rx_config =
363 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
364
365 #define PHY_Cap_10_Half_Or_Less PHY_Cap_10_Half
366 #define PHY_Cap_10_Full_Or_Less PHY_Cap_10_Full | PHY_Cap_10_Half_Or_Less
367 #define PHY_Cap_100_Half_Or_Less PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less
368 #define PHY_Cap_100_Full_Or_Less PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less
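/*
 * Note: these expand cumulatively, e.g. PHY_Cap_100_Full_Or_Less advertises
 * 100Full | 100Half | 10Full | 10Half in the auto-negotiation register.
 * The expansions are unparenthesized, so they should only be used inside
 * plain OR expressions, as they are below.
 */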
369
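/*
 * PHY access goes through the PHYAR register: bit 31 is the read/write flag,
 * the GMII register address sits in bits 23:16 (masked with 0xFF here) and
 * the data in bits 15:0.  A write sets bit 31 and polls until the chip
 * clears it; a read leaves bit 31 clear and polls until the chip sets it to
 * signal that the data field is valid.
 */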
370 static void mdio_write(void *ioaddr, int RegAddr, int value)
371 {
372 int i;
373
374 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
375 udelay(1000);
376
377 for (i = 2000; i > 0; i--) {
378 // Check if the RTL8169 has completed writing to the specified MII register
379 if (!(RTL_R32(PHYAR) & 0x80000000)) {
380 break;
381 } else {
382 udelay(100);
383 }
384 }
385 }
386
387 static int mdio_read(void *ioaddr, int RegAddr)
388 {
389 int i, value = -1;
390
391 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
392 udelay(1000);
393
394 for (i = 2000; i > 0; i--) {
395 // Check if the RTL8169 has completed retrieving data from the specified MII register
396 if (RTL_R32(PHYAR) & 0x80000000) {
397 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
398 break;
399 }
400 udelay(100);
401 }
402 return value;
403 }
404
405 static void rtl8169_get_drvinfo(struct net_device *dev,
406 struct ethtool_drvinfo *info)
407 {
408 struct rtl8169_private *tp = dev->priv;
409
410 strcpy(info->driver, RTL8169_DRIVER_NAME);
411 strcpy(info->version, RTL8169_VERSION );
412 strcpy(info->bus_info, pci_name(tp->pci_dev));
413 }
414
415 static struct ethtool_ops rtl8169_ethtool_ops = {
416 .get_drvinfo = rtl8169_get_drvinfo,
417 };
418
419 static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
420 int bitval)
421 {
422 int val;
423
424 val = mdio_read(ioaddr, reg);
425 val = (bitval == 1) ?
426 val | (bitval << bitnum) : val & ~(0x0001 << bitnum);
427 mdio_write(ioaddr, reg, val & 0xffff);
428 }
429
430 static void rtl8169_get_mac_version(struct rtl8169_private *tp, void *ioaddr)
431 {
432 const struct {
433 u32 mask;
434 int mac_version;
435 } mac_info[] = {
436 { 0x1 << 26, RTL_GIGA_MAC_VER_E },
437 { 0x1 << 23, RTL_GIGA_MAC_VER_D },
438 { 0x00000000, RTL_GIGA_MAC_VER_B } /* Catch-all */
439 }, *p = mac_info;
440 u32 reg;
441
442 reg = RTL_R32(TxConfig) & 0x7c800000;
443 while ((reg & p->mask) != p->mask)
444 p++;
445 tp->mac_version = p->mac_version;
446 }
447
448 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
449 {
450 struct {
451 int version;
452 char *msg;
453 } mac_print[] = {
454 { RTL_GIGA_MAC_VER_E, "RTL_GIGA_MAC_VER_E" },
455 { RTL_GIGA_MAC_VER_D, "RTL_GIGA_MAC_VER_D" },
456 { RTL_GIGA_MAC_VER_B, "RTL_GIGA_MAC_VER_B" },
457 { 0, NULL }
458 }, *p;
459
460 for (p = mac_print; p->msg; p++) {
461 if (tp->mac_version == p->version) {
462 dprintk("mac_version == %s (%04d)\n", p->msg,
463 p->version);
464 return;
465 }
466 }
467 dprintk("mac_version == Unknown\n");
468 }
469
470 static void rtl8169_get_phy_version(struct rtl8169_private *tp, void *ioaddr)
471 {
472 const struct {
473 u16 mask;
474 u16 set;
475 int phy_version;
476 } phy_info[] = {
477 { 0x000f, 0x0002, RTL_GIGA_PHY_VER_G },
478 { 0x000f, 0x0001, RTL_GIGA_PHY_VER_F },
479 { 0x000f, 0x0000, RTL_GIGA_PHY_VER_E },
480 { 0x0000, 0x0000, RTL_GIGA_PHY_VER_D } /* Catch-all */
481 }, *p = phy_info;
482 u16 reg;
483
484 reg = mdio_read(ioaddr, 3) & 0xffff;
485 while ((reg & p->mask) != p->set)
486 p++;
487 tp->phy_version = p->phy_version;
488 }
489
490 static void rtl8169_print_phy_version(struct rtl8169_private *tp)
491 {
492 struct {
493 int version;
494 char *msg;
495 u32 reg;
496 } phy_print[] = {
497 { RTL_GIGA_PHY_VER_G, "RTL_GIGA_PHY_VER_G", 0x0002 },
498 { RTL_GIGA_PHY_VER_F, "RTL_GIGA_PHY_VER_F", 0x0001 },
499 { RTL_GIGA_PHY_VER_E, "RTL_GIGA_PHY_VER_E", 0x0000 },
500 { RTL_GIGA_PHY_VER_D, "RTL_GIGA_PHY_VER_D", 0x0000 },
501 { 0, NULL, 0x0000 }
502 }, *p;
503
504 for (p = phy_print; p->msg; p++) {
505 if (tp->phy_version == p->version) {
506 dprintk("phy_version == %s (%04x)\n", p->msg, p->reg);
507 return;
508 }
509 }
510 dprintk("phy_version == Unknown\n");
511 }
512
513 static void rtl8169_hw_phy_config(struct net_device *dev)
514 {
515 struct rtl8169_private *tp = dev->priv;
516 void *ioaddr = tp->mmio_addr;
517 struct {
518 u16 regs[5]; /* Beware of bit-sign propagation */
519 } phy_magic[5] = { {
520 { 0x0000, //w 4 15 12 0
521 0x00a1, //w 3 15 0 00a1
522 0x0008, //w 2 15 0 0008
523 0x1020, //w 1 15 0 1020
524 0x1000 } },{ //w 0 15 0 1000
525 { 0x7000, //w 4 15 12 7
526 0xff41, //w 3 15 0 ff41
527 0xde60, //w 2 15 0 de60
528 0x0140, //w 1 15 0 0140
529 0x0077 } },{ //w 0 15 0 0077
530 { 0xa000, //w 4 15 12 a
531 0xdf01, //w 3 15 0 df01
532 0xdf20, //w 2 15 0 df20
533 0xff95, //w 1 15 0 ff95
534 0xfa00 } },{ //w 0 15 0 fa00
535 { 0xb000, //w 4 15 12 b
536 0xff41, //w 3 15 0 ff41
537 0xde20, //w 2 15 0 de20
538 0x0140, //w 1 15 0 0140
539 0x00bb } },{ //w 0 15 0 00bb
540 { 0xf000, //w 4 15 12 f
541 0xdf01, //w 3 15 0 df01
542 0xdf20, //w 2 15 0 df20
543 0xff95, //w 1 15 0 ff95
544 0xbf00 } //w 0 15 0 bf00
545 }
546 }, *p = phy_magic;
547 int i;
548
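/*
 * phy_magic[] holds values for PHY registers 4..0: regs[0] is OR'ed into
 * the top nibble of register 4, regs[1]..regs[4] are written to registers
 * 3..0.  Register 31 selects the vendor page around the whole sequence and
 * register 4 bit 11 is toggled after each row.
 */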
549 rtl8169_print_mac_version(tp);
550 rtl8169_print_phy_version(tp);
551
552 if (tp->mac_version <= RTL_GIGA_MAC_VER_B)
553 return;
554 if (tp->phy_version >= RTL_GIGA_PHY_VER_F)
555 return;
556
557 dprintk("MAC version != 0 && PHY version == 0 or 1\n");
558 dprintk("Do final_reg2.cfg\n");
559
560 /* Shazam ! */
561
562 // phy config for RTL8169s mac_version C chip
563 mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
564 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
565 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
566 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
567
568 for (i = 0; i < ARRAY_SIZE(phy_magic); i++, p++) {
569 int val, pos = 4;
570
571 val = (mdio_read(ioaddr, pos) & 0x0fff) | (p->regs[0] & 0xffff);
572 mdio_write(ioaddr, pos, val);
573 while (--pos >= 0)
574 mdio_write(ioaddr, pos, p->regs[4 - pos] & 0xffff);
575 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 1); //w 4 11 11 1
576 rtl8169_write_gmii_reg_bit(ioaddr, 4, 11, 0); //w 4 11 11 0
577 }
578 mdio_write(ioaddr, 31, 0x0000); //w 31 2 0 0
579 }
580
581 static void rtl8169_hw_phy_reset(struct net_device *dev)
582 {
583 struct rtl8169_private *tp = dev->priv;
584 void *ioaddr = tp->mmio_addr;
585 int i, val;
586
587 printk(KERN_WARNING PFX "%s: Reset RTL8169s PHY\n", dev->name);
588
589 val = (mdio_read(ioaddr, 0) | 0x8000) & 0xffff;
590 mdio_write(ioaddr, 0, val);
591
592 for (i = 50; i >= 0; i--) {
593 if (!(mdio_read(ioaddr, 0) & 0x8000))
594 break;
595 udelay(100); /* Gross */
596 }
597
598 if (i < 0) {
599 printk(KERN_WARNING PFX "%s: no PHY Reset ack. Giving up.\n",
600 dev->name);
601 }
602 }
603
604 static void rtl8169_phy_timer(unsigned long __opaque)
605 {
606 struct net_device *dev = (struct net_device *)__opaque;
607 struct rtl8169_private *tp = dev->priv;
608 struct timer_list *timer = &tp->timer;
609 void *ioaddr = tp->mmio_addr;
610
611 assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
612 assert(tp->phy_version < RTL_GIGA_PHY_VER_G);
613
614 if (RTL_R8(PHYstatus) & LinkStatus)
615 tp->phy_link_down_cnt = 0;
616 else {
617 tp->phy_link_down_cnt++;
618 if (tp->phy_link_down_cnt >= 12) {
619 int reg;
620
621 // If 1000Full is advertised but the link has stayed down, reset the PHY.
622 reg = mdio_read(ioaddr, PHY_1000_CTRL_REG);
623 if (reg & PHY_Cap_1000_Full)
624 rtl8169_hw_phy_reset(dev);
625
626 tp->phy_link_down_cnt = 0;
627 }
628 }
629
630 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
631 }
632
633 static inline void rtl8169_delete_timer(struct net_device *dev)
634 {
635 struct rtl8169_private *tp = dev->priv;
636 struct timer_list *timer = &tp->timer;
637
638 if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
639 (tp->phy_version >= RTL_GIGA_PHY_VER_G))
640 return;
641
642 del_timer_sync(timer);
643
644 tp->phy_link_down_cnt = 0;
645 }
646
647 static inline void rtl8169_request_timer(struct net_device *dev)
648 {
649 struct rtl8169_private *tp = dev->priv;
650 struct timer_list *timer = &tp->timer;
651
652 if ((tp->mac_version <= RTL_GIGA_MAC_VER_B) ||
653 (tp->phy_version >= RTL_GIGA_PHY_VER_G))
654 return;
655
656 tp->phy_link_down_cnt = 0;
657
658 init_timer(timer);
659 timer->expires = jiffies + RTL8169_PHY_TIMEOUT;
660 timer->data = (unsigned long)(dev);
661 timer->function = rtl8169_phy_timer;
662 add_timer(timer);
663 }
664
665 static int __devinit
666 rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
667 void **ioaddr_out)
668 {
669 void *ioaddr = NULL;
670 struct net_device *dev;
671 struct rtl8169_private *tp;
672 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
673 int rc, i, acpi_idle_state = 0, pm_cap;
674
675
676 assert(pdev != NULL);
677 assert(ioaddr_out != NULL);
678
679 *ioaddr_out = NULL;
680 *dev_out = NULL;
681
682 // dev zeroed in alloc_etherdev
683 dev = alloc_etherdev(sizeof (*tp));
684 if (dev == NULL) {
685 printk(KERN_ERR PFX "unable to alloc new ethernet\n");
686 return -ENOMEM;
687 }
688
689 SET_MODULE_OWNER(dev);
690 SET_NETDEV_DEV(dev, &pdev->dev);
691 tp = dev->priv;
692
693 // enable device (incl. PCI PM wakeup and hotplug setup)
694 rc = pci_enable_device(pdev);
695 if (rc) {
696 printk(KERN_ERR PFX "%s: unable to enable device\n", pdev->slot_name);
697 goto err_out;
698 }
699
700 /* save power state before pci_enable_device overwrites it */
701 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
702 if (pm_cap) {
703 u16 pwr_command;
704
705 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
706 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
707 } else {
708 printk(KERN_ERR PFX "Cannot find PowerManagement capability, aborting.\n");
709 rc = -EIO;
goto err_out_disable;
710 }
711
712 mmio_start = pci_resource_start(pdev, 1);
713 mmio_end = pci_resource_end(pdev, 1);
714 mmio_flags = pci_resource_flags(pdev, 1);
715 mmio_len = pci_resource_len(pdev, 1);
716
717 // make sure PCI base addr 1 is MMIO
718 if (!(mmio_flags & IORESOURCE_MEM)) {
719 printk(KERN_ERR PFX
720 "region #1 not an MMIO resource, aborting\n");
721 rc = -ENODEV;
722 goto err_out_disable;
723 }
724 // check for weird/broken PCI region reporting
725 if (mmio_len < RTL_MIN_IO_SIZE) {
726 printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
727 rc = -ENODEV;
728 goto err_out_disable;
729 }
730
731 rc = pci_request_regions(pdev, dev->name);
732 if (rc) {
733 printk(KERN_ERR PFX "%s: Could not request regions.\n", pdev->slot_name);
734 goto err_out_disable;
735 }
736
737 tp->cp_cmd = PCIMulRW | RxChkSum;
738
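/*
 * Prefer 64-bit DMA (and flag PCI Dual Address Cycle in CPlusCmd) when
 * dma_addr_t is wide enough; otherwise fall back to a 32-bit DMA mask.
 */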
739 if ((sizeof(dma_addr_t) > 4) &&
740 !pci_set_dma_mask(pdev, DMA_64BIT_MASK))
741 tp->cp_cmd |= PCIDAC;
742 else {
743 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
744 if (rc < 0) {
745 printk(KERN_ERR PFX "DMA configuration failed.\n");
746 goto err_out_free_res;
747 }
748 }
749
750
751 // enable PCI bus-mastering
752 pci_set_master(pdev);
753
754 // ioremap MMIO region
755 ioaddr = ioremap(mmio_start, mmio_len);
756 if (ioaddr == NULL) {
757 printk(KERN_ERR PFX "cannot remap MMIO, aborting\n");
758 rc = -EIO;
759 goto err_out_free_res;
760 }
761
762 // Soft reset the chip.
763 RTL_W8(ChipCmd, CmdReset);
764
765 // Check that the chip has finished the reset.
766 for (i = 1000; i > 0; i--) {
767 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
768 break;
769 udelay(10);
770 }
771
772 // Identify chip attached to board
773 rtl8169_get_mac_version(tp, ioaddr);
774 rtl8169_get_phy_version(tp, ioaddr);
775
776 rtl8169_print_mac_version(tp);
777 rtl8169_print_phy_version(tp);
778
779 for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
780 if (tp->mac_version == rtl_chip_info[i].mac_version)
781 break;
782 }
783 if (i < 0) {
784 /* Unknown chip: assume array element #0, original RTL-8169 */
785 printk(KERN_DEBUG PFX
786 "PCI device %s: unknown chip version, assuming %s\n",
787 pci_name(pdev), rtl_chip_info[0].name);
788 i++;
789 }
790 tp->chipset = i;
791
792 *ioaddr_out = ioaddr;
793 *dev_out = dev;
794 return 0;
795
796 err_out_free_res:
797 pci_release_regions(pdev);
798
799 err_out_disable:
800 pci_disable_device(pdev);
801
802 err_out:
803 free_netdev(dev);
804 return rc;
805 }
806
807 static int __devinit
808 rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
809 {
810 struct net_device *dev = NULL;
811 struct rtl8169_private *tp = NULL;
812 void *ioaddr = NULL;
813 static int board_idx = -1;
814 static int printed_version = 0;
815 int i, rc;
816 int option = -1, Cap10_100 = 0, Cap1000 = 0;
817
818 assert(pdev != NULL);
819 assert(ent != NULL);
820
821 board_idx++;
822
823 if (!printed_version) {
824 printk(KERN_INFO RTL8169_DRIVER_NAME " loaded\n");
825 printed_version = 1;
826 }
827
828 rc = rtl8169_init_board(pdev, &dev, &ioaddr);
829 if (rc)
830 return rc;
831
832 tp = dev->priv;
833 assert(ioaddr != NULL);
834 assert(dev != NULL);
835 assert(tp != NULL);
836
837 // Get MAC address. FIXME: read EEPROM
838 for (i = 0; i < MAC_ADDR_LEN; i++)
839 dev->dev_addr[i] = RTL_R8(MAC0 + i);
840
841 dev->open = rtl8169_open;
842 dev->hard_start_xmit = rtl8169_start_xmit;
843 dev->get_stats = rtl8169_get_stats;
844 dev->ethtool_ops = &rtl8169_ethtool_ops;
845 dev->stop = rtl8169_close;
846 dev->tx_timeout = rtl8169_tx_timeout;
847 dev->set_multicast_list = rtl8169_set_rx_mode;
848 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
849 dev->irq = pdev->irq;
850 dev->base_addr = (unsigned long) ioaddr;
851 // dev->do_ioctl = mii_ioctl;
852
853 tp = dev->priv; // private data //
854 tp->pci_dev = pdev;
855 tp->mmio_addr = ioaddr;
856
857 spin_lock_init(&tp->lock);
858
859 rc = register_netdev(dev);
860 if (rc) {
861 iounmap(ioaddr);
862 pci_release_regions(pdev);
863 pci_disable_device(pdev);
864 free_netdev(dev);
865 return rc;
866 }
867
868 printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n", dev->name,
869 rtl_chip_info[tp->chipset].name);
870
871 pci_set_drvdata(pdev, dev);
872
873 printk(KERN_INFO "%s: %s at 0x%lx, "
874 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
875 "IRQ %d\n",
876 dev->name,
877 rtl_chip_info[ent->driver_data].name,
878 dev->base_addr,
879 dev->dev_addr[0], dev->dev_addr[1],
880 dev->dev_addr[2], dev->dev_addr[3],
881 dev->dev_addr[4], dev->dev_addr[5], dev->irq);
882
883 rtl8169_hw_phy_config(dev);
884
885 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
886 RTL_W8(0x82, 0x01);
887
888 if (tp->mac_version < RTL_GIGA_MAC_VER_E) {
889 dprintk("Set PCI Latency=0x40\n");
890 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
891 }
892
893 if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
894 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
895 RTL_W8(0x82, 0x01);
896 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
897 mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
898 }
899
900 // if TBI is not enabled
901 if (!(RTL_R8(PHYstatus) & TBI_Enable)) {
902 int val = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
903
904 option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
905 // Force RTL8169 in 10/100/1000 Full/Half mode.
906 if (option > 0) {
907 printk(KERN_INFO "%s: Force-mode Enabled.\n",
908 dev->name);
909 Cap10_100 = 0, Cap1000 = 0;
910 switch (option) {
911 case _10_Half:
912 Cap10_100 = PHY_Cap_10_Half_Or_Less;
913 Cap1000 = PHY_Cap_Null;
914 break;
915 case _10_Full:
916 Cap10_100 = PHY_Cap_10_Full_Or_Less;
917 Cap1000 = PHY_Cap_Null;
918 break;
919 case _100_Half:
920 Cap10_100 = PHY_Cap_100_Half_Or_Less;
921 Cap1000 = PHY_Cap_Null;
922 break;
923 case _100_Full:
924 Cap10_100 = PHY_Cap_100_Full_Or_Less;
925 Cap1000 = PHY_Cap_Null;
926 break;
927 case _1000_Full:
928 Cap10_100 = PHY_Cap_100_Full_Or_Less;
929 Cap1000 = PHY_Cap_1000_Full;
930 break;
931 default:
932 break;
933 }
934 mdio_write(ioaddr, PHY_AUTO_NEGO_REG, Cap10_100 | (val & 0x1F)); //leave PHY_AUTO_NEGO_REG bit4:0 unchanged
935 mdio_write(ioaddr, PHY_1000_CTRL_REG, Cap1000);
936 } else {
937 printk(KERN_INFO "%s: Auto-negotiation Enabled.\n",
938 dev->name);
939
940 // enable 10/100 Full/Half Mode, leave PHY_AUTO_NEGO_REG bit4:0 unchanged
941 mdio_write(ioaddr, PHY_AUTO_NEGO_REG,
942 PHY_Cap_100_Full_Or_Less | (val & 0x1f));
943
944 // enable 1000 Full Mode
945 mdio_write(ioaddr, PHY_1000_CTRL_REG,
946 PHY_Cap_1000_Full);
947
948 }
949
950 // Enable auto-negotiation and restart auto-negotiation
951 mdio_write(ioaddr, PHY_CTRL_REG,
952 PHY_Enable_Auto_Nego | PHY_Restart_Auto_Nego);
953 udelay(100);
954
955 // wait for auto-negotiation process
956 for (i = 10000; i > 0; i--) {
957 //check if auto-negotiation complete
958 if (mdio_read(ioaddr, PHY_STAT_REG) &
959 PHY_Auto_Neco_Comp) {
960 udelay(100);
961 option = RTL_R8(PHYstatus);
962 if (option & _1000bpsF) {
963 printk(KERN_INFO
964 "%s: 1000Mbps Full-duplex operation.\n",
965 dev->name);
966 } else {
967 printk(KERN_INFO
968 "%s: %sMbps %s-duplex operation.\n",
969 dev->name,
970 (option & _100bps) ? "100" :
971 "10",
972 (option & FullDup) ? "Full" :
973 "Half");
974 }
975 break;
976 } else {
977 udelay(100);
978 }
979 } // end for-loop to wait for auto-negotiation process
980
981 } else {
982 udelay(100);
983 printk(KERN_INFO
984 "%s: 1000Mbps Full-duplex operation, TBI Link %s!\n",
985 dev->name,
986 (RTL_R32(TBICSR) & TBILinkOK) ? "OK" : "Failed");
987
988 }
989
990 INIT_TQUEUE(&tp->reset_task, (void (*)(void *))rtl8169_reset_task, dev);
991
992 return 0;
993 }
994
995 static void __devexit
996 rtl8169_remove_one(struct pci_dev *pdev)
997 {
998 struct net_device *dev = pci_get_drvdata(pdev);
999 struct rtl8169_private *tp = dev->priv;
1000
1001 assert(dev != NULL);
1002 assert(tp != NULL);
1003
1004 flush_scheduled_tasks();
1005 unregister_netdev(dev);
1006 iounmap(tp->mmio_addr);
1007 pci_release_regions(pdev);
1008
1009 pci_disable_device(pdev);
1010 free_netdev(dev);
1011 pci_set_drvdata(pdev, NULL);
1012 }
1013
1014 #ifdef CONFIG_PM
1015
1016 static int rtl8169_suspend(struct pci_dev *pdev, u32 state)
1017 {
1018 struct net_device *dev = pci_get_drvdata(pdev);
1019 struct rtl8169_private *tp = dev->priv;
1020 void *ioaddr = tp->mmio_addr;
1021 unsigned long flags;
1022
1023 if (!netif_running(dev))
1024 return 0;
1025
1026 netif_device_detach(dev);
1027 netif_stop_queue(dev);
1028 spin_lock_irqsave(&tp->lock, flags);
1029
1030 /* Disable interrupts, stop Rx and Tx */
1031 RTL_W16(IntrMask, 0);
1032 RTL_W8(ChipCmd, 0);
1033
1034 /* Update the error counts. */
1035 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
1036 RTL_W32(RxMissed, 0);
1037 spin_unlock_irqrestore(&tp->lock, flags);
1038
1039 return 0;
1040 }
1041
1042 static int rtl8169_resume(struct pci_dev *pdev)
1043 {
1044 struct net_device *dev = pci_get_drvdata(pdev);
1045
1046 if (!netif_running(dev))
1047 return 0;
1048
1049 netif_device_attach(dev);
1050 rtl8169_hw_start(dev);
1051
1052 return 0;
1053 }
1054
1055 #endif /* CONFIG_PM */
1056
1057 static int
1058 rtl8169_open(struct net_device *dev)
1059 {
1060 struct rtl8169_private *tp = dev->priv;
1061 struct pci_dev *pdev = tp->pci_dev;
1062 int retval;
1063
1064 retval =
1065 request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
1066 if (retval < 0)
1067 goto out;
1068
1069 retval = -ENOMEM;
1070
1071 /*
1072 * Rx and Tx descriptors need 256-byte alignment.
1073 * pci_alloc_consistent provides more.
1074 */
1075 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
1076 &tp->TxPhyAddr);
1077 if (!tp->TxDescArray)
1078 goto err_free_irq;
1079
1080 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
1081 &tp->RxPhyAddr);
1082 if (!tp->RxDescArray)
1083 goto err_free_tx;
1084
1085 retval = rtl8169_init_ring(dev);
1086 if (retval < 0)
1087 goto err_free_rx;
1088
1089 rtl8169_hw_start(dev);
1090
1091 rtl8169_request_timer(dev);
1092 out:
1093 return retval;
1094
1095 err_free_rx:
1096 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
1097 tp->RxPhyAddr);
1098 err_free_tx:
1099 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
1100 tp->TxPhyAddr);
1101 err_free_irq:
1102 free_irq(dev->irq, dev);
1103 goto out;
1104 }
1105
1106 static void
1107 rtl8169_hw_start(struct net_device *dev)
1108 {
1109 struct rtl8169_private *tp = dev->priv;
1110 void *ioaddr = tp->mmio_addr;
1111 u32 i;
1112
1113 /* Soft reset the chip. */
1114 RTL_W8(ChipCmd, CmdReset);
1115
1116 /* Check that the chip has finished the reset. */
1117 for (i = 1000; i > 0; i--) {
1118 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1119 break;
1120 else
1121 udelay(10);
1122 }
1123
1124 RTL_W8(Cfg9346, Cfg9346_Unlock);
1125 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1126 RTL_W8(EarlyTxThres, EarlyTxThld);
1127
1128 // For gigabit rtl8169
1129 RTL_W16(RxMaxSize, RX_BUF_SIZE);
1130
1131 // Set Rx Config register
1132 i = rtl8169_rx_config | (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].
1133 RxConfigMask);
1134 RTL_W32(RxConfig, i);
1135
1136 /* Set DMA burst size and Interframe Gap Time */
1137 RTL_W32(TxConfig,
1138 (TX_DMA_BURST << TxDMAShift) | (InterFrameGap <<
1139 TxInterFrameGapShift));
1140 tp->cp_cmd |= RTL_R16(CPlusCmd);
1141 RTL_W16(CPlusCmd, tp->cp_cmd);
1142
1143 if (tp->mac_version == RTL_GIGA_MAC_VER_D) {
1144 dprintk("Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14 MUST be 1\n");
1145 tp->cp_cmd |= (1 << 14) | PCIMulRW;
1146 RTL_W16(CPlusCmd, tp->cp_cmd);
1147 }
1148
1149 tp->cur_rx = 0;
1150
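/* Hand the 64-bit ring base addresses to the chip as low/high 32-bit halves. */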
1151 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
1152 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
1153 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
1154 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
1155 RTL_W8(Cfg9346, Cfg9346_Lock);
1156 udelay(10);
1157
1158 RTL_W32(RxMissed, 0);
1159
1160 rtl8169_set_rx_mode(dev);
1161
1162 /* no early-rx interrupts */
1163 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
1164
1165 /* Enable all known interrupts by setting the interrupt mask. */
1166 RTL_W16(IntrMask, rtl8169_intr_mask);
1167
1168 netif_start_queue(dev);
1169
1170 }
1171
1172 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
1173 {
1174 desc->addr = 0x0badbadbadbadbadULL;
1175 desc->status &= ~cpu_to_le32(OWNbit | RsvdMask);
1176 }
1177
1178 static void rtl8169_free_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
1179 struct RxDesc *desc)
1180 {
1181 pci_unmap_single(pdev, le64_to_cpu(desc->addr), RX_BUF_SIZE,
1182 PCI_DMA_FROMDEVICE);
1183 dev_kfree_skb(*sk_buff);
1184 *sk_buff = NULL;
1185 rtl8169_make_unusable_by_asic(desc);
1186 }
1187
1188 static inline void rtl8169_return_to_asic(struct RxDesc *desc)
1189 {
1190 desc->status = (desc->status & cpu_to_le32(EORbit)) | cpu_to_le32(OWNbit | RX_BUF_SIZE);
1191 }
1192
1193 static inline void rtl8169_give_to_asic(struct RxDesc *desc, dma_addr_t mapping)
1194 {
1195 desc->addr = cpu_to_le64(mapping);
1196 rtl8169_return_to_asic(desc);
1197 }
1198
1199 static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev,
1200 struct sk_buff **sk_buff, struct RxDesc *desc)
1201 {
1202 struct sk_buff *skb;
1203 dma_addr_t mapping;
1204 int ret = 0;
1205
1206 skb = dev_alloc_skb(RX_BUF_SIZE + 2);
1207 if (!skb)
1208 goto err_out;
1209
1210 skb->dev = dev;
1211 skb_reserve(skb, 2);
1212 *sk_buff = skb;
1213
1214 mapping = pci_map_single(pdev, skb->tail, RX_BUF_SIZE,
1215 PCI_DMA_FROMDEVICE);
1216
1217 rtl8169_give_to_asic(desc, mapping);
1218
1219 out:
1220 return ret;
1221
1222 err_out:
1223 ret = -ENOMEM;
1224 rtl8169_make_unusable_by_asic(desc);
1225 goto out;
1226 }
1227
1228 static void rtl8169_rx_clear(struct rtl8169_private *tp)
1229 {
1230 int i;
1231
1232 for (i = 0; i < NUM_RX_DESC; i++) {
1233 if (tp->Rx_skbuff[i]) {
1234 rtl8169_free_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
1235 tp->RxDescArray + i);
1236 }
1237 }
1238 }
1239
1240 static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
1241 u32 start, u32 end)
1242 {
1243 u32 cur;
1244
1245 for (cur = start; end - cur > 0; cur++) {
1246 int ret, i = cur % NUM_RX_DESC;
1247
1248 if (tp->Rx_skbuff[i])
1249 continue;
1250
1251 ret = rtl8169_alloc_rx_skb(tp->pci_dev, dev, tp->Rx_skbuff + i,
1252 tp->RxDescArray + i);
1253 if (ret < 0)
1254 break;
1255 }
1256 return cur - start;
1257 }
1258
1259 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
1260 {
1261 desc->status |= cpu_to_le32(EORbit);
1262 }
1263
1264 static int rtl8169_init_ring(struct net_device *dev)
1265 {
1266 struct rtl8169_private *tp = dev->priv;
1267
1268 tp->cur_rx = tp->dirty_rx = 0;
1269 tp->cur_tx = tp->dirty_tx = 0;
1270 memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof (struct TxDesc));
1271 memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof (struct RxDesc));
1272
1273 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
1274 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
1275
1276 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
1277 goto err_out;
1278
1279 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
1280
1281 return 0;
1282
1283 err_out:
1284 rtl8169_rx_clear(tp);
1285 return -ENOMEM;
1286 }
1287
1288 static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
1289 struct TxDesc *desc)
1290 {
1291 u32 len = sk_buff[0]->len;
1292
1293 pci_unmap_single(pdev, le64_to_cpu(desc->addr),
1294 len < ETH_ZLEN ? ETH_ZLEN : len, PCI_DMA_TODEVICE);
1295 desc->addr = 0x00;
1296 *sk_buff = NULL;
1297 }
1298
1299 static void
1300 rtl8169_tx_clear(struct rtl8169_private *tp)
1301 {
1302 int i;
1303
1304 tp->cur_tx = 0;
1305 for (i = 0; i < NUM_TX_DESC; i++) {
1306 struct sk_buff *skb = tp->Tx_skbuff[i];
1307
1308 if (skb) {
1309 rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + i,
1310 tp->TxDescArray + i);
1311 dev_kfree_skb(skb);
1312 tp->stats.tx_dropped++;
1313 }
1314 }
1315 }
1316
1317 static void
1318 rtl8169_reset_task(struct net_device *dev)
1319 {
1320 struct rtl8169_private *tp = dev->priv;
1321 void *ioaddr = tp->mmio_addr;
1322
1323 rtnl_lock();
1324 RTL_W16(IntrMask, 0);
1325
1326 if (!netif_running(dev))
1327 goto out_unlock;
1328
1329 rtl8169_rx_interrupt(dev, tp, ioaddr);
1330
1331 spin_lock_irq(&tp->lock);
1332 rtl8169_tx_clear(tp);
1333 spin_unlock_irq(&tp->lock);
1334
1335 if (tp->dirty_rx == tp->cur_rx) {
1336 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
1337 rtl8169_hw_start(dev);
1338 netif_wake_queue(dev);
1339 } else {
1340 if (net_ratelimit()) {
1341 printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
1342 dev->name);
1343 }
1344 schedule_task(&tp->reset_task);
1345 }
1346 out_unlock:
1347 RTL_W16(IntrMask, rtl8169_intr_mask);
1348 rtnl_unlock();
1349 }
1350
1351 static void
1352 rtl8169_tx_timeout(struct net_device *dev)
1353 {
1354 struct rtl8169_private *tp = dev->priv;
1355 void *ioaddr = tp->mmio_addr;
1356 u8 tmp8;
1357
1358 /* disable Tx, if not already */
1359 tmp8 = RTL_R8(ChipCmd);
1360 if (tmp8 & CmdTxEnb)
1361 RTL_W8(ChipCmd, tmp8 & ~CmdTxEnb);
1362
1363 /* Disable interrupts by clearing the interrupt mask. */
1364 RTL_W16(IntrMask, 0x0000);
1365
1366 /* Stop a shared interrupt from scavenging while we are. */
1367 spin_lock_irq(&tp->lock);
1368 rtl8169_tx_clear(tp);
1369 spin_unlock_irq(&tp->lock);
1370
1371 /* ...and finally, reset everything */
1372 rtl8169_hw_start(dev);
1373
1374 netif_wake_queue(dev);
1375 }
1376
1377 static int
1378 rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
1379 {
1380 struct rtl8169_private *tp = dev->priv;
1381 void *ioaddr = tp->mmio_addr;
1382 int entry = tp->cur_tx % NUM_TX_DESC;
1383 u32 len = skb->len;
1384
1385 if (unlikely(skb->len < ETH_ZLEN)) {
1386 skb = skb_padto(skb, ETH_ZLEN);
1387 if (!skb)
1388 goto err_update_stats;
1389 len = ETH_ZLEN;
1390 }
1391
1392 spin_lock_irq(&tp->lock);
1393
1394 if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
1395 dma_addr_t mapping;
1396
1397 mapping = pci_map_single(tp->pci_dev, skb->data, len,
1398 PCI_DMA_TODEVICE);
1399
1400 tp->Tx_skbuff[entry] = skb;
1401 tp->TxDescArray[entry].addr = cpu_to_le64(mapping);
1402
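/*
 * Give the descriptor to the NIC: OWN + first/last segment + length.
 * The (EORbit * ...) term sets End-Of-Ring only on the ring's last entry.
 */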
1403 tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
1404 LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));
1405
1406 RTL_W8(TxPoll, 0x40); //set polling bit
1407
1408 dev->trans_start = jiffies;
1409
1410 tp->cur_tx++;
1411 } else
1412 goto err_drop;
1413
1414
1415 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
1416 netif_stop_queue(dev);
1417 }
1418 out:
1419 spin_unlock_irq(&tp->lock);
1420
1421 return 0;
1422
1423 err_drop:
1424 dev_kfree_skb(skb);
1425 err_update_stats:
1426 tp->stats.tx_dropped++;
1427 goto out;
1428 }
1429
1430 static void
1431 rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
1432 void *ioaddr)
1433 {
1434 unsigned long dirty_tx, tx_left = 0;
1435
1436 assert(dev != NULL);
1437 assert(tp != NULL);
1438 assert(ioaddr != NULL);
1439
1440 dirty_tx = tp->dirty_tx;
1441 tx_left = tp->cur_tx - dirty_tx;
1442
1443 while (tx_left > 0) {
1444 int entry = dirty_tx % NUM_TX_DESC;
1445 struct sk_buff *skb = tp->Tx_skbuff[entry];
1446 u32 status;
1447
1448 rmb();
1449 status = le32_to_cpu(tp->TxDescArray[entry].status);
1450 if (status & OWNbit)
1451 break;
1452
1453 /* FIXME: is it really accurate for TxErr ? */
1454 tp->stats.tx_bytes += skb->len >= ETH_ZLEN ?
1455 skb->len : ETH_ZLEN;
1456 tp->stats.tx_packets++;
1457 rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + entry,
1458 tp->TxDescArray + entry);
1459 dev_kfree_skb_irq(skb);
1460 dirty_tx++;
1461 tx_left--;
1462 }
1463
1464 if (tp->dirty_tx != dirty_tx) {
1465 tp->dirty_tx = dirty_tx;
1466 if (netif_queue_stopped(dev))
1467 netif_wake_queue(dev);
1468 }
1469 }
1470
1471 static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
1472 struct RxDesc *desc,
1473 struct net_device *dev)
1474 {
1475 int ret = -1;
1476
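/*
 * Copy small packets (below rx_copybreak) into a freshly allocated skb so
 * the original receive buffer stays mapped and can be handed straight back
 * to the chip; larger packets are passed up without copying.
 */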
1477 if (pkt_size < rx_copybreak) {
1478 struct sk_buff *skb;
1479
1480 skb = dev_alloc_skb(pkt_size + 2);
1481 if (skb) {
1482 skb->dev = dev;
1483 skb_reserve(skb, 2);
1484 eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
1485 *sk_buff = skb;
1486 rtl8169_return_to_asic(desc);
1487 ret = 0;
1488 }
1489 }
1490 return ret;
1491 }
1492
1493 static inline int rtl8169_fragmented_frame(u32 status)
1494 {
1495 return (status & (FSbit | LSbit)) != (FSbit | LSbit);
1496 }
1497
1498 static void
1499 rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
1500 void *ioaddr)
1501 {
1502 unsigned long cur_rx, rx_left;
1503 int delta;
1504
1505 assert(dev != NULL);
1506 assert(tp != NULL);
1507 assert(ioaddr != NULL);
1508
1509 cur_rx = tp->cur_rx;
1510 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
1511
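/* Only scan descriptors that have actually been handed to the NIC. */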
1512 for (; rx_left > 0; rx_left--, cur_rx++) {
1513 int entry = cur_rx % NUM_RX_DESC;
1514 u32 status;
1515
1516 rmb();
1517 status = le32_to_cpu(tp->RxDescArray[entry].status);
1518
1519 if (status & OWNbit)
1520 break;
1521
1522 if (status & RxRES) {
1523 if (net_ratelimit())
1524 printk(KERN_INFO "%s: Rx ERROR, status=0x%08x !\n",
1525 dev->name, status);
1526 tp->stats.rx_errors++;
1527 if (status & (RxRWT | RxRUNT))
1528 tp->stats.rx_length_errors++;
1529 if (status & RxCRC)
1530 tp->stats.rx_crc_errors++;
1531 if (status & RxOVF) {
1532 tp->stats.rx_fifo_errors++;
1533 schedule_task(&tp->reset_task);
1534 }
1535
1536 rtl8169_return_to_asic(tp->RxDescArray + entry);
1537 continue;
1538 } else {
1539 struct RxDesc *desc = tp->RxDescArray + entry;
1540 struct sk_buff *skb = tp->Rx_skbuff[entry];
1541 int pkt_size = (status & 0x00001FFF) - 4;
1542
1543 /* Backport from 2.6 to cover a panic on large frames.
1544 * The driver does not support incoming fragmented
1545 * frames. They are seen as a symptom of over-mtu
1546 * sized frames (0x3ff0 bytes).
1547 */
1548 if (unlikely(rtl8169_fragmented_frame(status))) {
1549 tp->stats.rx_dropped++;
1550 tp->stats.rx_length_errors++;
1551 rtl8169_return_to_asic(desc);
1552 continue;
1553 }
1554
1555 pci_dma_sync_single(tp->pci_dev, le64_to_cpu(desc->addr),
1556 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1557
1558 if (rtl8169_try_rx_copy(&skb, pkt_size, desc, dev)) {
1559 pci_unmap_single(tp->pci_dev, le64_to_cpu(desc->addr),
1560 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1561 tp->Rx_skbuff[entry] = NULL;
1562 }
1563
1564
1565 skb_put(skb, pkt_size);
1566 skb->protocol = eth_type_trans(skb, dev);
1567 netif_rx(skb);
1568
1569 dev->last_rx = jiffies;
1570 tp->stats.rx_bytes += pkt_size;
1571 tp->stats.rx_packets++;
1572 }
1573 }
1574
1575 tp->cur_rx = cur_rx;
1576
1577 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
1578 if (delta > 0)
1579 tp->dirty_rx += delta;
1580 else if (delta < 0 && net_ratelimit())
1581 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
1582
1583 /*
1584 * FIXME: until there is periodic timer to try and refill the ring,
1585 * a temporary shortage can kill the Rx process.
1586 * - disable the asic to try and avoid an overflow and kick it again
1587 * after refill ?
1588 * - how do other drivers handle this condition (Uh oh...).
1589 */
1590 if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx && net_ratelimit())
1591 printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
1592 }
1593
1594 /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
1595 static irqreturn_t
1596 rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1597 {
1598 struct net_device *dev = (struct net_device *) dev_instance;
1599 struct rtl8169_private *tp = dev->priv;
1600 int boguscnt = max_interrupt_work;
1601 void *ioaddr = tp->mmio_addr;
1602 int status = 0;
1603 int handled = 0;
1604
1605 do {
1606 status = RTL_R16(IntrStatus);
1607
1608 /* hotplug/major error/no more work/shared irq */
1609 if ((status == 0xFFFF) || !status)
1610 break;
1611
1612 handled = 1;
1613 /*
1614 if (status & RxUnderrun)
1615 link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
1616 */
1617 RTL_W16(IntrStatus,
1618 (status & RxFIFOOver) ? (status | RxOverflow) : status);
1619
1620 if (!(status & rtl8169_intr_mask))
1621 break;
1622
1623 // Rx interrupt
1624 if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) {
1625 rtl8169_rx_interrupt(dev, tp, ioaddr);
1626 }
1627 // Tx interrupt
1628 if (status & (TxOK | TxErr)) {
1629 spin_lock(&tp->lock);
1630 rtl8169_tx_interrupt(dev, tp, ioaddr);
1631 spin_unlock(&tp->lock);
1632 }
1633
1634 boguscnt--;
1635 } while (boguscnt > 0);
1636
1637 if (boguscnt <= 0) {
1638 if (net_ratelimit())
1639 printk(KERN_WARNING "%s: Too much work at interrupt!\n",
1640 dev->name);
1641 /* Clear all interrupt sources. */
1642 RTL_W16(IntrStatus, 0xffff);
1643 }
1644 return IRQ_RETVAL(handled);
1645 }
1646
1647 static int
1648 rtl8169_close(struct net_device *dev)
1649 {
1650 struct rtl8169_private *tp = dev->priv;
1651 struct pci_dev *pdev = tp->pci_dev;
1652 void *ioaddr = tp->mmio_addr;
1653
1654 netif_stop_queue(dev);
1655
1656 rtl8169_delete_timer(dev);
1657
1658 spin_lock_irq(&tp->lock);
1659
1660 /* Stop the chip's Tx and Rx DMA processes. */
1661 RTL_W8(ChipCmd, 0x00);
1662
1663 /* Disable interrupts by clearing the interrupt mask. */
1664 RTL_W16(IntrMask, 0x0000);
1665
1666 /* Update the error counts. */
1667 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
1668 RTL_W32(RxMissed, 0);
1669
1670 spin_unlock_irq(&tp->lock);
1671
1672 synchronize_irq();
1673 free_irq(dev->irq, dev);
1674
1675 rtl8169_tx_clear(tp);
1676
1677 rtl8169_rx_clear(tp);
1678
1679 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
1680 tp->RxPhyAddr);
1681 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
1682 tp->TxPhyAddr);
1683 tp->TxDescArray = NULL;
1684 tp->RxDescArray = NULL;
1685
1686 return 0;
1687 }
1688
1689 static void
1690 rtl8169_set_rx_mode(struct net_device *dev)
1691 {
1692 struct rtl8169_private *tp = dev->priv;
1693 void *ioaddr = tp->mmio_addr;
1694 unsigned long flags;
1695 u32 mc_filter[2]; /* Multicast hash filter */
1696 int i, rx_mode;
1697 u32 tmp = 0;
1698
1699 if (dev->flags & IFF_PROMISC) {
1700 /* Unconditionally log net taps. */
1701 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1702 dev->name);
1703 rx_mode =
1704 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
1705 AcceptAllPhys;
1706 mc_filter[1] = mc_filter[0] = 0xffffffff;
1707 } else if ((dev->mc_count > multicast_filter_limit)
1708 || (dev->flags & IFF_ALLMULTI)) {
1709 /* Too many to filter perfectly -- accept all multicasts. */
1710 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1711 mc_filter[1] = mc_filter[0] = 0xffffffff;
1712 } else {
1713 struct dev_mc_list *mclist;
1714 rx_mode = AcceptBroadcast | AcceptMyPhys;
1715 mc_filter[1] = mc_filter[0] = 0;
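/*
 * 64-bit hash filter: the top 6 bits of ether_crc() of each address pick
 * one of the 64 bits in MAR0..MAR7 (dword bit_nr >> 5, bit bit_nr & 31).
 */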
1716 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1717 i++, mclist = mclist->next) {
1718 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1719 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1720 rx_mode |= AcceptMulticast;
1721 }
1722 }
1723
1724 spin_lock_irqsave(&tp->lock, flags);
1725
1726 tmp =
1727 rtl8169_rx_config | rx_mode | (RTL_R32(RxConfig) &
1728 rtl_chip_info[tp->chipset].
1729 RxConfigMask);
1730
1731 RTL_W32(RxConfig, tmp);
1732 RTL_W32(MAR0 + 0, mc_filter[0]);
1733 RTL_W32(MAR0 + 4, mc_filter[1]);
1734
1735 spin_unlock_irqrestore(&tp->lock, flags);
1736 }
1737
1738 /**
1739 * rtl8169_get_stats - Get rtl8169 read/write statistics
1740 * @dev: The Ethernet Device to get statistics for
1741 *
1742 * Get TX/RX statistics for rtl8169
1743 */
1744 static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
1745 {
1746 struct rtl8169_private *tp = dev->priv;
1747 void *ioaddr = tp->mmio_addr;
1748 unsigned long flags;
1749
1750 if (netif_running(dev)) {
1751 spin_lock_irqsave(&tp->lock, flags);
1752 tp->stats.rx_missed_errors += RTL_R32(RxMissed);
1753 RTL_W32(RxMissed, 0);
1754 spin_unlock_irqrestore(&tp->lock, flags);
1755 }
1756
1757 return &tp->stats;
1758 }
1759
1760 static struct pci_driver rtl8169_pci_driver = {
1761 .name = MODULENAME,
1762 .id_table = rtl8169_pci_tbl,
1763 .probe = rtl8169_init_one,
1764 .remove = __devexit_p(rtl8169_remove_one),
1765 #ifdef CONFIG_PM
1766 .suspend = rtl8169_suspend,
1767 .resume = rtl8169_resume,
1768 #endif
1769 };
1770
1771 static int __init
1772 rtl8169_init_module(void)
1773 {
1774 return pci_module_init(&rtl8169_pci_driver);
1775 }
1776
1777 static void __exit
1778 rtl8169_cleanup_module(void)
1779 {
1780 pci_unregister_driver(&rtl8169_pci_driver);
1781 }
1782
1783 module_init(rtl8169_init_module);
1784 module_exit(rtl8169_cleanup_module);
1785