1 /*
2 * drivers/net/ethernet/nxp/lpc_eth.c
3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 *
6 * Copyright (C) 2010 NXP Semiconductors
7 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/errno.h>
30 #include <linux/ioport.h>
31 #include <linux/crc32.h>
32 #include <linux/platform_device.h>
33 #include <linux/spinlock.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/clk.h>
37 #include <linux/workqueue.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/phy.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/of_net.h>
44 #include <linux/types.h>
45
46 #include <linux/delay.h>
47 #include <linux/io.h>
48 #include <mach/board.h>
49 #include <mach/platform.h>
50 #include <mach/hardware.h>
51
52 #define MODNAME "lpc-eth"
53 #define DRV_VERSION "1.00"
54 #define PHYDEF_ADDR 0x00
55
56 #define ENET_MAXF_SIZE 1536
57 #define ENET_RX_DESC 48
58 #define ENET_TX_DESC 16
59
60 #define NAPI_WEIGHT 16
61
62 /*
63 * Ethernet MAC controller Register offsets
64 */
65 #define LPC_ENET_MAC1(x) (x + 0x000)
66 #define LPC_ENET_MAC2(x) (x + 0x004)
67 #define LPC_ENET_IPGT(x) (x + 0x008)
68 #define LPC_ENET_IPGR(x) (x + 0x00C)
69 #define LPC_ENET_CLRT(x) (x + 0x010)
70 #define LPC_ENET_MAXF(x) (x + 0x014)
71 #define LPC_ENET_SUPP(x) (x + 0x018)
72 #define LPC_ENET_TEST(x) (x + 0x01C)
73 #define LPC_ENET_MCFG(x) (x + 0x020)
74 #define LPC_ENET_MCMD(x) (x + 0x024)
75 #define LPC_ENET_MADR(x) (x + 0x028)
76 #define LPC_ENET_MWTD(x) (x + 0x02C)
77 #define LPC_ENET_MRDD(x) (x + 0x030)
78 #define LPC_ENET_MIND(x) (x + 0x034)
79 #define LPC_ENET_SA0(x) (x + 0x040)
80 #define LPC_ENET_SA1(x) (x + 0x044)
81 #define LPC_ENET_SA2(x) (x + 0x048)
82 #define LPC_ENET_COMMAND(x) (x + 0x100)
83 #define LPC_ENET_STATUS(x) (x + 0x104)
84 #define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
85 #define LPC_ENET_RXSTATUS(x) (x + 0x10C)
86 #define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
87 #define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
88 #define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
89 #define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
90 #define LPC_ENET_TXSTATUS(x) (x + 0x120)
91 #define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
92 #define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
93 #define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
94 #define LPC_ENET_TSV0(x) (x + 0x158)
95 #define LPC_ENET_TSV1(x) (x + 0x15C)
96 #define LPC_ENET_RSV(x) (x + 0x160)
97 #define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
98 #define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
99 #define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
100 #define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
101 #define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
102 #define LPC_ENET_HASHFILTERL(x) (x + 0x210)
103 #define LPC_ENET_HASHFILTERH(x) (x + 0x214)
104 #define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
105 #define LPC_ENET_INTENABLE(x) (x + 0xFE4)
106 #define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
107 #define LPC_ENET_INTSET(x) (x + 0xFEC)
108 #define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
109
110 /*
111 * mac1 register definitions
112 */
113 #define LPC_MAC1_RECV_ENABLE (1 << 0)
114 #define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
115 #define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
116 #define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
117 #define LPC_MAC1_LOOPBACK (1 << 4)
118 #define LPC_MAC1_RESET_TX (1 << 8)
119 #define LPC_MAC1_RESET_MCS_TX (1 << 9)
120 #define LPC_MAC1_RESET_RX (1 << 10)
121 #define LPC_MAC1_RESET_MCS_RX (1 << 11)
122 #define LPC_MAC1_SIMULATION_RESET (1 << 14)
123 #define LPC_MAC1_SOFT_RESET (1 << 15)
124
125 /*
126 * mac2 register definitions
127 */
128 #define LPC_MAC2_FULL_DUPLEX (1 << 0)
129 #define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
130 #define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
131 #define LPC_MAC2_DELAYED_CRC (1 << 3)
132 #define LPC_MAC2_CRC_ENABLE (1 << 4)
133 #define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
134 #define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
135 #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
136 #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
137 #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
138 #define LPC_MAC2_NO_BACKOFF (1 << 12)
139 #define LPC_MAC2_BACK_PRESSURE (1 << 13)
140 #define LPC_MAC2_EXCESS_DEFER (1 << 14)
141
142 /*
143 * ipgt register definitions
144 */
145 #define LPC_IPGT_LOAD(n) ((n) & 0x7F)
146
147 /*
148 * ipgr register definitions
149 */
150 #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
151 #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
152
153 /*
154 * clrt register definitions
155 */
156 #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
157 #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
158
159 /*
160 * maxf register definitions
161 */
162 #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
163
164 /*
165 * supp register definitions
166 */
167 #define LPC_SUPP_SPEED (1 << 8)
168 #define LPC_SUPP_RESET_RMII (1 << 11)
169
170 /*
171 * test register definitions
172 */
173 #define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
174 #define LPC_TEST_PAUSE (1 << 1)
175 #define LPC_TEST_BACKPRESSURE (1 << 2)
176
177 /*
178 * mcfg register definitions
179 */
180 #define LPC_MCFG_SCAN_INCREMENT (1 << 0)
181 #define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
182 #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
183 #define LPC_MCFG_CLOCK_HOST_DIV_4 0
184 #define LPC_MCFG_CLOCK_HOST_DIV_6 2
185 #define LPC_MCFG_CLOCK_HOST_DIV_8 3
186 #define LPC_MCFG_CLOCK_HOST_DIV_10 4
187 #define LPC_MCFG_CLOCK_HOST_DIV_14 5
188 #define LPC_MCFG_CLOCK_HOST_DIV_20 6
189 #define LPC_MCFG_CLOCK_HOST_DIV_28 7
190 #define LPC_MCFG_RESET_MII_MGMT (1 << 15)
191
192 /*
193 * mcmd register definitions
194 */
195 #define LPC_MCMD_READ (1 << 0)
196 #define LPC_MCMD_SCAN (1 << 1)
197
198 /*
199 * madr register definitions
200 */
201 #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
202 #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
203
204 /*
205 * mwtd register definitions
206 */
207 #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
208
209 /*
210 * mrdd register definitions
211 */
212 #define LPC_MRDD_READ_MASK 0xFFFF
213
214 /*
215 * mind register definitions
216 */
217 #define LPC_MIND_BUSY (1 << 0)
218 #define LPC_MIND_SCANNING (1 << 1)
219 #define LPC_MIND_NOT_VALID (1 << 2)
220 #define LPC_MIND_MII_LINK_FAIL (1 << 3)
221
222 /*
223 * command register definitions
224 */
225 #define LPC_COMMAND_RXENABLE (1 << 0)
226 #define LPC_COMMAND_TXENABLE (1 << 1)
227 #define LPC_COMMAND_REG_RESET (1 << 3)
228 #define LPC_COMMAND_TXRESET (1 << 4)
229 #define LPC_COMMAND_RXRESET (1 << 5)
230 #define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
231 #define LPC_COMMAND_PASSRXFILTER (1 << 7)
232 #define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
233 #define LPC_COMMAND_RMII (1 << 9)
234 #define LPC_COMMAND_FULLDUPLEX (1 << 10)
235
236 /*
237 * status register definitions
238 */
239 #define LPC_STATUS_RXACTIVE (1 << 0)
240 #define LPC_STATUS_TXACTIVE (1 << 1)
241
242 /*
243 * tsv0 register definitions
244 */
245 #define LPC_TSV0_CRC_ERROR (1 << 0)
246 #define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
247 #define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
248 #define LPC_TSV0_DONE (1 << 3)
249 #define LPC_TSV0_MULTICAST (1 << 4)
250 #define LPC_TSV0_BROADCAST (1 << 5)
251 #define LPC_TSV0_PACKET_DEFER (1 << 6)
252 #define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
253 #define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
254 #define LPC_TSV0_LATE_COLLISION (1 << 9)
255 #define LPC_TSV0_GIANT (1 << 10)
256 #define LPC_TSV0_UNDERRUN (1 << 11)
257 #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
258 #define LPC_TSV0_CONTROL_FRAME (1 << 28)
259 #define LPC_TSV0_PAUSE (1 << 29)
260 #define LPC_TSV0_BACKPRESSURE (1 << 30)
261 #define LPC_TSV0_VLAN (1 << 31)
262
263 /*
264 * tsv1 register definitions
265 */
266 #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
267 #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
268
269 /*
270 * rsv register definitions
271 */
272 #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
273 #define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
274 #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
275 #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
276 #define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
277 #define LPC_RSV_CRC_ERROR (1 << 20)
278 #define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
279 #define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
280 #define LPC_RSV_RECEIVE_OK (1 << 23)
281 #define LPC_RSV_MULTICAST (1 << 24)
282 #define LPC_RSV_BROADCAST (1 << 25)
283 #define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
284 #define LPC_RSV_CONTROL_FRAME (1 << 27)
285 #define LPC_RSV_PAUSE (1 << 28)
286 #define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
287 #define LPC_RSV_VLAN (1 << 30)
288
289 /*
290 * flowcontrolcounter register definitions
291 */
292 #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
293 #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
294
295 /*
296 * flowcontrolstatus register definitions
297 */
298 #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
299
/*
 * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
 * register definitions
 */
304 #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
305 #define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
306 #define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
307 #define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
308 #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
309 #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
310
/*
 * rxfilterctrl register definitions
 */
314 #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
315 #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
316
317 /*
318 * rxfilterwolstatus/rxfilterwolclear register definitions
319 */
320 #define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
321 #define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
322
/*
 * intstatus, intenable, intclear, and intset shared register
 * definitions
 */
327 #define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
328 #define LPC_MACINT_RXERRORONINT (1 << 1)
329 #define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
330 #define LPC_MACINT_RXDONEINTEN (1 << 3)
331 #define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
332 #define LPC_MACINT_TXERRORINTEN (1 << 5)
333 #define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
334 #define LPC_MACINT_TXDONEINTEN (1 << 7)
335 #define LPC_MACINT_SOFTINTEN (1 << 12)
336 #define LPC_MACINT_WAKEUPINTEN (1 << 13)
337
338 /*
339 * powerdown register definitions
340 */
341 #define LPC_POWERDOWN_MACAHB (1 << 31)
342
343 /* Upon the upcoming introduction of device tree usage in LPC32xx,
344 * lpc_phy_interface_mode() and use_iram_for_net() will be extended with a
345 * device parameter for access to device tree information at runtime, instead
346 * of defining the values at compile time
347 */
/* PHY interface type, fixed at build time by the board configuration:
 * MII when CONFIG_ARCH_LPC32XX_MII_SUPPORT is set, RMII otherwise.
 */
static inline phy_interface_t lpc_phy_interface_mode(void)
{
#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
	return PHY_INTERFACE_MODE_MII;
#else
	return PHY_INTERFACE_MODE_RMII;
#endif
}
356
/* Nonzero when the build configuration places the ethernet buffers in
 * on-chip IRAM instead of external memory.
 */
static inline int use_iram_for_net(void)
{
#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
	return 1;
#else
	return 0;
#endif
}
365
366 /* Receive Status information word */
367 #define RXSTATUS_SIZE 0x000007FF
368 #define RXSTATUS_CONTROL (1 << 18)
369 #define RXSTATUS_VLAN (1 << 19)
370 #define RXSTATUS_FILTER (1 << 20)
371 #define RXSTATUS_MULTICAST (1 << 21)
372 #define RXSTATUS_BROADCAST (1 << 22)
373 #define RXSTATUS_CRC (1 << 23)
374 #define RXSTATUS_SYMBOL (1 << 24)
375 #define RXSTATUS_LENGTH (1 << 25)
376 #define RXSTATUS_RANGE (1 << 26)
377 #define RXSTATUS_ALIGN (1 << 27)
378 #define RXSTATUS_OVERRUN (1 << 28)
379 #define RXSTATUS_NODESC (1 << 29)
380 #define RXSTATUS_LAST (1 << 30)
381 #define RXSTATUS_ERROR (1 << 31)
382
383 #define RXSTATUS_STATUS_ERROR \
384 (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
385 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
386
387 /* Receive Descriptor control word */
388 #define RXDESC_CONTROL_SIZE 0x000007FF
389 #define RXDESC_CONTROL_INT (1 << 31)
390
391 /* Transmit Status information word */
392 #define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
393 #define TXSTATUS_DEFER (1 << 25)
394 #define TXSTATUS_EXCESSDEFER (1 << 26)
395 #define TXSTATUS_EXCESSCOLL (1 << 27)
396 #define TXSTATUS_LATECOLL (1 << 28)
397 #define TXSTATUS_UNDERRUN (1 << 29)
398 #define TXSTATUS_NODESC (1 << 30)
399 #define TXSTATUS_ERROR (1 << 31)
400
401 /* Transmit Descriptor control word */
402 #define TXDESC_CONTROL_SIZE 0x000007FF
403 #define TXDESC_CONTROL_OVERRIDE (1 << 26)
404 #define TXDESC_CONTROL_HUGE (1 << 27)
405 #define TXDESC_CONTROL_PAD (1 << 28)
406 #define TXDESC_CONTROL_CRC (1 << 29)
407 #define TXDESC_CONTROL_LAST (1 << 30)
408 #define TXDESC_CONTROL_INT (1 << 31)
409
410 static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
411 struct net_device *ndev);
412
413 /*
414 * Structure of a TX/RX descriptors and RX status
415 */
/* One hardware DMA descriptor; the same layout is used for both the
 * TX and RX rings.
 */
struct txrx_desc_t {
	__le32 packet;		/* Physical address of the frame buffer */
	__le32 control;		/* Buffer size and control/interrupt bits */
};
/* Per-descriptor RX status pair written back by the hardware */
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags plus received size */
	__le32 statushashcrc;	/* Hash/CRC word; only cleared by this driver */
};
424
425 /*
426 * Device driver data structure
427 */
struct netdata_local {
	struct platform_device	*pdev;
	struct net_device	*ndev;
	spinlock_t		lock;		/* Guards link state / IRQ path */
	void __iomem		*net_base;	/* Mapped MAC register base */
	u32			msg_enable;
	struct sk_buff		*skb[ENET_TX_DESC]; /* In-flight TX skbs by ring index */
	unsigned int		last_tx_idx;	/* Next TX ring index to reclaim */
	unsigned int		num_used_tx_buffs; /* TX descriptors currently in use */
	struct mii_bus		*mii_bus;
	struct phy_device	*phy_dev;
	struct clk		*clk;
	dma_addr_t		dma_buff_base_p; /* Physical base of DMA region */
	void			*dma_buff_base_v; /* Virtual base of DMA region */
	size_t			dma_buff_size;
	struct txrx_desc_t	*tx_desc_v;	/* TX descriptor ring (virtual) */
	u32			*tx_stat_v;	/* TX status array (virtual) */
	void			*tx_buff_v;	/* TX frame buffers (virtual) */
	struct txrx_desc_t	*rx_desc_v;	/* RX descriptor ring (virtual) */
	struct rx_status_t	*rx_stat_v;	/* RX status array (virtual) */
	void			*rx_buff_v;	/* RX frame buffers (virtual) */
	int			link;		/* Last-seen PHY link state */
	int			speed;		/* Current speed; 0 when link down */
	int			duplex;		/* Current duplex; -1 when unknown */
	struct napi_struct	napi;
};
454
455 /*
456 * MAC support functions
457 */
/* Program the six-byte station address into the SA2..SA0 register
 * pair bytes (two bytes per register, low byte first).
 */
static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
{
	writel(mac[0] | ((u32)mac[1] << 8), LPC_ENET_SA2(pldat->net_base));
	writel(mac[2] | ((u32)mac[3] << 8), LPC_ENET_SA1(pldat->net_base));
	writel(mac[4] | ((u32)mac[5] << 8), LPC_ENET_SA0(pldat->net_base));

	netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
}
472
/* Read the station address back out of the SA2..SA0 registers into
 * @mac, two bytes per register (low byte first).
 */
static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
{
	u32 sa;

	sa = readl(LPC_ENET_SA2(pldat->net_base));
	mac[0] = sa & 0xFF;
	mac[1] = sa >> 8;

	sa = readl(LPC_ENET_SA1(pldat->net_base));
	mac[2] = sa & 0xFF;
	mac[3] = sa >> 8;

	sa = readl(LPC_ENET_SA0(pldat->net_base));
	mac[4] = sa & 0xFF;
	mac[5] = sa >> 8;
}
488
/* Gate the ethernet block clock on or off via the common clock API */
static void __lpc_eth_clock_enable(struct netdata_local *pldat,
				   bool enable)
{
	if (enable)
		clk_enable(pldat->clk);
	else
		clk_disable(pldat->clk);
}
497
__lpc_params_setup(struct netdata_local * pldat)498 static void __lpc_params_setup(struct netdata_local *pldat)
499 {
500 u32 tmp;
501
502 if (pldat->duplex == DUPLEX_FULL) {
503 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
504 tmp |= LPC_MAC2_FULL_DUPLEX;
505 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
506 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
507 tmp |= LPC_COMMAND_FULLDUPLEX;
508 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
509 writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
510 } else {
511 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
512 tmp &= ~LPC_MAC2_FULL_DUPLEX;
513 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
514 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
515 tmp &= ~LPC_COMMAND_FULLDUPLEX;
516 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
517 writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
518 }
519
520 if (pldat->speed == SPEED_100)
521 writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
522 else
523 writel(0, LPC_ENET_SUPP(pldat->net_base));
524 }
525
/* Put all MAC logic (TX/RX paths, MCS state machines, registers and
 * datapaths) through a full hardware reset.
 */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
535
/* Reset the MII management block and select the slowest MDIO clock
 * (host clock / 28).  Always returns 0 to satisfy the mii_bus reset
 * hook signature.
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
547
__va_to_pa(void * addr,struct netdata_local * pldat)548 static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
549 {
550 phys_addr_t phaddr;
551
552 phaddr = addr - pldat->dma_buff_base_v;
553 phaddr += pldat->dma_buff_base_p;
554
555 return phaddr;
556 }
557
/* Unmask the RX-done and TX-done MAC interrupts */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
563
/* Mask all MAC interrupt sources */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
568
569 /* Setup TX/RX descriptors */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	/* Carve one contiguous DMA allocation into, in order: TX
	 * descriptors, TX status words, TX frame buffers, RX descriptors,
	 * RX status pairs, RX frame buffers.  Each section is re-aligned
	 * to 16 bytes before use.
	 */
	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware; the size
	 * field of the control word is programmed as (length - 1)
	 */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
642
__lpc_eth_init(struct netdata_local * pldat)643 static void __lpc_eth_init(struct netdata_local *pldat)
644 {
645 u32 tmp;
646
647 /* Disable controller and reset */
648 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
649 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
650 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
651 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
652 tmp &= ~LPC_MAC1_RECV_ENABLE;
653 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
654
655 /* Initial MAC setup */
656 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
657 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
658 LPC_ENET_MAC2(pldat->net_base));
659 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
660
661 /* Collision window, gap */
662 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
663 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
664 LPC_ENET_CLRT(pldat->net_base));
665 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
666
667 if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
668 writel(LPC_COMMAND_PASSRUNTFRAME,
669 LPC_ENET_COMMAND(pldat->net_base));
670 else {
671 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
672 LPC_ENET_COMMAND(pldat->net_base));
673 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
674 }
675
676 __lpc_params_setup(pldat);
677
678 /* Setup TX and RX descriptors */
679 __lpc_txrx_desc_setup(pldat);
680
681 /* Setup packet filtering */
682 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
683 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
684
685 /* Get the next TX buffer output index */
686 pldat->num_used_tx_buffs = 0;
687 pldat->last_tx_idx =
688 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
689
690 /* Clear and enable interrupts */
691 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
692 smp_wmb();
693 lpc_eth_enable_int(pldat->net_base);
694
695 /* Enable controller */
696 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
697 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
698 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
699 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
700 tmp |= LPC_MAC1_RECV_ENABLE;
701 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
702 }
703
/* Stop the controller: full MAC reset then clear both MAC config
 * registers so nothing is left enabled.
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
711
712 /*
713 * MAC<--->PHY support functions
714 */
/* Read one PHY register over the MII management interface.
 * Returns the 16-bit register value, or -EIO if the controller stays
 * busy for more than 100ms.
 */
static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	int lps;

	/* Select PHY and register, then start the read cycle */
	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));

	/* Wait for unbusy status */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	/* Latch the data and clear the read command */
	lps = readl(LPC_ENET_MRDD(pldat->net_base));
	writel(0, LPC_ENET_MCMD(pldat->net_base));

	return lps;
}
736
/* Write one PHY register over the MII management interface.
 * Returns 0 on success or -EIO if the controller stays busy for more
 * than 100ms.
 */
static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
			  u16 phydata)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	/* Select PHY and register; writing MWTD starts the cycle */
	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(phydata, LPC_ENET_MWTD(pldat->net_base));

	/* Wait for completion */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
755
lpc_mdio_reset(struct mii_bus * bus)756 static int lpc_mdio_reset(struct mii_bus *bus)
757 {
758 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
759 }
760
/* phylib link-change callback.  Mirrors the PHY's link/speed/duplex
 * state into the driver under the spinlock, and reprograms the MAC
 * parameters (outside the lock) when anything changed.
 */
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = pldat->phy_dev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			/* Link lost: reset cached speed/duplex to unknown */
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	if (status_change)
		__lpc_params_setup(pldat);
}
795
/* Find the first PHY on our MDIO bus and attach to it, restricting the
 * advertised features to the MAC's 10/100 basic capabilities.
 * Returns 0 on success or a negative errno.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, dev_name(&phydev->dev),
			     &lpc_handle_link_change, 0, lpc_phy_interface_mode());

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Link state is unknown until the first link-change callback */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;
	pldat->phy_dev = phydev;

	netdev_info(ndev,
		"attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
	return 0;
}
834
lpc_mii_init(struct netdata_local * pldat)835 static int lpc_mii_init(struct netdata_local *pldat)
836 {
837 int err = -ENXIO, i;
838
839 pldat->mii_bus = mdiobus_alloc();
840 if (!pldat->mii_bus) {
841 err = -ENOMEM;
842 goto err_out;
843 }
844
845 /* Setup MII mode */
846 if (lpc_phy_interface_mode() == PHY_INTERFACE_MODE_MII)
847 writel(LPC_COMMAND_PASSRUNTFRAME,
848 LPC_ENET_COMMAND(pldat->net_base));
849 else {
850 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
851 LPC_ENET_COMMAND(pldat->net_base));
852 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
853 }
854
855 pldat->mii_bus->name = "lpc_mii_bus";
856 pldat->mii_bus->read = &lpc_mdio_read;
857 pldat->mii_bus->write = &lpc_mdio_write;
858 pldat->mii_bus->reset = &lpc_mdio_reset;
859 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
860 pldat->pdev->name, pldat->pdev->id);
861 pldat->mii_bus->priv = pldat;
862 pldat->mii_bus->parent = &pldat->pdev->dev;
863
864 pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
865 if (!pldat->mii_bus->irq) {
866 err = -ENOMEM;
867 goto err_out_1;
868 }
869
870 for (i = 0; i < PHY_MAX_ADDR; i++)
871 pldat->mii_bus->irq[i] = PHY_POLL;
872
873 platform_set_drvdata(pldat->pdev, pldat->mii_bus);
874
875 if (mdiobus_register(pldat->mii_bus))
876 goto err_out_free_mdio_irq;
877
878 if (lpc_mii_probe(pldat->ndev) != 0)
879 goto err_out_unregister_bus;
880
881 return 0;
882
883 err_out_unregister_bus:
884 mdiobus_unregister(pldat->mii_bus);
885 err_out_free_mdio_irq:
886 kfree(pldat->mii_bus->irq);
887 err_out_1:
888 mdiobus_free(pldat->mii_bus);
889 err_out:
890 return err;
891 }
892
/* Reclaim TX descriptors the hardware has finished with.  Walks the
 * ring from the driver's last-reclaimed index up to the hardware
 * consume index, accounts status for each completed frame, frees its
 * skb, and wakes the queue once at least half the ring is free.
 * Called with the TX queue locked (see lpc_eth_poll).
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		skb = pldat->skb[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}
		dev_kfree_skb_irq(skb);

		/* Re-read: more frames may have completed meanwhile */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
950
/* Receive up to @budget frames from the RX ring.  Each good frame is
 * copied out of its fixed DMA buffer into a freshly allocated skb and
 * handed to the stack; the hardware consume index is advanced per
 * frame.  Returns the number of descriptors processed.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
			readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		/* Hardware size field holds length minus one */
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error?  A frame flagged with only a range error
		 * is deliberately still accepted as valid.
		 */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			/* NOTE(review): statusinfo is read without
			 * le32_to_cpu here and stored in an int —
			 * presumably fine on little-endian ARM only.
			 */
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len + 8);
			if (!skb)
				ndev->stats.rx_dropped++;
			else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
					rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index; writing it back returns the
		 * descriptor to the hardware
		 */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
1022
lpc_eth_poll(struct napi_struct * napi,int budget)1023 static int lpc_eth_poll(struct napi_struct *napi, int budget)
1024 {
1025 struct netdata_local *pldat = container_of(napi,
1026 struct netdata_local, napi);
1027 struct net_device *ndev = pldat->ndev;
1028 int rx_done = 0;
1029 struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
1030
1031 __netif_tx_lock(txq, smp_processor_id());
1032 __lpc_handle_xmit(ndev);
1033 __netif_tx_unlock(txq);
1034 rx_done = __lpc_handle_recv(ndev, budget);
1035
1036 if (rx_done < budget) {
1037 napi_complete(napi);
1038 lpc_eth_enable_int(pldat->net_base);
1039 }
1040
1041 return rx_done;
1042 }
1043
__lpc_eth_interrupt(int irq,void * dev_id)1044 static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
1045 {
1046 struct net_device *ndev = dev_id;
1047 struct netdata_local *pldat = netdev_priv(ndev);
1048 u32 tmp;
1049
1050 spin_lock(&pldat->lock);
1051
1052 tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
1053 /* Clear interrupts */
1054 writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));
1055
1056 lpc_eth_disable_int(pldat->net_base);
1057 if (likely(napi_schedule_prep(&pldat->napi)))
1058 __napi_schedule(&pldat->napi);
1059
1060 spin_unlock(&pldat->lock);
1061
1062 return IRQ_HANDLED;
1063 }
1064
/*
 * ndo_stop: take the interface down.
 *
 * Disables NAPI and the TX queue, stops the PHY state machine if a PHY
 * is attached, then resets the MAC and clears MAC1/MAC2 under the lock
 * before gating the peripheral clock to save power.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (pldat->phy_dev)
		phy_stop(pldat->phy_dev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	/* Fully disable the RX and TX MAC paths */
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	__lpc_eth_clock_enable(pldat, false);

	return 0;
}
1090
/*
 * ndo_start_xmit: queue one frame for transmission.
 *
 * The payload is copied into a fixed per-descriptor DMA buffer (the
 * skb itself is never DMA-mapped; the buffers come from the coherent
 * region set up at probe time).  The descriptor control word is filled
 * in before the produce index is advanced, which is what hands the
 * descriptor to the hardware.  The skb is stashed in pldat->skb[] and
 * freed later by the TX-completion handler.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	/* Hardware expects length minus one; single-fragment frame */
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skb[txidx] = skb;
	pldat->num_used_tx_buffs++;

	/* Start transmit: advancing the produce index (with wrap at
	   ENET_TX_DESC) hands the descriptor to the hardware */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	return NETDEV_TX_OK;
}
1142
lpc_set_mac_address(struct net_device * ndev,void * p)1143 static int lpc_set_mac_address(struct net_device *ndev, void *p)
1144 {
1145 struct sockaddr *addr = p;
1146 struct netdata_local *pldat = netdev_priv(ndev);
1147 unsigned long flags;
1148
1149 if (!is_valid_ether_addr(addr->sa_data))
1150 return -EADDRNOTAVAIL;
1151 memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1152
1153 spin_lock_irqsave(&pldat->lock, flags);
1154
1155 /* Set station address */
1156 __lpc_set_mac(pldat, ndev->dev_addr);
1157
1158 spin_unlock_irqrestore(&pldat->lock, flags);
1159
1160 return 0;
1161 }
1162
lpc_eth_set_multicast_list(struct net_device * ndev)1163 static void lpc_eth_set_multicast_list(struct net_device *ndev)
1164 {
1165 struct netdata_local *pldat = netdev_priv(ndev);
1166 struct netdev_hw_addr_list *mcptr = &ndev->mc;
1167 struct netdev_hw_addr *ha;
1168 u32 tmp32, hash_val, hashlo, hashhi;
1169 unsigned long flags;
1170
1171 spin_lock_irqsave(&pldat->lock, flags);
1172
1173 /* Set station address */
1174 __lpc_set_mac(pldat, ndev->dev_addr);
1175
1176 tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
1177
1178 if (ndev->flags & IFF_PROMISC)
1179 tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
1180 LPC_RXFLTRW_ACCEPTUMULTICAST;
1181 if (ndev->flags & IFF_ALLMULTI)
1182 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
1183
1184 if (netdev_hw_addr_list_count(mcptr))
1185 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
1186
1187 writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
1188
1189
1190 /* Set initial hash table */
1191 hashlo = 0x0;
1192 hashhi = 0x0;
1193
1194 /* 64 bits : multicast address in hash table */
1195 netdev_hw_addr_list_for_each(ha, mcptr) {
1196 hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
1197
1198 if (hash_val >= 32)
1199 hashhi |= 1 << (hash_val - 32);
1200 else
1201 hashlo |= 1 << hash_val;
1202 }
1203
1204 writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
1205 writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
1206
1207 spin_unlock_irqrestore(&pldat->lock, flags);
1208 }
1209
lpc_eth_ioctl(struct net_device * ndev,struct ifreq * req,int cmd)1210 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1211 {
1212 struct netdata_local *pldat = netdev_priv(ndev);
1213 struct phy_device *phydev = pldat->phy_dev;
1214
1215 if (!netif_running(ndev))
1216 return -EINVAL;
1217
1218 if (!phydev)
1219 return -ENODEV;
1220
1221 return phy_mii_ioctl(phydev, req, cmd);
1222 }
1223
lpc_eth_open(struct net_device * ndev)1224 static int lpc_eth_open(struct net_device *ndev)
1225 {
1226 struct netdata_local *pldat = netdev_priv(ndev);
1227
1228 if (netif_msg_ifup(pldat))
1229 dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
1230
1231 if (!is_valid_ether_addr(ndev->dev_addr))
1232 return -EADDRNOTAVAIL;
1233
1234 __lpc_eth_clock_enable(pldat, true);
1235
1236 /* Reset and initialize */
1237 __lpc_eth_reset(pldat);
1238 __lpc_eth_init(pldat);
1239
1240 /* schedule a link state check */
1241 phy_start(pldat->phy_dev);
1242 netif_start_queue(ndev);
1243 napi_enable(&pldat->napi);
1244
1245 return 0;
1246 }
1247
1248 /*
1249 * Ethtool ops
1250 */
lpc_eth_ethtool_getdrvinfo(struct net_device * ndev,struct ethtool_drvinfo * info)1251 static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
1252 struct ethtool_drvinfo *info)
1253 {
1254 strcpy(info->driver, MODNAME);
1255 strcpy(info->version, DRV_VERSION);
1256 strcpy(info->bus_info, dev_name(ndev->dev.parent));
1257 }
1258
lpc_eth_ethtool_getmsglevel(struct net_device * ndev)1259 static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1260 {
1261 struct netdata_local *pldat = netdev_priv(ndev);
1262
1263 return pldat->msg_enable;
1264 }
1265
/* ethtool set_msglevel: store the new netif message mask */
static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
{
	struct netdata_local *priv = netdev_priv(ndev);

	priv->msg_enable = level;
}
1272
lpc_eth_ethtool_getsettings(struct net_device * ndev,struct ethtool_cmd * cmd)1273 static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
1274 struct ethtool_cmd *cmd)
1275 {
1276 struct netdata_local *pldat = netdev_priv(ndev);
1277 struct phy_device *phydev = pldat->phy_dev;
1278
1279 if (!phydev)
1280 return -EOPNOTSUPP;
1281
1282 return phy_ethtool_gset(phydev, cmd);
1283 }
1284
lpc_eth_ethtool_setsettings(struct net_device * ndev,struct ethtool_cmd * cmd)1285 static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
1286 struct ethtool_cmd *cmd)
1287 {
1288 struct netdata_local *pldat = netdev_priv(ndev);
1289 struct phy_device *phydev = pldat->phy_dev;
1290
1291 if (!phydev)
1292 return -EOPNOTSUPP;
1293
1294 return phy_ethtool_sset(phydev, cmd);
1295 }
1296
/*
 * Ethtool operations: driver identification, PHY-backed link settings,
 * message level, and link state via the generic ethtool_op_get_link().
 */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
	.get_settings = lpc_eth_ethtool_getsettings,
	.set_settings = lpc_eth_ethtool_setsettings,
	.get_msglevel = lpc_eth_ethtool_getmsglevel,
	.set_msglevel = lpc_eth_ethtool_setmsglevel,
	.get_link = ethtool_op_get_link,
};
1305
/*
 * net_device operations.  Statistics come from the default dev->stats
 * counters updated in the RX/TX paths; MTU changes use the generic
 * Ethernet helper.
 */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open = lpc_eth_open,
	.ndo_stop = lpc_eth_close,
	.ndo_start_xmit = lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode = lpc_eth_set_multicast_list,
	.ndo_do_ioctl = lpc_eth_ioctl,
	.ndo_set_mac_address = lpc_set_mac_address,
	.ndo_change_mtu = eth_change_mtu,
};
1315
lpc_eth_drv_probe(struct platform_device * pdev)1316 static int lpc_eth_drv_probe(struct platform_device *pdev)
1317 {
1318 struct resource *res;
1319 struct resource *dma_res;
1320 struct net_device *ndev;
1321 struct netdata_local *pldat;
1322 struct phy_device *phydev;
1323 dma_addr_t dma_handle;
1324 int irq, ret;
1325
1326 /* Get platform resources */
1327 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1328 dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1329 irq = platform_get_irq(pdev, 0);
1330 if ((!res) || (!dma_res) || (irq < 0) || (irq >= NR_IRQS)) {
1331 dev_err(&pdev->dev, "error getting resources.\n");
1332 ret = -ENXIO;
1333 goto err_exit;
1334 }
1335
1336 /* Allocate net driver data structure */
1337 ndev = alloc_etherdev(sizeof(struct netdata_local));
1338 if (!ndev) {
1339 dev_err(&pdev->dev, "could not allocate device.\n");
1340 ret = -ENOMEM;
1341 goto err_exit;
1342 }
1343
1344 SET_NETDEV_DEV(ndev, &pdev->dev);
1345
1346 pldat = netdev_priv(ndev);
1347 pldat->pdev = pdev;
1348 pldat->ndev = ndev;
1349
1350 spin_lock_init(&pldat->lock);
1351
1352 /* Save resources */
1353 ndev->irq = irq;
1354
1355 /* Get clock for the device */
1356 pldat->clk = clk_get(&pdev->dev, NULL);
1357 if (IS_ERR(pldat->clk)) {
1358 dev_err(&pdev->dev, "error getting clock.\n");
1359 ret = PTR_ERR(pldat->clk);
1360 goto err_out_free_dev;
1361 }
1362
1363 /* Enable network clock */
1364 __lpc_eth_clock_enable(pldat, true);
1365
1366 /* Map IO space */
1367 pldat->net_base = ioremap(res->start, res->end - res->start + 1);
1368 if (!pldat->net_base) {
1369 dev_err(&pdev->dev, "failed to map registers\n");
1370 ret = -ENOMEM;
1371 goto err_out_disable_clocks;
1372 }
1373 ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
1374 ndev->name, ndev);
1375 if (ret) {
1376 dev_err(&pdev->dev, "error requesting interrupt.\n");
1377 goto err_out_iounmap;
1378 }
1379
1380 /* Fill in the fields of the device structure with ethernet values. */
1381 ether_setup(ndev);
1382
1383 /* Setup driver functions */
1384 ndev->netdev_ops = &lpc_netdev_ops;
1385 ndev->ethtool_ops = &lpc_eth_ethtool_ops;
1386 ndev->watchdog_timeo = msecs_to_jiffies(2500);
1387
1388 /* Get size of DMA buffers/descriptors region */
1389 pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
1390 sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
1391 pldat->dma_buff_base_v = 0;
1392
1393 if (use_iram_for_net()) {
1394 dma_handle = dma_res->start;
1395 if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
1396 pldat->dma_buff_base_v =
1397 io_p2v(dma_res->start);
1398 else
1399 netdev_err(ndev,
1400 "IRAM not big enough for net buffers, using SDRAM instead.\n");
1401 }
1402
1403 if (pldat->dma_buff_base_v == 0) {
1404 pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
1405
1406 /* Allocate a chunk of memory for the DMA ethernet buffers
1407 and descriptors */
1408 pldat->dma_buff_base_v =
1409 dma_alloc_coherent(&pldat->pdev->dev,
1410 pldat->dma_buff_size, &dma_handle,
1411 GFP_KERNEL);
1412
1413 if (pldat->dma_buff_base_v == NULL) {
1414 dev_err(&pdev->dev, "error getting DMA region.\n");
1415 ret = -ENOMEM;
1416 goto err_out_free_irq;
1417 }
1418 }
1419 pldat->dma_buff_base_p = dma_handle;
1420
1421 netdev_dbg(ndev, "IO address start :0x%08x\n",
1422 res->start);
1423 netdev_dbg(ndev, "IO address size :%d\n",
1424 res->end - res->start + 1);
1425 netdev_err(ndev, "IO address (mapped) :0x%p\n",
1426 pldat->net_base);
1427 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
1428 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
1429 netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
1430 pldat->dma_buff_base_p);
1431 netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
1432 pldat->dma_buff_base_v);
1433
1434 /* Get MAC address from current HW setting (POR state is all zeros) */
1435 __lpc_get_mac(pldat, ndev->dev_addr);
1436
1437 #ifdef CONFIG_OF_NET
1438 if (!is_valid_ether_addr(ndev->dev_addr)) {
1439 const char *macaddr = of_get_mac_address(pdev->dev.of_node);
1440 if (macaddr)
1441 memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
1442 }
1443 #endif
1444 if (!is_valid_ether_addr(ndev->dev_addr))
1445 eth_hw_addr_random(ndev);
1446
1447 /* Reset the ethernet controller */
1448 __lpc_eth_reset(pldat);
1449
1450 /* then shut everything down to save power */
1451 __lpc_eth_shutdown(pldat);
1452
1453 /* Set default parameters */
1454 pldat->msg_enable = NETIF_MSG_LINK;
1455
1456 /* Force an MII interface reset and clock setup */
1457 __lpc_mii_mngt_reset(pldat);
1458
1459 /* Force default PHY interface setup in chip, this will probably be
1460 changed by the PHY driver */
1461 pldat->link = 0;
1462 pldat->speed = 100;
1463 pldat->duplex = DUPLEX_FULL;
1464 __lpc_params_setup(pldat);
1465
1466 netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
1467
1468 ret = register_netdev(ndev);
1469 if (ret) {
1470 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1471 goto err_out_dma_unmap;
1472 }
1473 platform_set_drvdata(pdev, ndev);
1474
1475 if (lpc_mii_init(pldat) != 0)
1476 goto err_out_unregister_netdev;
1477
1478 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
1479 res->start, ndev->irq);
1480
1481 phydev = pldat->phy_dev;
1482
1483 device_init_wakeup(&pdev->dev, 1);
1484 device_set_wakeup_enable(&pdev->dev, 0);
1485
1486 return 0;
1487
1488 err_out_unregister_netdev:
1489 platform_set_drvdata(pdev, NULL);
1490 unregister_netdev(ndev);
1491 err_out_dma_unmap:
1492 if (!use_iram_for_net() ||
1493 pldat->dma_buff_size > lpc32xx_return_iram_size())
1494 dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
1495 pldat->dma_buff_base_v,
1496 pldat->dma_buff_base_p);
1497 err_out_free_irq:
1498 free_irq(ndev->irq, ndev);
1499 err_out_iounmap:
1500 iounmap(pldat->net_base);
1501 err_out_disable_clocks:
1502 clk_disable(pldat->clk);
1503 clk_put(pldat->clk);
1504 err_out_free_dev:
1505 free_netdev(ndev);
1506 err_exit:
1507 pr_err("%s: not found (%d).\n", MODNAME, ret);
1508 return ret;
1509 }
1510
/*
 * Platform remove: tear down in the reverse order of probe —
 * unregister the netdev, release the DMA region (only when it was
 * allocated from SDRAM, not when it lives in IRAM), free the IRQ,
 * unmap registers, take down the MII bus and release the clock.
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	if (!use_iram_for_net() ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1534
1535 #ifdef CONFIG_PM
/*
 * Suspend: optionally arm the IRQ as a wakeup source, then detach a
 * running interface, shut the MAC down and gate its clock.
 *
 * Fix: the previous code dereferenced @ndev (netdev_priv(), ndev->irq)
 * before its NULL check, making the check useless — test @ndev first.
 */
static int lpc_eth_drv_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat;

	if (!ndev)
		return 0;

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(ndev->irq);

	if (netif_running(ndev)) {
		pldat = netdev_priv(ndev);

		netif_device_detach(ndev);
		__lpc_eth_shutdown(pldat);
		clk_disable(pldat->clk);

		/*
		 * Reset again now clock is disable to be sure
		 * EMC_MDC is down
		 */
		__lpc_eth_reset(pldat);
	}

	return 0;
}
1561
lpc_eth_drv_resume(struct platform_device * pdev)1562 static int lpc_eth_drv_resume(struct platform_device *pdev)
1563 {
1564 struct net_device *ndev = platform_get_drvdata(pdev);
1565 struct netdata_local *pldat;
1566
1567 if (device_may_wakeup(&pdev->dev))
1568 disable_irq_wake(ndev->irq);
1569
1570 if (ndev) {
1571 if (netif_running(ndev)) {
1572 pldat = netdev_priv(ndev);
1573
1574 /* Enable interface clock */
1575 clk_enable(pldat->clk);
1576
1577 /* Reset and initialize */
1578 __lpc_eth_reset(pldat);
1579 __lpc_eth_init(pldat);
1580
1581 netif_device_attach(ndev);
1582 }
1583 }
1584
1585 return 0;
1586 }
1587 #endif
1588
/*
 * Platform driver glue.  PM callbacks are compiled in only when
 * CONFIG_PM is set; remove is wrapped for hot-unplug support via
 * __devexit_p().
 */
static struct platform_driver lpc_eth_driver = {
	.probe = lpc_eth_drv_probe,
	.remove = __devexit_p(lpc_eth_drv_remove),
#ifdef CONFIG_PM
	.suspend = lpc_eth_drv_suspend,
	.resume = lpc_eth_drv_resume,
#endif
	.driver = {
		.name = MODNAME,
	},
};

/* Registers the driver at module init, unregisters on module exit */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");
1607