// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
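
/* A minimal sketch of the Rx ring walk described above (illustrative
 * only; the real logic lives in the driver's NAPI poll path):
 *
 *      bdp = rx_queue->rx_bd_base;
 *      while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY)) {
 *              process(bdp);   // hand the filled buffer to the stack
 *              refill(bdp);    // attach a fresh buffer, set RXBD_EMPTY
 *              bdp++;          // wrap to base when RXBD_WRAP is set
 *      }
 */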

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

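/* Initialize a single Rx buffer descriptor: point it at the given
 * buffer, mark it empty and interrupt-generating, and set the WRAP
 * flag on the last descriptor of the ring.
 */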
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                            dma_addr_t buf)
{
        u32 lstatus;

        bdp->bufPtr = cpu_to_be32(buf);

        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
                lstatus |= BD_LFLAG(RXBD_WRAP);

        gfar_wmb();

        bdp->lstatus = cpu_to_be32(lstatus);
}

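/* Program the DMA engine with the physical base address of every
 * Tx and Rx buffer descriptor ring (tbase0../rbase0..).
 */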
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->tbase0;
        for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
                baddr += 2;
        }

        baddr = &regs->rbase0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
                baddr += 2;
        }
}

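/* Program the per-queue Rx parameter registers with the ring size
 * and the free-buffer threshold used for lossless flow control.
 */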
static void gfar_init_rqprm(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->rqprm0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
                           (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
                baddr++;
        }
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;

        if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
                priv->uses_rxfcb = 1;

        if (priv->hwts_rx_en || priv->rx_filer_enable)
                priv->uses_rxfcb = 1;
}

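/* Build and program RCTRL from the currently enabled Rx offloads
 * (filer, checksum, VLAN extraction, timestamping, padding) and
 * initialize the lossless flow control thresholds.
 */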
static void gfar_mac_rx_config(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 rctrl = 0;

        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
                /* Program the RIR0 reg with the required distribution */
                gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
        }

        /* Restore PROMISC mode */
        if (priv->ndev->flags & IFF_PROMISC)
                rctrl |= RCTRL_PROM;

        if (priv->ndev->features & NETIF_F_RXCSUM)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash)
                rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Enable HW time stamping if requested from user space */
        if (priv->hwts_rx_en)
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

        /* Clear the LFC bit */
        gfar_write(&regs->rctrl, rctrl);
        /* Init flow control threshold values */
        gfar_init_rqprm(priv);
        gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
        rctrl |= RCTRL_LFC;

        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);
}

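/* Build and program TCTRL: Tx checksum offload, priority-based vs.
 * weighted-round-robin Tx scheduling, and VLAN tag insertion.
 */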
static void gfar_mac_tx_config(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tctrl = 0;

        if (priv->ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        if (priv->prio_sched_en) {
                tctrl |= TCTRL_TXSCHED_PRIO;
        } else {
                tctrl |= TCTRL_TXSCHED_WRRS;
                gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
                gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
        }

        if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
                tctrl |= TCTRL_VLINS;

        gfar_write(&regs->tctrl, tctrl);
}

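/* Program the interrupt coalescing registers for the Tx/Rx queues
 * selected by tx_mask/rx_mask; in non-multi-group mode only the
 * single txic/rxic register pair exists.
 */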
static void gfar_configure_coalescing(struct gfar_private *priv,
                                      unsigned long tx_mask,
                                      unsigned long rx_mask)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;

        if (priv->mode == MQ_MG_MODE) {
                int i = 0;

                baddr = &regs->txic0;
                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                        gfar_write(baddr + i, 0);
                        if (likely(priv->tx_queue[i]->txcoalescing))
                                gfar_write(baddr + i, priv->tx_queue[i]->txic);
                }

                baddr = &regs->rxic0;
                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                        gfar_write(baddr + i, 0);
                        if (likely(priv->rx_queue[i]->rxcoalescing))
                                gfar_write(baddr + i, priv->rx_queue[i]->rxic);
                }
        } else {
                /* Backward compatible case -- even if we enable
                 * multiple queues, there's only single reg to program
                 */
                gfar_write(&regs->txic, 0);
                if (likely(priv->tx_queue[0]->txcoalescing))
                        gfar_write(&regs->txic, priv->tx_queue[0]->txic);

                gfar_write(&regs->rxic, 0);
                if (unlikely(priv->rx_queue[0]->rxcoalescing))
                        gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
        }
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
        gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static void gfar_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct gfar_private *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
                stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
                stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
                stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
                unsigned long flags;
                u32 rdrp, car, car_before;
                u64 rdrp_offset;

                spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
                car = gfar_read(&rmon->car1) & CAR1_C1RDR;
                do {
                        car_before = car;
                        rdrp = gfar_read(&rmon->rdrp);
                        car = gfar_read(&rmon->car1) & CAR1_C1RDR;
                } while (car != car_before);
                if (car) {
                        priv->rmon_overflow.rdrp++;
                        gfar_write(&rmon->car1, car);
                }
                rdrp_offset = priv->rmon_overflow.rdrp;
                spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);

                stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
        }
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits in the
 *    hash index indicate which gaddr register to use, and the 5
 *    other bits indicate which bit (assuming an IBM numbering
 *    scheme, which for PowerPC (tm) is usually the case) in the
 *    register holds the entry.
 */
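/* Worked example (illustrative, assuming the default 8-bit hash
 * width): if the reversed CRC is 0xD6xxxxxx, the hash index is
 * 0xD6 = 0b11010110. The top 3 bits (0b110 = 6) select hash_regs[6],
 * and the low 5 bits (0b10110 = 22) select bit 22 counted from the
 * MSB, i.e. the mask 1 << (31 - 22).
 */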
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
        u32 result = ether_crc(ETH_ALEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
        u32 value = (1 << (31 - whichbit));

        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the num'th pair to a given address.
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        u32 __iomem *macptr = &regs->macstnaddr1;

        macptr += num*2;

        /* For a station address of 0x12345678ABCD in transmission
         * order (BE), MACnADDR1 is set to 0xCDAB7856 and
         * MACnADDR2 is set to 0x34120000.
         */
        tempval = (addr[5] << 24) | (addr[4] << 16) |
                  (addr[3] << 8) | addr[2];

        gfar_write(macptr, tempval);

        tempval = (addr[1] << 24) | (addr[0] << 16);

        gfar_write(macptr+1, tempval);
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}

static void gfar_ints_disable(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                struct gfar __iomem *regs = priv->gfargrp[i].regs;

                /* Clear IEVENT */
                gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

                /* Initialize IMASK */
                gfar_write(&regs->imask, IMASK_INIT_CLEAR);
        }
}

static void gfar_ints_enable(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                struct gfar __iomem *regs = priv->gfargrp[i].regs;

                /* Unmask the interrupts we look for */
                gfar_write(&regs->imask,
                           IMASK_DEFAULT | priv->rmon_overflow.imask);
        }
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
                                            GFP_KERNEL);
                if (!priv->tx_queue[i])
                        return -ENOMEM;

                priv->tx_queue[i]->tx_skbuff = NULL;
                priv->tx_queue[i]->qindex = i;
                priv->tx_queue[i]->dev = priv->ndev;
                spin_lock_init(&(priv->tx_queue[i]->txlock));
        }
        return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
                                            GFP_KERNEL);
                if (!priv->rx_queue[i])
                        return -ENOMEM;

                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->ndev = priv->ndev;
        }
        return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_tx_queues; i++)
                kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_rx_queues; i++)
                kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < MAXGROUPS; i++)
                if (priv->gfargrp[i].regs)
                        iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
        int i, j;

        for (i = 0; i < priv->num_grps; i++)
                for (j = 0; j < GFAR_NUM_IRQS; j++) {
                        kfree(priv->gfargrp[i].irqinfo[j]);
                        priv->gfargrp[i].irqinfo[j] = NULL;
                }

        free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                napi_disable(&priv->gfargrp[i].napi_rx);
                napi_disable(&priv->gfargrp[i].napi_tx);
        }
}

static void enable_napi(struct gfar_private *priv)
{
        int i;

        for (i = 0; i < priv->num_grps; i++) {
                napi_enable(&priv->gfargrp[i].napi_rx);
                napi_enable(&priv->gfargrp[i].napi_tx);
        }
}

static int gfar_parse_group(struct device_node *np,
                            struct gfar_private *priv, const char *model)
{
        struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
        int i;

        for (i = 0; i < GFAR_NUM_IRQS; i++) {
                grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
                                          GFP_KERNEL);
                if (!grp->irqinfo[i])
                        return -ENOMEM;
        }

        grp->regs = of_iomap(np, 0);
        if (!grp->regs)
                return -ENOMEM;

        gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

        /* If we aren't the FEC we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
                gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
                if (!gfar_irq(grp, TX)->irq ||
                    !gfar_irq(grp, RX)->irq ||
                    !gfar_irq(grp, ER)->irq)
                        return -EINVAL;
        }

        grp->priv = priv;
        spin_lock_init(&grp->grplock);
        if (priv->mode == MQ_MG_MODE) {
                /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
                grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
                grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
        } else {
                grp->rx_bit_map = 0xFF;
                grp->tx_bit_map = 0xFF;
        }

        /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
         * right to left, so we need to revert the 8 bits to get the q index
         */
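        /* e.g. a map of 0x80 (q0 only, MSB set) becomes 0x01 after
         * bitrev8(), so for_each_set_bit() visits bit 0 and yields
         * queue index 0 directly. (Illustrative example.)
         */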
        grp->rx_bit_map = bitrev8(grp->rx_bit_map);
        grp->tx_bit_map = bitrev8(grp->tx_bit_map);

        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
         * also assign queues to groups
         */
        for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
                if (!grp->rx_queue)
                        grp->rx_queue = priv->rx_queue[i];
                grp->num_rx_queues++;
                grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
                priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                priv->rx_queue[i]->grp = grp;
        }

        for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
                if (!grp->tx_queue)
                        grp->tx_queue = priv->tx_queue[i];
                grp->num_tx_queues++;
                grp->tstat |= (TSTAT_CLEAR_THALT >> i);
                priv->tqueue |= (TQUEUE_EN0 >> i);
                priv->tx_queue[i]->grp = grp;
        }

        priv->num_grps++;

        return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
        struct device_node *child;
        int num = 0;

        for_each_available_child_of_node(np, child)
                if (of_node_name_eq(child, "queue-group"))
                        num++;

        return num;
}

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 ecntrl;

        ecntrl = gfar_read(&regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
                        return PHY_INTERFACE_MODE_RMII;
                } else {
                        phy_interface_t interface = priv->interface;

                        /* This isn't autodetected right now, so it must
                         * be set by the device tree or platform code.
                         */
                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                                return PHY_INTERFACE_MODE_RGMII_ID;

                        return PHY_INTERFACE_MODE_RGMII;
                }
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}

static int gfar_of_init(struct platform_device *ofdev,
                        struct net_device **pdev)
{
        const char *model;
        int err = 0, i;
        phy_interface_t interface;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->dev.of_node;
        struct device_node *child = NULL;
        u32 stash_len = 0;
        u32 stash_idx = 0;
        unsigned int num_tx_qs, num_rx_qs;
        unsigned short mode;

        if (!np)
                return -ENODEV;

        if (of_device_is_compatible(np, "fsl,etsec2"))
                mode = MQ_MG_MODE;
        else
                mode = SQ_SG_MODE;

        if (mode == SQ_SG_MODE) {
                num_tx_qs = 1;
                num_rx_qs = 1;
        } else { /* MQ_MG_MODE */
                /* get the actual number of supported groups */
                unsigned int num_grps = gfar_of_group_count(np);

                if (num_grps == 0 || num_grps > MAXGROUPS) {
                        dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
                                num_grps);
                        pr_err("Cannot do alloc_etherdev, aborting\n");
                        return -EINVAL;
                }

                num_tx_qs = num_grps; /* one txq per int group */
                num_rx_qs = num_grps; /* one rxq per int group */
        }

        if (num_tx_qs > MAX_TX_QS) {
                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
                       num_tx_qs, MAX_TX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        if (num_rx_qs > MAX_RX_QS) {
                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                       num_rx_qs, MAX_RX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
        dev = *pdev;
        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->ndev = dev;

        priv->mode = mode;

        priv->num_tx_queues = num_tx_qs;
        netif_set_real_num_rx_queues(dev, num_rx_qs);
        priv->num_rx_queues = num_rx_qs;

        err = gfar_alloc_tx_queues(priv);
        if (err)
                goto tx_alloc_failed;

        err = gfar_alloc_rx_queues(priv);
        if (err)
                goto rx_alloc_failed;

        err = of_property_read_string(np, "model", &model);
        if (err) {
                pr_err("Device model property missing, aborting\n");
                goto rx_alloc_failed;
        }

        /* Init Rx queue filer rule set linked list */
        INIT_LIST_HEAD(&priv->rx_list.list);
        priv->rx_list.count = 0;
        mutex_init(&priv->rx_queue_access);

        for (i = 0; i < MAXGROUPS; i++)
                priv->gfargrp[i].regs = NULL;

        /* Parse and initialize group specific information */
        if (priv->mode == MQ_MG_MODE) {
                for_each_available_child_of_node(np, child) {
                        if (!of_node_name_eq(child, "queue-group"))
                                continue;

                        err = gfar_parse_group(child, priv, model);
                        if (err) {
                                of_node_put(child);
                                goto err_grp_init;
                        }
                }
        } else { /* SQ_SG_MODE */
                err = gfar_parse_group(np, priv, model);
                if (err)
                        goto err_grp_init;
        }

        if (of_property_read_bool(np, "bd-stash")) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        err = of_property_read_u32(np, "rx-stash-len", &stash_len);

        if (err == 0)
                priv->rx_stash_size = stash_len;

        err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

        if (err == 0)
                priv->rx_stash_index = stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        err = of_get_ethdev_address(np, dev);
        if (err) {
                eth_hw_addr_random(dev);
                dev_info(&ofdev->dev, "Using random MAC address: %pM\n",
                         dev->dev_addr);
        }

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                      FSL_GIANFAR_DEV_HAS_COALESCE |
                                      FSL_GIANFAR_DEV_HAS_RMON |
                                      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                      FSL_GIANFAR_DEV_HAS_COALESCE |
                                      FSL_GIANFAR_DEV_HAS_RMON |
                                      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                                      FSL_GIANFAR_DEV_HAS_CSUM |
                                      FSL_GIANFAR_DEV_HAS_VLAN |
                                      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                                      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
                                      FSL_GIANFAR_DEV_HAS_TIMER |
                                      FSL_GIANFAR_DEV_HAS_RX_FILER;

        /* Use PHY connection type from the DT node if one is specified there.
         * rgmii-id really needs to be specified. Other types can be
         * detected by hardware
         */
        err = of_get_phy_mode(np, &interface);
        if (!err)
                priv->interface = interface;
        else
                priv->interface = gfar_get_interface(dev);

        if (of_find_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        if (of_get_property(np, "fsl,wake-on-filer", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* In the case of a fixed PHY, the DT node associated
         * to the PHY is the Ethernet MAC DT node.
         */
        if (!priv->phy_node && of_phy_is_fixed_link(np)) {
                err = of_phy_register_fixed_link(np);
                if (err)
                        goto err_grp_init;

                priv->phy_node = of_node_get(np);
        }

        /* Find the TBI PHY. If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

err_grp_init:
        unmap_group_regs(priv);
rx_alloc_failed:
        gfar_free_rx_queues(priv);
tx_alloc_failed:
        gfar_free_tx_queues(priv);
        free_gfar_dev(priv);
        return err;
}

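/* Install one cluster of filer rules matching a single parsed
 * protocol class (e.g. RQFPR_IPV4 | RQFPR_TCP), working downwards
 * from index rqfar; returns the next free (lowest used) index.
 */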
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
                                   u32 class)
{
        u32 rqfpr = FPR_FILER_MASK;
        u32 rqfcr = 0x0;

        rqfar--;
        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_NOMATCH;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
        int i = 0x0;
        u32 rqfar = MAX_FILER_IDX;
        u32 rqfcr = 0x0;
        u32 rqfpr = FPR_FILER_MASK;

        /* Default rule */
        rqfcr = RQFCR_CMP_MATCH;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

        /* cur_filer_idx indicates the first non-masked rule */
        priv->cur_filer_idx = rqfar;

        /* Rest are masked rules */
        rqfcr = RQFCR_CMP_NOMATCH;
        for (i = 0; i < rqfar; i++) {
                priv->ftp_rqfcr[i] = rqfcr;
                priv->ftp_rqfpr[i] = rqfpr;
                gfar_write_filer(priv, i, rqfcr, rqfpr);
        }
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
        unsigned int rev = svr & 0xffff;

        /* MPC8313 Rev 2.0 and higher; All MPC837x */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_74;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;

        /* MPC8313 Rev < 2.0 */
        if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
                priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
        unsigned int svr = mfspr(SPRN_SVR);

        if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
        /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
        if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
            ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
            ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
                priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
        struct device *dev = &priv->ofdev->dev;

        /* no plans to fix */
        priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
        if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
                __gfar_detect_errata_85xx(priv);
        else /* non-mpc85xx parts, i.e. e300 core based */
                __gfar_detect_errata_83xx(priv);
#endif

        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
                         priv->errata);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &regs->igaddr0;
                priv->hash_regs[1] = &regs->igaddr1;
                priv->hash_regs[2] = &regs->igaddr2;
                priv->hash_regs[3] = &regs->igaddr3;
                priv->hash_regs[4] = &regs->igaddr4;
                priv->hash_regs[5] = &regs->igaddr5;
                priv->hash_regs[6] = &regs->igaddr6;
                priv->hash_regs[7] = &regs->igaddr7;
                priv->hash_regs[8] = &regs->gaddr0;
                priv->hash_regs[9] = &regs->gaddr1;
                priv->hash_regs[10] = &regs->gaddr2;
                priv->hash_regs[11] = &regs->gaddr3;
                priv->hash_regs[12] = &regs->gaddr4;
                priv->hash_regs[13] = &regs->gaddr5;
                priv->hash_regs[14] = &regs->gaddr6;
                priv->hash_regs[15] = &regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &regs->gaddr0;
                priv->hash_regs[1] = &regs->gaddr1;
                priv->hash_regs[2] = &regs->gaddr2;
                priv->hash_regs[3] = &regs->gaddr3;
                priv->hash_regs[4] = &regs->gaddr4;
                priv->hash_regs[5] = &regs->gaddr5;
                priv->hash_regs[6] = &regs->gaddr6;
                priv->hash_regs[7] = &regs->gaddr7;
        }
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
        u32 res;

        /* Normally TSEC should not hang on GRS commands, so we should
         * actually wait for IEVENT_GRSC flag.
         */
        if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
                return 0;

        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
         * the same as bits 23-30, the eTSEC Rx is assumed to be idle
         * and the Rx can be safely reset.
         */
        res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
        res &= 0x7f807f80;
        if ((res & 0xffff) == (res >> 16))
                return 1;

        return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        unsigned int timeout;
        int stopped;

        gfar_ints_disable(priv);

        if (gfar_is_dma_stopped(priv))
                return;

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&regs->dmactrl);
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&regs->dmactrl, tempval);

retry:
        timeout = 1000;
        while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
                cpu_relax();
                timeout--;
        }

        if (!timeout)
                stopped = gfar_is_dma_stopped(priv);

        if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
            !__gfar_is_rx_idle(priv))
                goto retry;
}

/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;

        /* Disable the Rx/Tx hw queues */
        gfar_write(&regs->rqueue, 0);
        gfar_write(&regs->tqueue, 0);

        mdelay(10);

        gfar_halt_nodisable(priv);

        /* Disable Rx/Tx DMA */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
        struct txbd8 *txbdp;
        struct gfar_private *priv = netdev_priv(tx_queue->dev);
        int i, j;

        txbdp = tx_queue->tx_bd_base;

        for (i = 0; i < tx_queue->tx_ring_size; i++) {
                if (!tx_queue->tx_skbuff[i])
                        continue;

                dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
                                 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
                txbdp->lstatus = 0;
                for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
                     j++) {
                        txbdp++;
                        dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
                                       be16_to_cpu(txbdp->length),
                                       DMA_TO_DEVICE);
                }
                txbdp++;
                dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
                tx_queue->tx_skbuff[i] = NULL;
        }
        kfree(tx_queue->tx_skbuff);
        tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
        int i;

        struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

        dev_kfree_skb(rx_queue->skb);

        for (i = 0; i < rx_queue->rx_ring_size; i++) {
                struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

                rxbdp->lstatus = 0;
                rxbdp->bufPtr = 0;
                rxbdp++;

                if (!rxb->page)
                        continue;

                dma_unmap_page(rx_queue->dev, rxb->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(rxb->page);

                rxb->page = NULL;
        }

        kfree(rx_queue->rx_buff);
        rx_queue->rx_buff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        for (i = 0; i < priv->num_tx_queues; i++) {
                struct netdev_queue *txq;

                tx_queue = priv->tx_queue[i];
                txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
                if (tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
                netdev_tx_reset_queue(txq);
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                if (rx_queue->rx_buff)
                        free_skb_rx_queue(rx_queue);
        }

        dma_free_coherent(priv->dev,
                          sizeof(struct txbd8) * priv->total_tx_ring_size +
                          sizeof(struct rxbd8) * priv->total_rx_ring_size,
                          priv->tx_queue[0]->tx_bd_base,
                          priv->tx_queue[0]->tx_bd_dma_base);
}

void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        netif_tx_stop_all_queues(dev);

        smp_mb__before_atomic();
        set_bit(GFAR_DOWN, &priv->state);
        smp_mb__after_atomic();

        disable_napi(priv);

        /* disable ints and gracefully shut down Rx/Tx DMA */
        gfar_halt(priv);

        phy_stop(dev->phydev);

        free_skb_resources(priv);
}

static void gfar_start(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        int i = 0;

        /* Enable Rx/Tx hw queues */
        gfar_write(&regs->rqueue, priv->rqueue);
        gfar_write(&regs->tqueue, priv->tqueue);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&regs->dmactrl, tempval);

        for (i = 0; i < priv->num_grps; i++) {
                regs = priv->gfargrp[i].regs;
                /* Clear THLT/RHLT, so that the DMA starts polling now */
                gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
                gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
        }

        /* Enable Rx/Tx DMA */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        gfar_ints_enable(priv);

        netif_trans_update(priv->ndev); /* prevent tx timeout */
}

static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
        struct page *page;
        dma_addr_t addr;

        page = dev_alloc_page();
        if (unlikely(!page))
                return false;

        addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rxq->dev, addr))) {
                __free_page(page);

                return false;
        }

        rxb->dma = addr;
        rxb->page = page;
        rxb->page_offset = 0;

        return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
        struct gfar_private *priv = netdev_priv(rx_queue->ndev);
        struct gfar_extra_stats *estats = &priv->extra_stats;

        netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
        atomic64_inc(&estats->rx_alloc_err);
}

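/* Refill up to alloc_cnt Rx descriptors starting at next_to_use,
 * reusing pages where possible and mapping fresh ones otherwise;
 * stops early if a page allocation fails.
 */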
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
                                int alloc_cnt)
{
        struct rxbd8 *bdp;
        struct gfar_rx_buff *rxb;
        int i;

        i = rx_queue->next_to_use;
        bdp = &rx_queue->rx_bd_base[i];
        rxb = &rx_queue->rx_buff[i];

        while (alloc_cnt--) {
                /* try reuse page */
                if (unlikely(!rxb->page)) {
                        if (unlikely(!gfar_new_page(rx_queue, rxb))) {
                                gfar_rx_alloc_err(rx_queue);
                                break;
                        }
                }

                /* Setup the new RxBD */
                gfar_init_rxbdp(rx_queue, bdp,
                                rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

                /* Update to the next pointer */
                bdp++;
                rxb++;

                if (unlikely(++i == rx_queue->rx_ring_size)) {
                        i = 0;
                        bdp = rx_queue->rx_bd_base;
                        rxb = rx_queue->rx_buff;
                }
        }

        rx_queue->next_to_use = i;
        rx_queue->next_to_alloc = i;
}

static void gfar_init_bds(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        u32 __iomem *rfbptr;
        int i, j;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                /* Initialize some variables in our dev structure */
                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
                tx_queue->dirty_tx = tx_queue->tx_bd_base;
                tx_queue->cur_tx = tx_queue->tx_bd_base;
                tx_queue->skb_curtx = 0;
                tx_queue->skb_dirtytx = 0;

                /* Initialize Transmit Descriptor Ring */
                txbdp = tx_queue->tx_bd_base;
                for (j = 0; j < tx_queue->tx_ring_size; j++) {
                        txbdp->lstatus = 0;
                        txbdp->bufPtr = 0;
                        txbdp++;
                }

                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
                txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
                                            TXBD_WRAP);
        }

        rfbptr = &regs->rfbptr0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];

                rx_queue->next_to_clean = 0;
                rx_queue->next_to_use = 0;
                rx_queue->next_to_alloc = 0;

                /* make sure next_to_clean != next_to_use after this
                 * by leaving at least 1 unused descriptor
                 */
                gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

                rx_queue->rfbptr = rfbptr;
                rfbptr += 2;
        }
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
        void *vaddr;
        dma_addr_t addr;
        int i, j;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        priv->total_tx_ring_size = 0;
        for (i = 0; i < priv->num_tx_queues; i++)
                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

        priv->total_rx_ring_size = 0;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

        /* Allocate memory for the buffer descriptors */
        vaddr = dma_alloc_coherent(dev,
                                   (priv->total_tx_ring_size *
                                    sizeof(struct txbd8)) +
                                   (priv->total_rx_ring_size *
                                    sizeof(struct rxbd8)),
                                   &addr, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_bd_base = vaddr;
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
                addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }

        /* Start the rx descriptor ring where the tx ring leaves off */
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->ndev = ndev;
                rx_queue->dev = dev;
                addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }

        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff =
                        kmalloc_array(tx_queue->tx_ring_size,
                                      sizeof(*tx_queue->tx_skbuff),
                                      GFP_KERNEL);
                if (!tx_queue->tx_skbuff)
                        goto cleanup;

                for (j = 0; j < tx_queue->tx_ring_size; j++)
                        tx_queue->tx_skbuff[j] = NULL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
                                            sizeof(*rx_queue->rx_buff),
                                            GFP_KERNEL);
                if (!rx_queue->rx_buff)
                        goto cleanup;
        }

        gfar_init_bds(ndev);

        return 0;

cleanup:
        free_skb_resources(priv);
        return -ENOMEM;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        int err;

        gfar_mac_reset(priv);

        err = gfar_alloc_skb_resources(ndev);
        if (err)
                return err;

        gfar_init_tx_rx_base(priv);

        smp_mb__before_atomic();
        clear_bit(GFAR_DOWN, &priv->state);
        smp_mb__after_atomic();

        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);

        /* force link state update after mac reset */
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        phy_start(ndev->phydev);

        enable_napi(priv);

        netif_tx_wake_all_queues(ndev);

        return 0;
}

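/* Compute the MACCFG1 Tx/Rx flow control bits, either from the
 * user-forced pause settings or from the pause autonegotiation
 * result resolved against the link partner's advertisement.
 */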
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
        struct net_device *ndev = priv->ndev;
        struct phy_device *phydev = ndev->phydev;
        u32 val = 0;

        if (!phydev->duplex)
                return val;

        if (!priv->pause_aneg_en) {
                if (priv->tx_pause_en)
                        val |= MACCFG1_TX_FLOW;
                if (priv->rx_pause_en)
                        val |= MACCFG1_RX_FLOW;
        } else {
                u16 lcl_adv, rmt_adv;
                u8 flowctrl;

                /* get link partner capabilities */
                rmt_adv = 0;
                if (phydev->pause)
                        rmt_adv = LPA_PAUSE_CAP;
                if (phydev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;

                lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
                if (flowctrl & FLOW_CTRL_TX)
                        val |= MACCFG1_TX_FLOW;
                if (flowctrl & FLOW_CTRL_RX)
                        val |= MACCFG1_RX_FLOW;
        }

        return val;
}

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct net_device *ndev = priv->ndev;
        struct phy_device *phydev = ndev->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;

        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
                return;

        if (phydev->link) {
                u32 tempval1 = gfar_read(&regs->maccfg1);
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);
                u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

                if (phydev->duplex != priv->oldduplex) {
                        if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
                        else
                                tempval |= MACCFG2_FULL_DUPLEX;

                        priv->oldduplex = phydev->duplex;
                }

                if (phydev->speed != priv->oldspeed) {
                        switch (phydev->speed) {
                        case 1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

                                ecntrl &= ~(ECNTRL_R100);
                                break;
                        case 100:
                        case 10:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

                                /* Reduced mode distinguishes
                                 * between 10 and 100
                                 */
                                if (phydev->speed == SPEED_100)
                                        ecntrl |= ECNTRL_R100;
                                else
                                        ecntrl &= ~(ECNTRL_R100);
                                break;
                        default:
                                netif_warn(priv, link, priv->ndev,
                                           "Ack! Speed (%d) is not 10/100/1000!\n",
                                           phydev->speed);
                                break;
                        }

                        priv->oldspeed = phydev->speed;
                }

                tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
                tempval1 |= gfar_get_flowctrl_cfg(priv);

                /* Turn last free buffer recording on */
                if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
                        for (i = 0; i < priv->num_rx_queues; i++) {
                                u32 bdp_dma;

                                rx_queue = priv->rx_queue[i];
                                bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
                                gfar_write(rx_queue->rfbptr, bdp_dma);
                        }

                        priv->tx_actual_en = 1;
                }

                if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
                        priv->tx_actual_en = 0;

                gfar_write(&regs->maccfg1, tempval1);
                gfar_write(&regs->maccfg2, tempval);
                gfar_write(&regs->ecntrl, ecntrl);

                if (!priv->oldlink)
                        priv->oldlink = 1;

        } else if (priv->oldlink) {
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (netif_msg_link(priv))
                phy_print_status(phydev);
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;

        if (unlikely(phydev->link != priv->oldlink ||
                     (phydev->link && (phydev->duplex != priv->oldduplex ||
                                       phydev->speed != priv->oldspeed))))
                gfar_update_link_state(priv);
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *tbiphy;

        if (!priv->tbi_node) {
                dev_warn(&dev->dev, "error: SGMII mode requires that the "
                         "device tree specify a tbi-handle\n");
                return;
        }

        tbiphy = of_phy_find_device(priv->tbi_node);
        if (!tbiphy) {
                dev_err(&dev->dev, "error: Could not get TBI device\n");
                return;
        }

        /* If the link is already up, we must already be ok, and don't need to
         * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
         * everything for us? Resetting it takes the link down and requires
         * several seconds for it to come back.
         */
        if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
                put_device(&tbiphy->mdio.dev);
                return;
        }

        /* Single clk mode, mii mode off(for serdes communication) */
        phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

        phy_write(tbiphy, MII_ADVERTISE,
                  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                  ADVERTISE_1000XPSE_ASYM);

        phy_write(tbiphy, MII_BMCR,
                  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
                  BMCR_SPEED1000);

        put_device(&tbiphy->mdio.dev);
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
        struct gfar_private *priv = netdev_priv(dev);
        phy_interface_t interface = priv->interface;
        struct phy_device *phydev;
        struct ethtool_eee edata;

        linkmode_set_bit_array(phy_10_100_features_array,
                               ARRAY_SIZE(phy_10_100_features_array),
                               mask);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
        linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
                                interface);
        if (!phydev) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);

        /* Remove any features not supported by the controller */
        linkmode_and(phydev->supported, phydev->supported, mask);
        linkmode_copy(phydev->advertising, phydev->supported);

        /* Add support for flow control */
        phy_support_asym_pause(phydev);

        /* disable EEE autoneg, EEE not supported by eTSEC */
        memset(&edata, 0, sizeof(struct ethtool_eee));
        phy_ethtool_set_eee(phydev, &edata);

        return 0;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
        struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
                                    int fcb_length)
{
        /* If we're here, it's an IP packet with a TCP or UDP
         * payload. We set it to checksum, using a pseudo-header
         * we provide
         */
        u8 flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is
         * And provide the already calculated phcs
         */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
        } else {
                fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
        }

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr
         */
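        /* e.g. (illustrative): for an untagged IPv4 frame with an 8-byte
         * FCB pushed in front (fcb_length == GMAC_FCB_LEN), the network
         * header sits at offset ETH_HLEN + 8 = 22, so l3os = 22 - 8 = 14,
         * and l4os = 20 for a standard 20-byte IPv4 header.
         */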
        fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
        fcb->l4os = skb_network_header_len(skb);

        fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
                                      struct txbd8 *base, int ring_size)
{
        struct txbd8 *new_bd = bdp + stride;

        return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
                                      int ring_size)
{
        return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
                                       unsigned long fcb_addr)
{
        return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
                (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
                                       unsigned int len)
{
        return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
                (len > 2500));
}
1765
1766 /* This is called by the kernel when a frame is ready for transmission.
1767 * It is pointed to by the dev->hard_start_xmit function pointer
1768 */
gfar_start_xmit(struct sk_buff * skb,struct net_device * dev)1769 static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1770 {
1771 struct gfar_private *priv = netdev_priv(dev);
1772 struct gfar_priv_tx_q *tx_queue = NULL;
1773 struct netdev_queue *txq;
1774 struct gfar __iomem *regs = NULL;
1775 struct txfcb *fcb = NULL;
1776 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1777 u32 lstatus;
1778 skb_frag_t *frag;
1779 int i, rq = 0;
1780 int do_tstamp, do_csum, do_vlan;
1781 u32 bufaddr;
1782 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1783
1784 rq = skb->queue_mapping;
1785 tx_queue = priv->tx_queue[rq];
1786 txq = netdev_get_tx_queue(dev, rq);
1787 base = tx_queue->tx_bd_base;
1788 regs = tx_queue->grp->regs;
1789
1790 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1791 do_vlan = skb_vlan_tag_present(skb);
1792 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1793 priv->hwts_tx_en;
1794
1795 if (do_csum || do_vlan)
1796 fcb_len = GMAC_FCB_LEN;
1797
1798 /* check if time stamp should be generated */
1799 if (unlikely(do_tstamp))
1800 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1801
1802 /* make space for additional header when fcb is needed */
1803 if (fcb_len) {
1804 if (unlikely(skb_cow_head(skb, fcb_len))) {
1805 dev->stats.tx_errors++;
1806 dev_kfree_skb_any(skb);
1807 return NETDEV_TX_OK;
1808 }
1809 }
1810
1811 /* total number of fragments in the SKB */
1812 nr_frags = skb_shinfo(skb)->nr_frags;
1813
1814 /* calculate the required number of TxBDs for this skb */
1815 if (unlikely(do_tstamp))
1816 nr_txbds = nr_frags + 2;
1817 else
1818 nr_txbds = nr_frags + 1;
1819
1820 /* check if there is space to queue this packet */
1821 if (nr_txbds > tx_queue->num_txbdfree) {
1822 /* no space, stop the queue */
1823 netif_tx_stop_queue(txq);
1824 dev->stats.tx_fifo_errors++;
1825 return NETDEV_TX_BUSY;
1826 }
1827
1828 /* Update transmit stats */
1829 bytes_sent = skb->len;
1830 tx_queue->stats.tx_bytes += bytes_sent;
1831 /* keep Tx bytes on wire for BQL accounting */
1832 GFAR_CB(skb)->bytes_sent = bytes_sent;
1833 tx_queue->stats.tx_packets++;
1834
1835 txbdp = txbdp_start = tx_queue->cur_tx;
1836 lstatus = be32_to_cpu(txbdp->lstatus);
1837
1838 /* Add TxPAL between FCB and frame if required */
1839 if (unlikely(do_tstamp)) {
1840 skb_push(skb, GMAC_TXPAL_LEN);
1841 memset(skb->data, 0, GMAC_TXPAL_LEN);
1842 }
1843
1844 /* Add TxFCB if required */
1845 if (fcb_len) {
1846 fcb = gfar_add_fcb(skb);
1847 lstatus |= BD_LFLAG(TXBD_TOE);
1848 }
1849
1850 /* Set up checksumming */
1851 if (do_csum) {
1852 gfar_tx_checksum(skb, fcb, fcb_len);
1853
1854 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1855 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1856 __skb_pull(skb, GMAC_FCB_LEN);
1857 skb_checksum_help(skb);
1858 if (do_vlan || do_tstamp) {
1859 /* put back a new fcb for vlan/tstamp TOE */
1860 fcb = gfar_add_fcb(skb);
1861 } else {
1862 /* Tx TOE not used */
1863 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1864 fcb = NULL;
1865 }
1866 }
1867 }
1868
1869 if (do_vlan)
1870 gfar_tx_vlan(skb, fcb);
1871
1872 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1873 DMA_TO_DEVICE);
1874 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1875 goto dma_map_err;
1876
1877 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1878
1879 /* Time stamp insertion requires one additional TxBD */
1880 if (unlikely(do_tstamp))
1881 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1882 tx_queue->tx_ring_size);
1883
1884 if (likely(!nr_frags)) {
1885 if (likely(!do_tstamp))
1886 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1887 } else {
1888 u32 lstatus_start = lstatus;
1889
1890 /* Place the fragment addresses and lengths into the TxBDs */
1891 frag = &skb_shinfo(skb)->frags[0];
1892 for (i = 0; i < nr_frags; i++, frag++) {
1893 unsigned int size;
1894
1895 /* Point at the next BD, wrapping as needed */
1896 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1897
1898 size = skb_frag_size(frag);
1899
1900 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1901 BD_LFLAG(TXBD_READY);
1902
1903 /* Handle the last BD specially */
1904 if (i == nr_frags - 1)
1905 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1906
1907 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1908 size, DMA_TO_DEVICE);
1909 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1910 goto dma_map_err;
1911
1912 /* set the TxBD length and buffer pointer */
1913 txbdp->bufPtr = cpu_to_be32(bufaddr);
1914 txbdp->lstatus = cpu_to_be32(lstatus);
1915 }
1916
1917 lstatus = lstatus_start;
1918 }
1919
1920 /* If time stamping is requested, one additional TxBD must be set up. The
1921 * first TxBD points to the FCB and must have a data length of
1922 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1923 * the full frame length.
1924 */
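/* Sketch of the resulting two-BD layout (per the comment above):
 *
 *	txbdp_start  -> FCB (+TxPAL region)  length = GMAC_FCB_LEN
 *	txbdp_tstamp -> frame data           length = skb_headlen() - fcb_len
 */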
1925 if (unlikely(do_tstamp)) {
1926 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1927
1928 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1929 bufaddr += fcb_len;
1930
1931 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1932 (skb_headlen(skb) - fcb_len);
1933 if (!nr_frags)
1934 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1935
1936 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1937 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1938 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1939
1940 /* Setup tx hardware time stamping */
1941 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1942 fcb->ptp = 1;
1943 } else {
1944 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1945 }
1946
1947 netdev_tx_sent_queue(txq, bytes_sent);
1948
1949 gfar_wmb();
1950
1951 txbdp_start->lstatus = cpu_to_be32(lstatus);
1952
1953 gfar_wmb(); /* force lstatus write before tx_skbuff */
1954
1955 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1956
1957 /* Update the current skb pointer to the next entry we will use
1958 * (wrapping if necessary)
1959 */
1960 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1961 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
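/* TX_RING_MOD_MASK() assumes a power-of-two ring size, making the wrap
 * a cheap mask, e.g. a 256-entry ring wraps via "& 0xff".
 */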
1962
1963 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1964
1965 /* We can work in parallel with gfar_clean_tx_ring(), except
1966 * when modifying num_txbdfree. Note that we didn't grab the lock
1967 * when we were reading the num_txbdfree and checking for available
1968 * space, that's because outside of this function it can only grow.
1969 */
1970 spin_lock_bh(&tx_queue->txlock);
1971 /* reduce TxBD free count */
1972 tx_queue->num_txbdfree -= (nr_txbds);
1973 spin_unlock_bh(&tx_queue->txlock);
1974
1975 /* If the next BD still needs to be cleaned up, then the bds
1976 * are full. We need to tell the kernel to stop sending us stuff.
1977 */
1978 if (!tx_queue->num_txbdfree) {
1979 netif_tx_stop_queue(txq);
1980
1981 dev->stats.tx_fifo_errors++;
1982 }
1983
1984 /* Tell the DMA to go go go */
1985 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
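/* The queue-0 halt-clear constant occupies the most significant bit,
 * so shifting it right by qindex selects this queue's THLT-clear bit
 * (an observation about the encoding implied by the shift above).
 */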
1986
1987 return NETDEV_TX_OK;
1988
1989 dma_map_err:
1990 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
1991 if (do_tstamp)
1992 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1993 for (i = 0; i < nr_frags; i++) {
1994 lstatus = be32_to_cpu(txbdp->lstatus);
1995 if (!(lstatus & BD_LFLAG(TXBD_READY)))
1996 break;
1997
1998 lstatus &= ~BD_LFLAG(TXBD_READY);
1999 txbdp->lstatus = cpu_to_be32(lstatus);
2000 bufaddr = be32_to_cpu(txbdp->bufPtr);
2001 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2002 DMA_TO_DEVICE);
2003 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2004 }
2005 gfar_wmb();
2006 dev_kfree_skb_any(skb);
2007 return NETDEV_TX_OK;
2008 }
2009
2010 /* Changes the mac address if the controller is not running. */
2011 static int gfar_set_mac_address(struct net_device *dev)
2012 {
2013 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2014
2015 return 0;
2016 }
2017
2018 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2019 {
2020 struct gfar_private *priv = netdev_priv(dev);
2021
2022 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2023 cpu_relax();
2024
2025 if (dev->flags & IFF_UP)
2026 stop_gfar(dev);
2027
2028 dev->mtu = new_mtu;
2029
2030 if (dev->flags & IFF_UP)
2031 startup_gfar(dev);
2032
2033 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2034
2035 return 0;
2036 }
2037
2038 static void reset_gfar(struct net_device *ndev)
2039 {
2040 struct gfar_private *priv = netdev_priv(ndev);
2041
2042 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2043 cpu_relax();
2044
2045 stop_gfar(ndev);
2046 startup_gfar(ndev);
2047
2048 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2049 }
2050
2051 /* gfar_reset_task gets scheduled when a packet has not been
2052 * transmitted after a set amount of time.
2053 * For now, assume that clearing out all the structures, and
2054 * starting over will fix the problem.
2055 */
2056 static void gfar_reset_task(struct work_struct *work)
2057 {
2058 struct gfar_private *priv = container_of(work, struct gfar_private,
2059 reset_task);
2060 reset_gfar(priv->ndev);
2061 }
2062
2063 static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2064 {
2065 struct gfar_private *priv = netdev_priv(dev);
2066
2067 dev->stats.tx_errors++;
2068 schedule_work(&priv->reset_task);
2069 }
2070
2071 static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
2072 {
2073 struct hwtstamp_config config;
2074 struct gfar_private *priv = netdev_priv(netdev);
2075
2076 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2077 return -EFAULT;
2078
2079 switch (config.tx_type) {
2080 case HWTSTAMP_TX_OFF:
2081 priv->hwts_tx_en = 0;
2082 break;
2083 case HWTSTAMP_TX_ON:
2084 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2085 return -ERANGE;
2086 priv->hwts_tx_en = 1;
2087 break;
2088 default:
2089 return -ERANGE;
2090 }
2091
2092 switch (config.rx_filter) {
2093 case HWTSTAMP_FILTER_NONE:
2094 if (priv->hwts_rx_en) {
2095 priv->hwts_rx_en = 0;
2096 reset_gfar(netdev);
2097 }
2098 break;
2099 default:
2100 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2101 return -ERANGE;
2102 if (!priv->hwts_rx_en) {
2103 priv->hwts_rx_en = 1;
2104 reset_gfar(netdev);
2105 }
2106 config.rx_filter = HWTSTAMP_FILTER_ALL;
2107 break;
2108 }
2109
2110 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2111 -EFAULT : 0;
2112 }
2113
2114 static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
2115 {
2116 struct hwtstamp_config config;
2117 struct gfar_private *priv = netdev_priv(netdev);
2118
2119 config.flags = 0;
2120 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2121 config.rx_filter = (priv->hwts_rx_en ?
2122 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
2123
2124 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2125 -EFAULT : 0;
2126 }
2127
2128 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2129 {
2130 struct phy_device *phydev = dev->phydev;
2131
2132 if (!netif_running(dev))
2133 return -EINVAL;
2134
2135 if (cmd == SIOCSHWTSTAMP)
2136 return gfar_hwtstamp_set(dev, rq);
2137 if (cmd == SIOCGHWTSTAMP)
2138 return gfar_hwtstamp_get(dev, rq);
2139
2140 if (!phydev)
2141 return -ENODEV;
2142
2143 return phy_mii_ioctl(phydev, rq, cmd);
2144 }
2145
2146 /* Interrupt Handler for Transmit complete */
2147 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2148 {
2149 struct net_device *dev = tx_queue->dev;
2150 struct netdev_queue *txq;
2151 struct gfar_private *priv = netdev_priv(dev);
2152 struct txbd8 *bdp, *next = NULL;
2153 struct txbd8 *lbdp = NULL;
2154 struct txbd8 *base = tx_queue->tx_bd_base;
2155 struct sk_buff *skb;
2156 int skb_dirtytx;
2157 int tx_ring_size = tx_queue->tx_ring_size;
2158 int frags = 0, nr_txbds = 0;
2159 int i;
2160 int howmany = 0;
2161 int tqi = tx_queue->qindex;
2162 unsigned int bytes_sent = 0;
2163 u32 lstatus;
2164 size_t buflen;
2165
2166 txq = netdev_get_tx_queue(dev, tqi);
2167 bdp = tx_queue->dirty_tx;
2168 skb_dirtytx = tx_queue->skb_dirtytx;
2169
2170 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2171 bool do_tstamp;
2172
2173 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2174 priv->hwts_tx_en;
2175
2176 frags = skb_shinfo(skb)->nr_frags;
2177
2178 /* When time stamping, one additional TxBD must be freed.
2179 * Also, we need to dma_unmap_single() the TxPAL.
2180 */
2181 if (unlikely(do_tstamp))
2182 nr_txbds = frags + 2;
2183 else
2184 nr_txbds = frags + 1;
2185
2186 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2187
2188 lstatus = be32_to_cpu(lbdp->lstatus);
2189
2190 /* Only clean completed frames */
2191 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2192 (lstatus & BD_LENGTH_MASK))
2193 break;
2194
2195 if (unlikely(do_tstamp)) {
2196 next = next_txbd(bdp, base, tx_ring_size);
2197 buflen = be16_to_cpu(next->length) +
2198 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2199 } else
2200 buflen = be16_to_cpu(bdp->length);
2201
2202 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2203 buflen, DMA_TO_DEVICE);
2204
2205 if (unlikely(do_tstamp)) {
2206 struct skb_shared_hwtstamps shhwtstamps;
2207 u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2208 ~0x7UL);
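/* A note on the arithmetic above: skb->data still includes the
 * FCB/TxPAL headers at this point, and the hardware writes the 64-bit
 * timestamp into the TxPAL area; "+ 0x10" steps to that slot and
 * "& ~0x7UL" gives the 8-byte alignment the u64 read requires.
 */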
2209
2210 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2211 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2212 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2213 skb_tstamp_tx(skb, &shhwtstamps);
2214 gfar_clear_txbd_status(bdp);
2215 bdp = next;
2216 }
2217
2218 gfar_clear_txbd_status(bdp);
2219 bdp = next_txbd(bdp, base, tx_ring_size);
2220
2221 for (i = 0; i < frags; i++) {
2222 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2223 be16_to_cpu(bdp->length),
2224 DMA_TO_DEVICE);
2225 gfar_clear_txbd_status(bdp);
2226 bdp = next_txbd(bdp, base, tx_ring_size);
2227 }
2228
2229 bytes_sent += GFAR_CB(skb)->bytes_sent;
2230
2231 dev_kfree_skb_any(skb);
2232
2233 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2234
2235 skb_dirtytx = (skb_dirtytx + 1) &
2236 TX_RING_MOD_MASK(tx_ring_size);
2237
2238 howmany++;
2239 spin_lock(&tx_queue->txlock);
2240 tx_queue->num_txbdfree += nr_txbds;
2241 spin_unlock(&tx_queue->txlock);
2242 }
2243
2244 /* If we freed a buffer, we can restart transmission, if necessary */
2245 if (tx_queue->num_txbdfree &&
2246 netif_tx_queue_stopped(txq) &&
2247 !(test_bit(GFAR_DOWN, &priv->state)))
2248 netif_wake_subqueue(priv->ndev, tqi);
2249
2250 /* Update dirty indicators */
2251 tx_queue->skb_dirtytx = skb_dirtytx;
2252 tx_queue->dirty_tx = bdp;
2253
2254 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2255 }
2256
2257 static void count_errors(u32 lstatus, struct net_device *ndev)
2258 {
2259 struct gfar_private *priv = netdev_priv(ndev);
2260 struct net_device_stats *stats = &ndev->stats;
2261 struct gfar_extra_stats *estats = &priv->extra_stats;
2262
2263 /* If the packet was truncated, none of the other errors matter */
2264 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2265 stats->rx_length_errors++;
2266
2267 atomic64_inc(&estats->rx_trunc);
2268
2269 return;
2270 }
2271 /* Count the errors, if there were any */
2272 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2273 stats->rx_length_errors++;
2274
2275 if (lstatus & BD_LFLAG(RXBD_LARGE))
2276 atomic64_inc(&estats->rx_large);
2277 else
2278 atomic64_inc(&estats->rx_short);
2279 }
2280 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2281 stats->rx_frame_errors++;
2282 atomic64_inc(&estats->rx_nonoctet);
2283 }
2284 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2285 atomic64_inc(&estats->rx_crcerr);
2286 stats->rx_crc_errors++;
2287 }
2288 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2289 atomic64_inc(&estats->rx_overrun);
2290 stats->rx_over_errors++;
2291 }
2292 }
2293
2294 static irqreturn_t gfar_receive(int irq, void *grp_id)
2295 {
2296 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2297 unsigned long flags;
2298 u32 imask, ievent;
2299
2300 ievent = gfar_read(&grp->regs->ievent);
2301
2302 if (unlikely(ievent & IEVENT_FGPI)) {
2303 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2304 return IRQ_HANDLED;
2305 }
2306
2307 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2308 spin_lock_irqsave(&grp->grplock, flags);
2309 imask = gfar_read(&grp->regs->imask);
2310 imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
2311 gfar_write(&grp->regs->imask, imask);
2312 spin_unlock_irqrestore(&grp->grplock, flags);
2313 __napi_schedule(&grp->napi_rx);
2314 } else {
2315 /* Clear IEVENT, so interrupts aren't called again
2316 * because of the packets that have already arrived.
2317 */
2318 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2319 }
2320
2321 return IRQ_HANDLED;
2322 }
2323
2324 /* Interrupt Handler for Transmit complete */
2325 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2326 {
2327 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2328 unsigned long flags;
2329 u32 imask;
2330
2331 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2332 spin_lock_irqsave(&grp->grplock, flags);
2333 imask = gfar_read(&grp->regs->imask);
2334 imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
2335 gfar_write(&grp->regs->imask, imask);
2336 spin_unlock_irqrestore(&grp->grplock, flags);
2337 __napi_schedule(&grp->napi_tx);
2338 } else {
2339 /* Clear IEVENT, so interrupts aren't called again
2340 * because of the packets that have already arrived.
2341 */
2342 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2343 }
2344
2345 return IRQ_HANDLED;
2346 }
2347
2348 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2349 struct sk_buff *skb, bool first)
2350 {
2351 int size = lstatus & BD_LENGTH_MASK;
2352 struct page *page = rxb->page;
2353
2354 if (likely(first)) {
2355 skb_put(skb, size);
2356 } else {
2357 /* the last fragment's length contains the full frame length */
2358 if (lstatus & BD_LFLAG(RXBD_LAST))
2359 size -= skb->len;
2360
2361 WARN(size < 0, "gianfar: rx fragment size underflow");
2362 if (size < 0)
2363 return false;
2364
2365 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2366 rxb->page_offset + RXBUF_ALIGNMENT,
2367 size, GFAR_RXB_TRUESIZE);
2368 }
2369
2370 /* try reuse page */
2371 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2372 return false;
2373
2374 /* change offset to the other half */
2375 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
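/* Sketch of the half-page flip above, assuming GFAR_RXB_TRUESIZE is
 * half a page (2 KiB buffers on 4 KiB pages):
 *
 *	page_offset 0x000 -> 0x800	(hand the other half to the device)
 *	page_offset 0x800 -> 0x000
 *
 * so the CPU and the device each own one half of the page at a time.
 */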
2376
2377 page_ref_inc(page);
2378
2379 return true;
2380 }
2381
2382 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2383 struct gfar_rx_buff *old_rxb)
2384 {
2385 struct gfar_rx_buff *new_rxb;
2386 u16 nta = rxq->next_to_alloc;
2387
2388 new_rxb = &rxq->rx_buff[nta];
2389
2390 /* find next buf that can reuse a page */
2391 nta++;
2392 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2393
2394 /* copy page reference */
2395 *new_rxb = *old_rxb;
2396
2397 /* sync for use by the device */
2398 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2399 old_rxb->page_offset,
2400 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2401 }
2402
2403 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2404 u32 lstatus, struct sk_buff *skb)
2405 {
2406 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2407 struct page *page = rxb->page;
2408 bool first = false;
2409
2410 if (likely(!skb)) {
2411 void *buff_addr = page_address(page) + rxb->page_offset;
2412
2413 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2414 if (unlikely(!skb)) {
2415 gfar_rx_alloc_err(rx_queue);
2416 return NULL;
2417 }
2418 skb_reserve(skb, RXBUF_ALIGNMENT);
2419 first = true;
2420 }
2421
2422 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2423 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2424
2425 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2426 /* reuse the free half of the page */
2427 gfar_reuse_rx_page(rx_queue, rxb);
2428 } else {
2429 /* page cannot be reused, unmap it */
2430 dma_unmap_page(rx_queue->dev, rxb->dma,
2431 PAGE_SIZE, DMA_FROM_DEVICE);
2432 }
2433
2434 /* clear rxb content */
2435 rxb->page = NULL;
2436
2437 return skb;
2438 }
2439
2440 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2441 {
2442 /* If valid headers were found, and valid sums
2443 * were verified, then we tell the kernel that no
2444 * checksumming is necessary. Otherwise, the checksum is left unverified.
2445 */
2446 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2447 (RXFCB_CIP | RXFCB_CTU))
2448 skb->ip_summed = CHECKSUM_UNNECESSARY;
2449 else
2450 skb_checksum_none_assert(skb);
2451 }
2452
2453 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2454 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2455 {
2456 struct gfar_private *priv = netdev_priv(ndev);
2457 struct rxfcb *fcb = NULL;
2458
2459 /* fcb is at the beginning if it exists */
2460 fcb = (struct rxfcb *)skb->data;
2461
2462 /* Remove the FCB from the skb
2463 * Remove the padded bytes, if there are any
2464 */
2465 if (priv->uses_rxfcb)
2466 skb_pull(skb, GMAC_FCB_LEN);
2467
2468 /* Get receive timestamp from the skb */
2469 if (priv->hwts_rx_en) {
2470 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2471 u64 *ns = (u64 *) skb->data;
2472
2473 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2474 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2475 }
2476
2477 if (priv->padding)
2478 skb_pull(skb, priv->padding);
2479
2480 /* Trim off the FCS */
2481 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2482
2483 if (ndev->features & NETIF_F_RXCSUM)
2484 gfar_rx_checksum(skb, fcb);
2485
2486 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2487 * Even if vlan rx accel is disabled, on some chips
2488 * RXFCB_VLN is pseudo randomly set.
2489 */
2490 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2491 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2492 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2493 be16_to_cpu(fcb->vlctl));
2494 }
2495
2496 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2497 * until the budget/quota has been reached. Returns the number
2498 * of frames handled
2499 */
2500 static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
2501 int rx_work_limit)
2502 {
2503 struct net_device *ndev = rx_queue->ndev;
2504 struct gfar_private *priv = netdev_priv(ndev);
2505 struct rxbd8 *bdp;
2506 int i, howmany = 0;
2507 struct sk_buff *skb = rx_queue->skb;
2508 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2509 unsigned int total_bytes = 0, total_pkts = 0;
2510
2511 /* Get the first full descriptor */
2512 i = rx_queue->next_to_clean;
2513
2514 while (rx_work_limit--) {
2515 u32 lstatus;
2516
2517 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2518 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2519 cleaned_cnt = 0;
2520 }
2521
2522 bdp = &rx_queue->rx_bd_base[i];
2523 lstatus = be32_to_cpu(bdp->lstatus);
2524 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2525 break;
2526
2527 /* lost RXBD_LAST descriptor due to overrun */
2528 if (skb &&
2529 (lstatus & BD_LFLAG(RXBD_FIRST))) {
2530 /* discard faulty buffer */
2531 dev_kfree_skb(skb);
2532 skb = NULL;
2533 rx_queue->stats.rx_dropped++;
2534
2535 /* can continue normally */
2536 }
2537
2538 /* order rx buffer descriptor reads */
2539 rmb();
2540
2541 /* fetch next to clean buffer from the ring */
2542 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2543 if (unlikely(!skb))
2544 break;
2545
2546 cleaned_cnt++;
2547 howmany++;
2548
2549 if (unlikely(++i == rx_queue->rx_ring_size))
2550 i = 0;
2551
2552 rx_queue->next_to_clean = i;
2553
2554 /* fetch next buffer if not the last in frame */
2555 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2556 continue;
2557
2558 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2559 count_errors(lstatus, ndev);
2560
2561 /* discard faulty buffer */
2562 dev_kfree_skb(skb);
2563 skb = NULL;
2564 rx_queue->stats.rx_dropped++;
2565 continue;
2566 }
2567
2568 gfar_process_frame(ndev, skb);
2569
2570 /* Increment the number of packets */
2571 total_pkts++;
2572 total_bytes += skb->len;
2573
2574 skb_record_rx_queue(skb, rx_queue->qindex);
2575
2576 skb->protocol = eth_type_trans(skb, ndev);
2577
2578 /* Send the packet up the stack */
2579 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2580
2581 skb = NULL;
2582 }
2583
2584 /* Store incomplete frames for completion */
2585 rx_queue->skb = skb;
2586
2587 rx_queue->stats.rx_packets += total_pkts;
2588 rx_queue->stats.rx_bytes += total_bytes;
2589
2590 if (cleaned_cnt)
2591 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2592
2593 /* Update Last Free RxBD pointer for LFC */
2594 if (unlikely(priv->tx_actual_en)) {
2595 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2596
2597 gfar_write(rx_queue->rfbptr, bdp_dma);
2598 }
2599
2600 return howmany;
2601 }
2602
2603 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2604 {
2605 struct gfar_priv_grp *gfargrp =
2606 container_of(napi, struct gfar_priv_grp, napi_rx);
2607 struct gfar __iomem *regs = gfargrp->regs;
2608 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2609 int work_done = 0;
2610
2611 /* Clear IEVENT, so interrupts aren't called again
2612 * because of the packets that have already arrived
2613 */
2614 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2615
2616 work_done = gfar_clean_rx_ring(rx_queue, budget);
2617
2618 if (work_done < budget) {
2619 u32 imask;
2620 napi_complete_done(napi, work_done);
2621 /* Clear the halt bit in RSTAT */
2622 gfar_write(&regs->rstat, gfargrp->rstat);
2623
2624 spin_lock_irq(&gfargrp->grplock);
2625 imask = gfar_read(&regs->imask);
2626 imask |= IMASK_RX_DEFAULT;
2627 gfar_write(&regs->imask, imask);
2628 spin_unlock_irq(&gfargrp->grplock);
2629 }
2630
2631 return work_done;
2632 }
2633
2634 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2635 {
2636 struct gfar_priv_grp *gfargrp =
2637 container_of(napi, struct gfar_priv_grp, napi_tx);
2638 struct gfar __iomem *regs = gfargrp->regs;
2639 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2640 u32 imask;
2641
2642 /* Clear IEVENT, so interrupts aren't called again
2643 * because of the packets that have already arrived
2644 */
2645 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2646
2647 /* run Tx cleanup to completion */
2648 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2649 gfar_clean_tx_ring(tx_queue);
2650
2651 napi_complete(napi);
2652
2653 spin_lock_irq(&gfargrp->grplock);
2654 imask = gfar_read(&regs->imask);
2655 imask |= IMASK_TX_DEFAULT;
2656 gfar_write(&regs->imask, imask);
2657 spin_unlock_irq(&gfargrp->grplock);
2658
2659 return 0;
2660 }
2661
2662 /* GFAR error interrupt handler */
2663 static irqreturn_t gfar_error(int irq, void *grp_id)
2664 {
2665 struct gfar_priv_grp *gfargrp = grp_id;
2666 struct gfar __iomem *regs = gfargrp->regs;
2667 struct gfar_private *priv = gfargrp->priv;
2668 struct net_device *dev = priv->ndev;
2669
2670 /* Save ievent for future reference */
2671 u32 events = gfar_read(&regs->ievent);
2672
2673 /* Clear IEVENT */
2674 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2675
2676 /* Magic Packet is not an error. */
2677 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2678 (events & IEVENT_MAG))
2679 events &= ~IEVENT_MAG;
2680
2681 /* Log the error details when error messaging is enabled */
2682 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2683 netdev_dbg(dev,
2684 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2685 events, gfar_read(&regs->imask));
2686
2687 /* Update the error counters */
2688 if (events & IEVENT_TXE) {
2689 dev->stats.tx_errors++;
2690
2691 if (events & IEVENT_LC)
2692 dev->stats.tx_window_errors++;
2693 if (events & IEVENT_CRL)
2694 dev->stats.tx_aborted_errors++;
2695 if (events & IEVENT_XFUN) {
2696 netif_dbg(priv, tx_err, dev,
2697 "TX FIFO underrun, packet dropped\n");
2698 dev->stats.tx_dropped++;
2699 atomic64_inc(&priv->extra_stats.tx_underrun);
2700
2701 schedule_work(&priv->reset_task);
2702 }
2703 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2704 }
2705 if (events & IEVENT_MSRO) {
2706 struct rmon_mib __iomem *rmon = &regs->rmon;
2707 u32 car;
2708
2709 spin_lock(&priv->rmon_overflow.lock);
2710 car = gfar_read(&rmon->car1) & CAR1_C1RDR;
2711 if (car) {
2712 priv->rmon_overflow.rdrp++;
2713 gfar_write(&rmon->car1, car);
2714 }
2715 spin_unlock(&priv->rmon_overflow.lock);
2716 }
2717 if (events & IEVENT_BSY) {
2718 dev->stats.rx_over_errors++;
2719 atomic64_inc(&priv->extra_stats.rx_bsy);
2720
2721 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2722 gfar_read(&regs->rstat));
2723 }
2724 if (events & IEVENT_BABR) {
2725 dev->stats.rx_errors++;
2726 atomic64_inc(&priv->extra_stats.rx_babr);
2727
2728 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2729 }
2730 if (events & IEVENT_EBERR) {
2731 atomic64_inc(&priv->extra_stats.eberr);
2732 netif_dbg(priv, rx_err, dev, "bus error\n");
2733 }
2734 if (events & IEVENT_RXC)
2735 netif_dbg(priv, rx_status, dev, "control frame\n");
2736
2737 if (events & IEVENT_BABT) {
2738 atomic64_inc(&priv->extra_stats.tx_babt);
2739 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2740 }
2741 return IRQ_HANDLED;
2742 }
2743
2744 /* The interrupt handler for devices with one interrupt */
2745 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2746 {
2747 struct gfar_priv_grp *gfargrp = grp_id;
2748
2749 /* Save ievent for future reference */
2750 u32 events = gfar_read(&gfargrp->regs->ievent);
2751
2752 /* Check for reception */
2753 if (events & IEVENT_RX_MASK)
2754 gfar_receive(irq, grp_id);
2755
2756 /* Check for transmit completion */
2757 if (events & IEVENT_TX_MASK)
2758 gfar_transmit(irq, grp_id);
2759
2760 /* Check for errors */
2761 if (events & IEVENT_ERR_MASK)
2762 gfar_error(irq, grp_id);
2763
2764 return IRQ_HANDLED;
2765 }
2766
2767 #ifdef CONFIG_NET_POLL_CONTROLLER
2768 /* Polling 'interrupt' - used by things like netconsole to send skbs
2769 * without having to re-enable interrupts. It's not called while
2770 * the interrupt routine is executing.
2771 */
2772 static void gfar_netpoll(struct net_device *dev)
2773 {
2774 struct gfar_private *priv = netdev_priv(dev);
2775 int i;
2776
2777 /* If the device has multiple interrupts, run tx/rx */
2778 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2779 for (i = 0; i < priv->num_grps; i++) {
2780 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2781
2782 disable_irq(gfar_irq(grp, TX)->irq);
2783 disable_irq(gfar_irq(grp, RX)->irq);
2784 disable_irq(gfar_irq(grp, ER)->irq);
2785 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2786 enable_irq(gfar_irq(grp, ER)->irq);
2787 enable_irq(gfar_irq(grp, RX)->irq);
2788 enable_irq(gfar_irq(grp, TX)->irq);
2789 }
2790 } else {
2791 for (i = 0; i < priv->num_grps; i++) {
2792 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2793
2794 disable_irq(gfar_irq(grp, TX)->irq);
2795 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2796 enable_irq(gfar_irq(grp, TX)->irq);
2797 }
2798 }
2799 }
2800 #endif
2801
2802 static void free_grp_irqs(struct gfar_priv_grp *grp)
2803 {
2804 free_irq(gfar_irq(grp, TX)->irq, grp);
2805 free_irq(gfar_irq(grp, RX)->irq, grp);
2806 free_irq(gfar_irq(grp, ER)->irq, grp);
2807 }
2808
2809 static int register_grp_irqs(struct gfar_priv_grp *grp)
2810 {
2811 struct gfar_private *priv = grp->priv;
2812 struct net_device *dev = priv->ndev;
2813 int err;
2814
2815 /* If the device has multiple interrupts, register for
2816 * them. Otherwise, register only the single shared one.
2817 */
2818 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2819 /* Install our interrupt handlers for Error,
2820 * Transmit, and Receive
2821 */
2822 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2823 gfar_irq(grp, ER)->name, grp);
2824 if (err < 0) {
2825 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2826 gfar_irq(grp, ER)->irq);
2827
2828 goto err_irq_fail;
2829 }
2830 enable_irq_wake(gfar_irq(grp, ER)->irq);
2831
2832 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2833 gfar_irq(grp, TX)->name, grp);
2834 if (err < 0) {
2835 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2836 gfar_irq(grp, TX)->irq);
2837 goto tx_irq_fail;
2838 }
2839 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2840 gfar_irq(grp, RX)->name, grp);
2841 if (err < 0) {
2842 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2843 gfar_irq(grp, RX)->irq);
2844 goto rx_irq_fail;
2845 }
2846 enable_irq_wake(gfar_irq(grp, RX)->irq);
2847
2848 } else {
2849 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2850 gfar_irq(grp, TX)->name, grp);
2851 if (err < 0) {
2852 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2853 gfar_irq(grp, TX)->irq);
2854 goto err_irq_fail;
2855 }
2856 enable_irq_wake(gfar_irq(grp, TX)->irq);
2857 }
2858
2859 return 0;
2860
2861 rx_irq_fail:
2862 free_irq(gfar_irq(grp, TX)->irq, grp);
2863 tx_irq_fail:
2864 free_irq(gfar_irq(grp, ER)->irq, grp);
2865 err_irq_fail:
2866 return err;
2867
2868 }
2869
2870 static void gfar_free_irq(struct gfar_private *priv)
2871 {
2872 int i;
2873
2874 /* Free the IRQs */
2875 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2876 for (i = 0; i < priv->num_grps; i++)
2877 free_grp_irqs(&priv->gfargrp[i]);
2878 } else {
2879 for (i = 0; i < priv->num_grps; i++)
2880 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2881 &priv->gfargrp[i]);
2882 }
2883 }
2884
2885 static int gfar_request_irq(struct gfar_private *priv)
2886 {
2887 int err, i, j;
2888
2889 for (i = 0; i < priv->num_grps; i++) {
2890 err = register_grp_irqs(&priv->gfargrp[i]);
2891 if (err) {
2892 for (j = 0; j < i; j++)
2893 free_grp_irqs(&priv->gfargrp[j]);
2894 return err;
2895 }
2896 }
2897
2898 return 0;
2899 }
2900
2901 /* Called when something needs to use the ethernet device
2902 * Returns 0 for success.
2903 */
2904 static int gfar_enet_open(struct net_device *dev)
2905 {
2906 struct gfar_private *priv = netdev_priv(dev);
2907 int err;
2908
2909 err = init_phy(dev);
2910 if (err)
2911 return err;
2912
2913 err = gfar_request_irq(priv);
2914 if (err)
2915 return err;
2916
2917 err = startup_gfar(dev);
2918 if (err)
2919 return err;
2920
2921 return err;
2922 }
2923
2924 /* Stops the kernel queue, and halts the controller */
2925 static int gfar_close(struct net_device *dev)
2926 {
2927 struct gfar_private *priv = netdev_priv(dev);
2928
2929 cancel_work_sync(&priv->reset_task);
2930 stop_gfar(dev);
2931
2932 /* Disconnect from the PHY */
2933 phy_disconnect(dev->phydev);
2934
2935 gfar_free_irq(priv);
2936
2937 return 0;
2938 }
2939
2940 /* Clears each of the exact match registers to zero, so they
2941 * don't interfere with normal reception
2942 */
2943 static void gfar_clear_exact_match(struct net_device *dev)
2944 {
2945 int idx;
2946 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
2947
2948 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2949 gfar_set_mac_for_addr(dev, idx, zero_arr);
2950 }
2951
2952 /* Update the hash table based on the current list of multicast
2953 * addresses we subscribe to. Also, change the promiscuity of
2954 * the device based on the flags (this function is called
2955 * whenever dev->flags is changed)
2956 */
2957 static void gfar_set_multi(struct net_device *dev)
2958 {
2959 struct netdev_hw_addr *ha;
2960 struct gfar_private *priv = netdev_priv(dev);
2961 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2962 u32 tempval;
2963
2964 if (dev->flags & IFF_PROMISC) {
2965 /* Set RCTRL to PROM */
2966 tempval = gfar_read(&regs->rctrl);
2967 tempval |= RCTRL_PROM;
2968 gfar_write(&regs->rctrl, tempval);
2969 } else {
2970 /* Set RCTRL to not PROM */
2971 tempval = gfar_read(&regs->rctrl);
2972 tempval &= ~(RCTRL_PROM);
2973 gfar_write(&regs->rctrl, tempval);
2974 }
2975
2976 if (dev->flags & IFF_ALLMULTI) {
2977 /* Set the hash to rx all multicast frames */
2978 gfar_write(&regs->igaddr0, 0xffffffff);
2979 gfar_write(&regs->igaddr1, 0xffffffff);
2980 gfar_write(&regs->igaddr2, 0xffffffff);
2981 gfar_write(&regs->igaddr3, 0xffffffff);
2982 gfar_write(&regs->igaddr4, 0xffffffff);
2983 gfar_write(&regs->igaddr5, 0xffffffff);
2984 gfar_write(&regs->igaddr6, 0xffffffff);
2985 gfar_write(&regs->igaddr7, 0xffffffff);
2986 gfar_write(&regs->gaddr0, 0xffffffff);
2987 gfar_write(&regs->gaddr1, 0xffffffff);
2988 gfar_write(&regs->gaddr2, 0xffffffff);
2989 gfar_write(&regs->gaddr3, 0xffffffff);
2990 gfar_write(&regs->gaddr4, 0xffffffff);
2991 gfar_write(&regs->gaddr5, 0xffffffff);
2992 gfar_write(&regs->gaddr6, 0xffffffff);
2993 gfar_write(&regs->gaddr7, 0xffffffff);
2994 } else {
2995 int em_num;
2996 int idx;
2997
2998 /* zero out the hash */
2999 gfar_write(&regs->igaddr0, 0x0);
3000 gfar_write(&regs->igaddr1, 0x0);
3001 gfar_write(&regs->igaddr2, 0x0);
3002 gfar_write(&regs->igaddr3, 0x0);
3003 gfar_write(&regs->igaddr4, 0x0);
3004 gfar_write(&regs->igaddr5, 0x0);
3005 gfar_write(&regs->igaddr6, 0x0);
3006 gfar_write(&regs->igaddr7, 0x0);
3007 gfar_write(&regs->gaddr0, 0x0);
3008 gfar_write(&regs->gaddr1, 0x0);
3009 gfar_write(&regs->gaddr2, 0x0);
3010 gfar_write(&regs->gaddr3, 0x0);
3011 gfar_write(&regs->gaddr4, 0x0);
3012 gfar_write(&regs->gaddr5, 0x0);
3013 gfar_write(&regs->gaddr6, 0x0);
3014 gfar_write(&regs->gaddr7, 0x0);
3015
3016 /* If we have extended hash tables, we need to
3017 * clear the exact match registers to prepare for
3018 * setting them
3019 */
3020 if (priv->extended_hash) {
3021 em_num = GFAR_EM_NUM + 1;
3022 gfar_clear_exact_match(dev);
3023 idx = 1;
3024 } else {
3025 idx = 0;
3026 em_num = 0;
3027 }
3028
3029 if (netdev_mc_empty(dev))
3030 return;
3031
3032 /* Parse the list, and set the appropriate bits */
3033 netdev_for_each_mc_addr(ha, dev) {
3034 if (idx < em_num) {
3035 gfar_set_mac_for_addr(dev, idx, ha->addr);
3036 idx++;
3037 } else
3038 gfar_set_hash_for_addr(dev, ha->addr);
3039 }
3040 }
3041 }
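/* For reference, a minimal sketch of the hash-bit selection behind
 * gfar_set_hash_for_addr() (assuming the usual eTSEC scheme of taking
 * the top hash_width bits of an Ethernet CRC-32; names below are
 * illustrative, not the driver's exact code):
 *
 *	u32 result = ether_crc(ETH_ALEN, addr);
 *	u32 idx = result >> (32 - width);	// width: 8 or 9 bits
 *	u8 whichreg = idx >> 5;			// igaddr/gaddr register
 *	u8 whichbit = idx & 0x1f;		// bit within that register
 *	gfar_write(hash_regs[whichreg],
 *		   gfar_read(hash_regs[whichreg]) | (1U << (31 - whichbit)));
 */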
3042
3043 void gfar_mac_reset(struct gfar_private *priv)
3044 {
3045 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3046 u32 tempval;
3047
3048 /* Reset MAC layer */
3049 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3050
3051 /* We need to delay at least 3 TX clocks */
3052 udelay(3);
3053
3054 /* the soft reset bit is not self-resetting, so we need to
3055 * clear it before resuming normal operation
3056 */
3057 gfar_write(&regs->maccfg1, 0);
3058
3059 udelay(3);
3060
3061 gfar_rx_offload_en(priv);
3062
3063 /* Initialize the max receive frame/buffer lengths */
3064 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3065 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3066
3067 /* Initialize the Minimum Frame Length Register */
3068 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3069
3070 /* Initialize MACCFG2. */
3071 tempval = MACCFG2_INIT_SETTINGS;
3072
3073 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
3074 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
3075 * and by checking RxBD[LG] and discarding larger than MAXFRM.
3076 */
3077 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3078 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3079
3080 gfar_write(&regs->maccfg2, tempval);
3081
3082 /* Clear mac addr hash registers */
3083 gfar_write(&regs->igaddr0, 0);
3084 gfar_write(&regs->igaddr1, 0);
3085 gfar_write(&regs->igaddr2, 0);
3086 gfar_write(&regs->igaddr3, 0);
3087 gfar_write(&regs->igaddr4, 0);
3088 gfar_write(&regs->igaddr5, 0);
3089 gfar_write(&regs->igaddr6, 0);
3090 gfar_write(&regs->igaddr7, 0);
3091
3092 gfar_write(&regs->gaddr0, 0);
3093 gfar_write(&regs->gaddr1, 0);
3094 gfar_write(&regs->gaddr2, 0);
3095 gfar_write(&regs->gaddr3, 0);
3096 gfar_write(&regs->gaddr4, 0);
3097 gfar_write(&regs->gaddr5, 0);
3098 gfar_write(&regs->gaddr6, 0);
3099 gfar_write(&regs->gaddr7, 0);
3100
3101 if (priv->extended_hash)
3102 gfar_clear_exact_match(priv->ndev);
3103
3104 gfar_mac_rx_config(priv);
3105
3106 gfar_mac_tx_config(priv);
3107
3108 gfar_set_mac_address(priv->ndev);
3109
3110 gfar_set_multi(priv->ndev);
3111
3112 /* clear ievent and imask before configuring coalescing */
3113 gfar_ints_disable(priv);
3114
3115 /* Configure the coalescing support */
3116 gfar_configure_coalescing_all(priv);
3117 }
3118
3119 static void gfar_hw_init(struct gfar_private *priv)
3120 {
3121 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3122 u32 attrs;
3123
3124 /* Stop the DMA engine now, in case it was running before
3125 * (The firmware could have used it, and left it running).
3126 */
3127 gfar_halt(priv);
3128
3129 gfar_mac_reset(priv);
3130
3131 /* Zero out the RMON MIB registers if the device has them */
3132 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3133 memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));
3134
3135 /* Mask off the CAM interrupts */
3136 gfar_write(&regs->rmon.cam1, 0xffffffff);
3137 gfar_write(&regs->rmon.cam2, 0xffffffff);
3138 /* Clear the CAR registers (w1c style) */
3139 gfar_write(&regs->rmon.car1, 0xffffffff);
3140 gfar_write(&regs->rmon.car2, 0xffffffff);
3141 }
3142
3143 /* Initialize ECNTRL */
3144 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3145
3146 /* Set the extraction length and index */
3147 attrs = ATTRELI_EL(priv->rx_stash_size) |
3148 ATTRELI_EI(priv->rx_stash_index);
3149
3150 gfar_write(&regs->attreli, attrs);
3151
3152 /* Start with defaults, and add stashing
3153 * depending on driver parameters
3154 */
3155 attrs = ATTR_INIT_SETTINGS;
3156
3157 if (priv->bd_stash_en)
3158 attrs |= ATTR_BDSTASH;
3159
3160 if (priv->rx_stash_size != 0)
3161 attrs |= ATTR_BUFSTASH;
3162
3163 gfar_write(&regs->attr, attrs);
3164
3165 /* FIFO configs */
3166 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3167 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3168 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3169
3170 /* Program the interrupt steering regs, only for MG devices */
3171 if (priv->num_grps > 1)
3172 gfar_write_isrg(priv);
3173 }
3174
3175 static const struct net_device_ops gfar_netdev_ops = {
3176 .ndo_open = gfar_enet_open,
3177 .ndo_start_xmit = gfar_start_xmit,
3178 .ndo_stop = gfar_close,
3179 .ndo_change_mtu = gfar_change_mtu,
3180 .ndo_set_features = gfar_set_features,
3181 .ndo_set_rx_mode = gfar_set_multi,
3182 .ndo_tx_timeout = gfar_timeout,
3183 .ndo_eth_ioctl = gfar_ioctl,
3184 .ndo_get_stats64 = gfar_get_stats64,
3185 .ndo_change_carrier = fixed_phy_change_carrier,
3186 .ndo_set_mac_address = gfar_set_mac_addr,
3187 .ndo_validate_addr = eth_validate_addr,
3188 #ifdef CONFIG_NET_POLL_CONTROLLER
3189 .ndo_poll_controller = gfar_netpoll,
3190 #endif
3191 };
3192
3193 /* Set up the ethernet device structure, private data,
3194 * and anything else we need before we start
3195 */
3196 static int gfar_probe(struct platform_device *ofdev)
3197 {
3198 struct device_node *np = ofdev->dev.of_node;
3199 struct net_device *dev = NULL;
3200 struct gfar_private *priv = NULL;
3201 int err = 0, i;
3202
3203 err = gfar_of_init(ofdev, &dev);
3204
3205 if (err)
3206 return err;
3207
3208 priv = netdev_priv(dev);
3209 priv->ndev = dev;
3210 priv->ofdev = ofdev;
3211 priv->dev = &ofdev->dev;
3212 SET_NETDEV_DEV(dev, &ofdev->dev);
3213
3214 INIT_WORK(&priv->reset_task, gfar_reset_task);
3215
3216 platform_set_drvdata(ofdev, priv);
3217
3218 gfar_detect_errata(priv);
3219
3220 /* Set the dev->base_addr to the gfar reg region */
3221 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3222
3223 /* Fill in the dev structure */
3224 dev->watchdog_timeo = TX_TIMEOUT;
3225 /* MTU range: 50 - 9586 */
3226 dev->mtu = 1500;
3227 dev->min_mtu = 50;
3228 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3229 dev->netdev_ops = &gfar_netdev_ops;
3230 dev->ethtool_ops = &gfar_ethtool_ops;
3231
3232 /* Register NAPI for each interrupt group */
3233 for (i = 0; i < priv->num_grps; i++) {
3234 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3235 gfar_poll_rx_sq, NAPI_POLL_WEIGHT);
3236 netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
3237 gfar_poll_tx_sq, 2);
3238 }
3239
3240 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3241 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3242 NETIF_F_RXCSUM;
3243 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3244 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3245 }
3246
3247 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3248 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3249 NETIF_F_HW_VLAN_CTAG_RX;
3250 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3251 }
3252
3253 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3254
3255 gfar_init_addr_hash_table(priv);
3256
3257 /* Insert receive time stamps into padding alignment bytes, and
3258 * add 2 bytes of padding to ensure proper CPU alignment.
3259 */
3260 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3261 priv->padding = 8 + DEFAULT_PADDING;
3262
3263 if (dev->features & NETIF_F_IP_CSUM ||
3264 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3265 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
3266
3267 /* Initializing some of the rx/tx queue level parameters */
3268 for (i = 0; i < priv->num_tx_queues; i++) {
3269 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3270 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3271 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3272 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3273 }
3274
3275 for (i = 0; i < priv->num_rx_queues; i++) {
3276 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3277 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3278 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3279 }
3280
3281 /* Always enable rx filer if available */
3282 priv->rx_filer_enable =
3283 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3284 /* Enable most messages by default */
3285 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
3286 /* use priority h/w tx queue scheduling for single queue devices */
3287 if (priv->num_tx_queues == 1)
3288 priv->prio_sched_en = 1;
3289
3290 set_bit(GFAR_DOWN, &priv->state);
3291
3292 gfar_hw_init(priv);
3293
3294 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3295 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
3296
3297 spin_lock_init(&priv->rmon_overflow.lock);
3298 priv->rmon_overflow.imask = IMASK_MSRO;
3299 gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
3300 }
3301
3302 /* Carrier starts down, phylib will bring it up */
3303 netif_carrier_off(dev);
3304
3305 err = register_netdev(dev);
3306
3307 if (err) {
3308 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3309 goto register_fail;
3310 }
3311
3312 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3313 priv->wol_supported |= GFAR_WOL_MAGIC;
3314
3315 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3316 priv->rx_filer_enable)
3317 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3318
3319 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3320
3321 /* fill out IRQ number and name fields */
3322 for (i = 0; i < priv->num_grps; i++) {
3323 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3324 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3325 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3326 dev->name, "_g", '0' + i, "_tx");
3327 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3328 dev->name, "_g", '0' + i, "_rx");
3329 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3330 dev->name, "_g", '0' + i, "_er");
3331 } else
3332 strcpy(gfar_irq(grp, TX)->name, dev->name);
3333 }
3334
3335 /* Initialize the filer table */
3336 gfar_init_filer_table(priv);
3337
3338 /* Print out the device info */
3339 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3340
3341 /* Even more device info helps when determining which kernel
3342 * provided which set of benchmarks.
3343 */
3344 netdev_info(dev, "Running with NAPI enabled\n");
3345 for (i = 0; i < priv->num_rx_queues; i++)
3346 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3347 i, priv->rx_queue[i]->rx_ring_size);
3348 for (i = 0; i < priv->num_tx_queues; i++)
3349 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3350 i, priv->tx_queue[i]->tx_ring_size);
3351
3352 return 0;
3353
3354 register_fail:
3355 if (of_phy_is_fixed_link(np))
3356 of_phy_deregister_fixed_link(np);
3357 unmap_group_regs(priv);
3358 gfar_free_rx_queues(priv);
3359 gfar_free_tx_queues(priv);
3360 of_node_put(priv->phy_node);
3361 of_node_put(priv->tbi_node);
3362 free_gfar_dev(priv);
3363 return err;
3364 }
3365
3366 static int gfar_remove(struct platform_device *ofdev)
3367 {
3368 struct gfar_private *priv = platform_get_drvdata(ofdev);
3369 struct device_node *np = ofdev->dev.of_node;
3370
3371 of_node_put(priv->phy_node);
3372 of_node_put(priv->tbi_node);
3373
3374 unregister_netdev(priv->ndev);
3375
3376 if (of_phy_is_fixed_link(np))
3377 of_phy_deregister_fixed_link(np);
3378
3379 unmap_group_regs(priv);
3380 gfar_free_rx_queues(priv);
3381 gfar_free_tx_queues(priv);
3382 free_gfar_dev(priv);
3383
3384 return 0;
3385 }
3386
3387 #ifdef CONFIG_PM
3388
3389 static void __gfar_filer_disable(struct gfar_private *priv)
3390 {
3391 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3392 u32 temp;
3393
3394 temp = gfar_read(&regs->rctrl);
3395 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3396 gfar_write(&regs->rctrl, temp);
3397 }
3398
3399 static void __gfar_filer_enable(struct gfar_private *priv)
3400 {
3401 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3402 u32 temp;
3403
3404 temp = gfar_read(&regs->rctrl);
3405 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3406 gfar_write(&regs->rctrl, temp);
3407 }
3408
3409 /* Filer rules implementing wol capabilities */
3410 static void gfar_filer_config_wol(struct gfar_private *priv)
3411 {
3412 unsigned int i;
3413 u32 rqfcr;
3414
3415 __gfar_filer_disable(priv);
3416
3417 /* clear the filer table, reject any packet by default */
3418 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3419 for (i = 0; i <= MAX_FILER_IDX; i++)
3420 gfar_write_filer(priv, i, rqfcr, 0);
3421
3422 i = 0;
3423 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3424 /* unicast packet, accept it */
3425 struct net_device *ndev = priv->ndev;
3426 /* get the default rx queue index */
3427 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3428 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3429 (ndev->dev_addr[1] << 8) |
3430 ndev->dev_addr[2];
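/* e.g. for a (hypothetical) MAC 00:04:9f:01:02:03 this packs the DAH
 * word as 0x00049f; the DAL word built below would be 0x010203
 */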
3431
3432 rqfcr = (qindex << 10) | RQFCR_AND |
3433 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3434
3435 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3436
3437 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3438 (ndev->dev_addr[4] << 8) |
3439 ndev->dev_addr[5];
3440 rqfcr = (qindex << 10) | RQFCR_GPI |
3441 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3442 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3443 }
3444
3445 __gfar_filer_enable(priv);
3446 }
3447
3448 static void gfar_filer_restore_table(struct gfar_private *priv)
3449 {
3450 u32 rqfcr, rqfpr;
3451 unsigned int i;
3452
3453 __gfar_filer_disable(priv);
3454
3455 for (i = 0; i <= MAX_FILER_IDX; i++) {
3456 rqfcr = priv->ftp_rqfcr[i];
3457 rqfpr = priv->ftp_rqfpr[i];
3458 gfar_write_filer(priv, i, rqfcr, rqfpr);
3459 }
3460
3461 __gfar_filer_enable(priv);
3462 }
3463
3464 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
3465 static void gfar_start_wol_filer(struct gfar_private *priv)
3466 {
3467 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3468 u32 tempval;
3469 int i = 0;
3470
3471 /* Enable Rx hw queues */
3472 gfar_write(&regs->rqueue, priv->rqueue);
3473
3474 /* Initialize DMACTRL to have WWR and WOP */
3475 tempval = gfar_read(&regs->dmactrl);
3476 tempval |= DMACTRL_INIT_SETTINGS;
3477 gfar_write(&regs->dmactrl, tempval);
3478
3479 /* Make sure we aren't stopped */
3480 tempval = gfar_read(&regs->dmactrl);
3481 tempval &= ~DMACTRL_GRS;
3482 gfar_write(&regs->dmactrl, tempval);
3483
3484 for (i = 0; i < priv->num_grps; i++) {
3485 regs = priv->gfargrp[i].regs;
3486 /* Clear RHLT, so that the DMA starts polling now */
3487 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3488 /* enable the Filer General Purpose Interrupt */
3489 gfar_write(&regs->imask, IMASK_FGPI);
3490 }
3491
3492 /* Enable Rx DMA */
3493 tempval = gfar_read(&regs->maccfg1);
3494 tempval |= MACCFG1_RX_EN;
3495 gfar_write(&regs->maccfg1, tempval);
3496 }
3497
3498 static int gfar_suspend(struct device *dev)
3499 {
3500 struct gfar_private *priv = dev_get_drvdata(dev);
3501 struct net_device *ndev = priv->ndev;
3502 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3503 u32 tempval;
3504 u16 wol = priv->wol_opts;
3505
3506 if (!netif_running(ndev))
3507 return 0;
3508
3509 disable_napi(priv);
3510 netif_tx_lock(ndev);
3511 netif_device_detach(ndev);
3512 netif_tx_unlock(ndev);
3513
3514 gfar_halt(priv);
3515
3516 if (wol & GFAR_WOL_MAGIC) {
3517 /* Enable interrupt on Magic Packet */
3518 gfar_write(&regs->imask, IMASK_MAG);
3519
3520 /* Enable Magic Packet mode */
3521 tempval = gfar_read(&regs->maccfg2);
3522 tempval |= MACCFG2_MPEN;
3523 gfar_write(&regs->maccfg2, tempval);
3524
3525 /* re-enable the Rx block */
3526 tempval = gfar_read(&regs->maccfg1);
3527 tempval |= MACCFG1_RX_EN;
3528 gfar_write(&regs->maccfg1, tempval);
3529
3530 } else if (wol & GFAR_WOL_FILER_UCAST) {
3531 gfar_filer_config_wol(priv);
3532 gfar_start_wol_filer(priv);
3533
3534 } else {
3535 phy_stop(ndev->phydev);
3536 }
3537
3538 return 0;
3539 }
3540
3541 static int gfar_resume(struct device *dev)
3542 {
3543 struct gfar_private *priv = dev_get_drvdata(dev);
3544 struct net_device *ndev = priv->ndev;
3545 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3546 u32 tempval;
3547 u16 wol = priv->wol_opts;
3548
3549 if (!netif_running(ndev))
3550 return 0;
3551
3552 if (wol & GFAR_WOL_MAGIC) {
3553 /* Disable Magic Packet mode */
3554 tempval = gfar_read(&regs->maccfg2);
3555 tempval &= ~MACCFG2_MPEN;
3556 gfar_write(&regs->maccfg2, tempval);
3557
3558 } else if (wol & GFAR_WOL_FILER_UCAST) {
3559 /* need to stop rx only, tx is already down */
3560 gfar_halt(priv);
3561 gfar_filer_restore_table(priv);
3562
3563 } else {
3564 phy_start(ndev->phydev);
3565 }
3566
3567 gfar_start(priv);
3568
3569 netif_device_attach(ndev);
3570 enable_napi(priv);
3571
3572 return 0;
3573 }
3574
3575 static int gfar_restore(struct device *dev)
3576 {
3577 struct gfar_private *priv = dev_get_drvdata(dev);
3578 struct net_device *ndev = priv->ndev;
3579
3580 if (!netif_running(ndev)) {
3581 netif_device_attach(ndev);
3582
3583 return 0;
3584 }
3585
3586 gfar_init_bds(ndev);
3587
3588 gfar_mac_reset(priv);
3589
3590 gfar_init_tx_rx_base(priv);
3591
3592 gfar_start(priv);
3593
3594 priv->oldlink = 0;
3595 priv->oldspeed = 0;
3596 priv->oldduplex = -1;
3597
3598 if (ndev->phydev)
3599 phy_start(ndev->phydev);
3600
3601 netif_device_attach(ndev);
3602 enable_napi(priv);
3603
3604 return 0;
3605 }
3606
3607 static const struct dev_pm_ops gfar_pm_ops = {
3608 .suspend = gfar_suspend,
3609 .resume = gfar_resume,
3610 .freeze = gfar_suspend,
3611 .thaw = gfar_resume,
3612 .restore = gfar_restore,
3613 };
3614
3615 #define GFAR_PM_OPS (&gfar_pm_ops)
3616
3617 #else
3618
3619 #define GFAR_PM_OPS NULL
3620
3621 #endif
3622
3623 static const struct of_device_id gfar_match[] =
3624 {
3625 {
3626 .type = "network",
3627 .compatible = "gianfar",
3628 },
3629 {
3630 .compatible = "fsl,etsec2",
3631 },
3632 {},
3633 };
3634 MODULE_DEVICE_TABLE(of, gfar_match);
3635
3636 /* Structure for a device driver */
3637 static struct platform_driver gfar_driver = {
3638 .driver = {
3639 .name = "fsl-gianfar",
3640 .pm = GFAR_PM_OPS,
3641 .of_match_table = gfar_match,
3642 },
3643 .probe = gfar_probe,
3644 .remove = gfar_remove,
3645 };
3646
3647 module_platform_driver(gfar_driver);
3648