// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 */

/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
#include "emac-sgmii.h"

#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
        NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define EMAC_RRD_SIZE           4
/* The RRD size if timestamping is enabled: */
#define EMAC_TS_RRD_SIZE        6
#define EMAC_TPD_SIZE           4
#define EMAC_RFD_SIZE           2

#define REG_MAC_RX_STATUS_BIN   EMAC_RXMAC_STATC_REG0
#define REG_MAC_RX_STATUS_END   EMAC_RXMAC_STATC_REG22
#define REG_MAC_TX_STATUS_BIN   EMAC_TXMAC_STATC_REG0
#define REG_MAC_TX_STATUS_END   EMAC_TXMAC_STATC_REG24

#define RXQ0_NUM_RFD_PREF_DEF   8
#define TXQ0_NUM_TPD_PREF_DEF   5

#define EMAC_PREAMBLE_DEF       7

#define DMAR_DLY_CNT_DEF        15
#define DMAW_DLY_CNT_DEF        4

#define IMR_NORMAL_MASK         (ISR_ERROR | ISR_OVER | ISR_TX_PKT)

#define ISR_TX_PKT      (\
        TX_PKT_INT      |\
        TX_PKT_INT1     |\
        TX_PKT_INT2     |\
        TX_PKT_INT3)

#define ISR_OVER        (\
        RFD0_UR_INT     |\
        RFD1_UR_INT     |\
        RFD2_UR_INT     |\
        RFD3_UR_INT     |\
        RFD4_UR_INT     |\
        RXF_OF_INT      |\
        TXF_UR_INT)

#define ISR_ERROR       (\
        DMAR_TO_INT     |\
        DMAW_TO_INT     |\
        TXQ_TO_INT)

/* in sync with enum emac_clk_id */
static const char * const emac_clk_name[] = {
        "axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
        "rx_clk", "sys_clk"
};

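/* Read-modify-write helper: clear the @mask bits of the register at @addr
 * and set the bits given in @val.
 */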
void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
{
        u32 data = readl(addr);

        writel(((data & ~mask) | val), addr);
}

/* reinitialize */
int emac_reinit_locked(struct emac_adapter *adpt)
{
        int ret;

        mutex_lock(&adpt->reset_lock);

        emac_mac_down(adpt);
        emac_sgmii_reset(adpt);
        ret = emac_mac_up(adpt);

        mutex_unlock(&adpt->reset_lock);

        return ret;
}

/* NAPI */
static int emac_napi_rtx(struct napi_struct *napi, int budget)
{
        struct emac_rx_queue *rx_q =
                container_of(napi, struct emac_rx_queue, napi);
        struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
        struct emac_irq *irq = rx_q->irq;
        int work_done = 0;

        emac_mac_rx_process(adpt, rx_q, &work_done, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);

                irq->mask |= rx_q->intr;
                writel(irq->mask, adpt->base + EMAC_INT_MASK);
        }

        return work_done;
}

/* Transmit the packet */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
                                   struct net_device *netdev)
{
        struct emac_adapter *adpt = netdev_priv(netdev);

        return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
}

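/* Interrupt handler: mask all interrupts, read the latched status, kick the
 * reset worker on error bits, schedule NAPI for the receive queue, reap
 * completed TX descriptors, then restore the interrupt mask.
 */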
static irqreturn_t emac_isr(int _irq, void *data)
{
        struct emac_irq *irq = data;
        struct emac_adapter *adpt =
                container_of(irq, struct emac_adapter, irq);
        struct emac_rx_queue *rx_q = &adpt->rx_q;
        u32 isr, status;

        /* disable the interrupt */
        writel(0, adpt->base + EMAC_INT_MASK);

        isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);

        status = isr & irq->mask;
        if (status == 0)
                goto exit;

        if (status & ISR_ERROR) {
                net_err_ratelimited("%s: error interrupt 0x%lx\n",
                                    adpt->netdev->name, status & ISR_ERROR);
                /* reset MAC */
                schedule_work(&adpt->work_thread);
        }

        /* Schedule the napi for receive queue with interrupt
         * status bit set
         */
        if (status & rx_q->intr) {
                if (napi_schedule_prep(&rx_q->napi)) {
                        irq->mask &= ~rx_q->intr;
                        __napi_schedule(&rx_q->napi);
                }
        }

        if (status & TX_PKT_INT)
                emac_mac_tx_process(adpt, &adpt->tx_q);

        if (status & ISR_OVER)
                net_warn_ratelimited("%s: TX/RX overflow interrupt\n",
                                     adpt->netdev->name);

exit:
        /* enable the interrupt */
        writel(irq->mask, adpt->base + EMAC_INT_MASK);

        return IRQ_HANDLED;
}

/* Configure VLAN tag strip/insert feature */
static int emac_set_features(struct net_device *netdev,
                             netdev_features_t features)
{
        netdev_features_t changed = features ^ netdev->features;
        struct emac_adapter *adpt = netdev_priv(netdev);

        /* We only need to reprogram the hardware if the VLAN tag features
         * have changed, and if it's already running.
         */
        if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
                return 0;

        if (!netif_running(netdev))
                return 0;

        /* emac_mac_mode_config() uses netdev->features to configure the EMAC,
         * so make sure it's set first.
         */
        netdev->features = features;

        return emac_reinit_locked(adpt);
}

/* Configure Multicast and Promiscuous modes */
static void emac_rx_mode_set(struct net_device *netdev)
{
        struct emac_adapter *adpt = netdev_priv(netdev);
        struct netdev_hw_addr *ha;

        emac_mac_mode_config(adpt);

        /* update multicast address filtering */
        emac_mac_multicast_addr_clear(adpt);
        netdev_for_each_mc_addr(ha, netdev)
                emac_mac_multicast_addr_set(adpt, ha->addr);
}

/* Change the Maximum Transmission Unit (MTU) */
static int emac_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct emac_adapter *adpt = netdev_priv(netdev);

        netif_dbg(adpt, hw, adpt->netdev,
                  "changing MTU from %d to %d\n", netdev->mtu,
                  new_mtu);
        netdev->mtu = new_mtu;

        if (netif_running(netdev))
                return emac_reinit_locked(adpt);

        return 0;
}

/* Called when the network interface is made active */
static int emac_open(struct net_device *netdev)
{
        struct emac_adapter *adpt = netdev_priv(netdev);
        struct emac_irq *irq = &adpt->irq;
        int ret;

        ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
        if (ret) {
                netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
                return ret;
        }

        /* allocate rx/tx dma buffer & descriptors */
        ret = emac_mac_rx_tx_rings_alloc_all(adpt);
        if (ret) {
                netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
                free_irq(irq->irq, irq);
                return ret;
        }

        ret = emac_sgmii_open(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
                return ret;
        }

        ret = emac_mac_up(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
                emac_sgmii_close(adpt);
                return ret;
        }

        return 0;
}

/* Called when the network interface is disabled */
static int emac_close(struct net_device *netdev)
{
        struct emac_adapter *adpt = netdev_priv(netdev);

        mutex_lock(&adpt->reset_lock);

        emac_sgmii_close(adpt);
        emac_mac_down(adpt);
        emac_mac_rx_tx_rings_free_all(adpt);

        free_irq(adpt->irq.irq, &adpt->irq);

        mutex_unlock(&adpt->reset_lock);

        return 0;
}

/* Respond to a TX hang */
static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct emac_adapter *adpt = netdev_priv(netdev);

        schedule_work(&adpt->work_thread);
}

/**
 * emac_update_hw_stats - read the EMAC stat registers
 * @adpt: pointer to adapter struct
 *
 * Reads the stats registers and writes the values to adpt->stats.
 *
 * adpt->stats.lock must be held while calling this function,
 * and while reading from adpt->stats.
 */
void emac_update_hw_stats(struct emac_adapter *adpt)
{
        struct emac_stats *stats = &adpt->stats;
        u64 *stats_itr = &adpt->stats.rx_ok;
        void __iomem *base = adpt->base;
        unsigned int addr;

        addr = REG_MAC_RX_STATUS_BIN;
        while (addr <= REG_MAC_RX_STATUS_END) {
                *stats_itr += readl_relaxed(base + addr);
                stats_itr++;
                addr += sizeof(u32);
        }

        /* additional rx status */
        stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
        stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);

        /* update tx status */
        addr = REG_MAC_TX_STATUS_BIN;
        stats_itr = &stats->tx_ok;

        while (addr <= REG_MAC_TX_STATUS_END) {
                *stats_itr += readl_relaxed(base + addr);
                stats_itr++;
                addr += sizeof(u32);
        }

        /* additional tx status */
        stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
}

/* Provide network statistics info for the interface */
static void emac_get_stats64(struct net_device *netdev,
                             struct rtnl_link_stats64 *net_stats)
{
        struct emac_adapter *adpt = netdev_priv(netdev);
        struct emac_stats *stats = &adpt->stats;

        spin_lock(&stats->lock);

        emac_update_hw_stats(adpt);

        /* return parsed statistics */
        net_stats->rx_packets = stats->rx_ok;
        net_stats->tx_packets = stats->tx_ok;
        net_stats->rx_bytes = stats->rx_byte_cnt;
        net_stats->tx_bytes = stats->tx_byte_cnt;
        net_stats->multicast = stats->rx_mcast;
        net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
                                stats->tx_late_col + stats->tx_abort_col;

        net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
                               stats->rx_len_err + stats->rx_sz_ov +
                               stats->rx_align_err;
        net_stats->rx_fifo_errors = stats->rx_rxf_ov;
        net_stats->rx_length_errors = stats->rx_len_err;
        net_stats->rx_crc_errors = stats->rx_fcs_err;
        net_stats->rx_frame_errors = stats->rx_align_err;
        net_stats->rx_over_errors = stats->rx_rxf_ov;
        net_stats->rx_missed_errors = stats->rx_rxf_ov;

        net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
                               stats->tx_underrun + stats->tx_trunc;
        net_stats->tx_fifo_errors = stats->tx_underrun;
        net_stats->tx_aborted_errors = stats->tx_abort_col;
        net_stats->tx_window_errors = stats->tx_late_col;

        spin_unlock(&stats->lock);
}

static const struct net_device_ops emac_netdev_ops = {
        .ndo_open               = emac_open,
        .ndo_stop               = emac_close,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_start_xmit         = emac_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = emac_change_mtu,
        .ndo_eth_ioctl          = phy_do_ioctl_running,
        .ndo_tx_timeout         = emac_tx_timeout,
        .ndo_get_stats64        = emac_get_stats64,
        .ndo_set_features       = emac_set_features,
        .ndo_set_rx_mode        = emac_rx_mode_set,
};

/* Watchdog task routine, called to reinitialize the EMAC */
static void emac_work_thread(struct work_struct *work)
{
        struct emac_adapter *adpt =
                container_of(work, struct emac_adapter, work_thread);

        emac_reinit_locked(adpt);
}

/* Initialize various data structures */
static void emac_init_adapter(struct emac_adapter *adpt)
{
        u32 reg;

        adpt->rrd_size = EMAC_RRD_SIZE;
        adpt->tpd_size = EMAC_TPD_SIZE;
        adpt->rfd_size = EMAC_RFD_SIZE;

        /* descriptors */
        adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
        adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;

        /* dma */
        adpt->dma_order = emac_dma_ord_out;
        adpt->dmar_block = emac_dma_req_4096;
        adpt->dmaw_block = emac_dma_req_128;
        adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
        adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
        adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
        adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;

        /* irq moderator */
        reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
              ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
        adpt->irq_mod = reg;

        /* others */
        adpt->preamble = EMAC_PREAMBLE_DEF;

        /* default to automatic flow control */
        adpt->automatic = true;

        /* Disable single-pause-frame mode by default */
        adpt->single_pause_mode = false;
}

/* Get the clocks */
static int emac_clks_get(struct platform_device *pdev,
                         struct emac_adapter *adpt)
{
        unsigned int i;

        for (i = 0; i < EMAC_CLK_CNT; i++) {
                struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);

                if (IS_ERR(clk)) {
                        dev_err(&pdev->dev,
                                "could not claim clock %s (error=%li)\n",
                                emac_clk_name[i], PTR_ERR(clk));

                        return PTR_ERR(clk);
                }

                adpt->clk[i] = clk;
        }

        return 0;
}

/* Initialize clocks */
static int emac_clks_phase1_init(struct platform_device *pdev,
                                 struct emac_adapter *adpt)
{
        int ret;

        /* On ACPI platforms, clocks are controlled by firmware and/or
         * ACPI, not by drivers.
         */
        if (has_acpi_companion(&pdev->dev))
                return 0;

        ret = emac_clks_get(pdev, adpt);
        if (ret)
                return ret;

        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
        if (ret)
                return ret;

        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
        if (ret)
                goto disable_clk_axi;

        ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
        if (ret)
                goto disable_clk_cfg_ahb;

        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
        if (ret)
                goto disable_clk_cfg_ahb;

        return 0;

disable_clk_cfg_ahb:
        clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
disable_clk_axi:
        clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);

        return ret;
}

/* Enable clocks; emac_clks_phase1_init() must have been called first */
static int emac_clks_phase2_init(struct platform_device *pdev,
                                 struct emac_adapter *adpt)
{
        int ret;

        if (has_acpi_companion(&pdev->dev))
                return 0;

        ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
        if (ret)
                return ret;

        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
        if (ret)
                return ret;

        ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
        if (ret)
                return ret;

        ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
        if (ret)
                return ret;

        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
        if (ret)
                return ret;

        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
        if (ret)
                return ret;

        return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
}

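/* Disable and unprepare all EMAC clocks */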
static void emac_clks_teardown(struct emac_adapter *adpt)
{
        unsigned int i;

        for (i = 0; i < EMAC_CLK_CNT; i++)
                clk_disable_unprepare(adpt->clk[i]);
}

/* Get the resources */
static int emac_probe_resources(struct platform_device *pdev,
                                struct emac_adapter *adpt)
{
        struct net_device *netdev = adpt->netdev;
        int ret = 0;

        /* get mac address */
        if (device_get_ethdev_address(&pdev->dev, netdev))
                eth_hw_addr_random(netdev);

        /* Core 0 interrupt */
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                return ret;
        adpt->irq.irq = ret;

        /* base register address */
        adpt->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(adpt->base))
                return PTR_ERR(adpt->base);

        /* CSR register address */
        adpt->csr = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(adpt->csr))
                return PTR_ERR(adpt->csr);

        netdev->base_addr = (unsigned long)adpt->base;

        return 0;
}

static const struct of_device_id emac_dt_match[] = {
        {
                .compatible = "qcom,fsm9900-emac",
        },
        {}
};
MODULE_DEVICE_TABLE(of, emac_dt_match);

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
        {
                .id = "QCOM8070",
        },
        {}
};
MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
#endif

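/* Probe: set the DMA mask, allocate the net_device, map resources, bring up
 * clocks and the external/internal PHYs, then register the net device.
 */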
static int emac_probe(struct platform_device *pdev)
{
        struct net_device *netdev;
        struct emac_adapter *adpt;
        struct emac_sgmii *phy;
        u16 devid, revid;
        u32 reg;
        int ret;

        /* The TPD buffer address is limited to:
         * 1. PTP: 45bits. (Driver doesn't support yet.)
         * 2. NON-PTP: 46bits.
         */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
        if (ret) {
                dev_err(&pdev->dev, "could not set DMA mask\n");
                return ret;
        }

        netdev = alloc_etherdev(sizeof(struct emac_adapter));
        if (!netdev)
                return -ENOMEM;

        dev_set_drvdata(&pdev->dev, netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);
        emac_set_ethtool_ops(netdev);

        adpt = netdev_priv(netdev);
        adpt->netdev = netdev;
        adpt->msg_enable = EMAC_MSG_DEFAULT;

        phy = &adpt->phy;
        atomic_set(&phy->decode_error_count, 0);

        mutex_init(&adpt->reset_lock);
        spin_lock_init(&adpt->stats.lock);

        adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;

        ret = emac_probe_resources(pdev, adpt);
        if (ret)
                goto err_undo_netdev;

        /* initialize clocks */
        ret = emac_clks_phase1_init(pdev, adpt);
        if (ret) {
                dev_err(&pdev->dev, "could not initialize clocks\n");
                goto err_undo_netdev;
        }

        netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
        netdev->irq = adpt->irq.irq;

        netdev->netdev_ops = &emac_netdev_ops;

        emac_init_adapter(adpt);

        /* init external phy */
        ret = emac_phy_config(pdev, adpt);
        if (ret)
                goto err_undo_clocks;

        /* init internal sgmii phy */
        ret = emac_sgmii_config(pdev, adpt);
        if (ret)
                goto err_undo_mdiobus;

        /* enable clocks */
        ret = emac_clks_phase2_init(pdev, adpt);
        if (ret) {
                dev_err(&pdev->dev, "could not initialize clocks\n");
                goto err_undo_mdiobus;
        }

        /* set hw features */
        netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                        NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
                        NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features = netdev->features;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
                                 NETIF_F_TSO | NETIF_F_TSO6;

        /* MTU range: 46 - 9194 */
        netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE -
                          (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
        netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE -
                          (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

        INIT_WORK(&adpt->work_thread, emac_work_thread);

        /* Initialize queues */
        emac_mac_rx_tx_ring_init_all(pdev, adpt);

        netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx);

        ret = register_netdev(netdev);
        if (ret) {
                dev_err(&pdev->dev, "could not register net device\n");
                goto err_undo_napi;
        }

        reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
        devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
        revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
        reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);

        netif_info(adpt, probe, netdev,
                   "hardware id %d.%d, hardware version %d.%d.%d\n",
                   devid, revid,
                   (reg & MAJOR_BMSK) >> MAJOR_SHFT,
                   (reg & MINOR_BMSK) >> MINOR_SHFT,
                   (reg & STEP_BMSK) >> STEP_SHFT);

        return 0;

err_undo_napi:
        netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
        put_device(&adpt->phydev->mdio.dev);
        mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
        emac_clks_teardown(adpt);
err_undo_netdev:
        free_netdev(netdev);

        return ret;
}

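/* Undo everything emac_probe() set up: unregister the net device, tear down
 * NAPI, the clocks and the MDIO bus, and unmap the SGMII PHY registers.
 */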
static int emac_remove(struct platform_device *pdev)
{
        struct net_device *netdev = dev_get_drvdata(&pdev->dev);
        struct emac_adapter *adpt = netdev_priv(netdev);

        unregister_netdev(netdev);
        netif_napi_del(&adpt->rx_q.napi);

        emac_clks_teardown(adpt);

        put_device(&adpt->phydev->mdio.dev);
        mdiobus_unregister(adpt->mii_bus);

        if (adpt->phy.digital)
                iounmap(adpt->phy.digital);
        iounmap(adpt->phy.base);

        free_netdev(netdev);

        return 0;
}

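/* Quiesce the hardware on shutdown: if the interface is up, close the SGMII
 * and reset the MAC so no DMA or interrupts remain active.
 */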
static void emac_shutdown(struct platform_device *pdev)
{
        struct net_device *netdev = dev_get_drvdata(&pdev->dev);
        struct emac_adapter *adpt = netdev_priv(netdev);

        if (netdev->flags & IFF_UP) {
                /* Closing the SGMII turns off its interrupts */
                emac_sgmii_close(adpt);

                /* Resetting the MAC turns off all DMA and its interrupts */
                emac_mac_reset(adpt);
        }
}

static struct platform_driver emac_platform_driver = {
        .probe  = emac_probe,
        .remove = emac_remove,
        .driver = {
                .name             = "qcom-emac",
                .of_match_table   = emac_dt_match,
                .acpi_match_table = ACPI_PTR(emac_acpi_match),
        },
        .shutdown = emac_shutdown,
};

module_platform_driver(emac_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");