// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	/* Enable FPE interrupt */
	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
		value |= GMAC_INT_FPE_EN;

	writel(value, ioaddr + GMAC_INT_EN);

	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}

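/* Enable an Rx queue via its per-queue field in GMAC_RXQ_CTRL0, either for
 * AVB or for DCB/generic traffic; clearing the field first leaves the queue
 * disabled when neither mode is requested.
 */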
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

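/* The per-queue priority fields are split across two registers: queues 0-3
 * live in GMAC_RXQ_CTRL2 (resp. GMAC_TXQ_PRTY_MAP0) and queues 4-7 in
 * GMAC_RXQ_CTRL3 (resp. GMAC_TXQ_PRTY_MAP1), so the queue index is rebased
 * before the field is programmed.
 */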
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
		 GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		 GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

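/* Map an Rx queue onto a DMA channel. Queues 0-3 are programmed through
 * MTL_RXQ_DMA_MAP0 and queues 4-7 through MTL_RXQ_DMA_MAP1; queues 0 and 4
 * use the dedicated Q0/Q4 field, the remaining queues the generic per-queue
 * field.
 */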
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else if (queue > 4) {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

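/* Program the Credit Based Shaper for an AVB queue: enable the AV algorithm
 * and credit control, then write the send slope, the idle slope (which
 * shares the Tx queue weight register) and the high/low credit values.
 */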
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter the LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = et & STMMAC_ET_MAX;
	int regval;

	/* Program LPI entry timer value into register */
	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);

	/* Enable/disable LPI entry timer */
	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (et)
		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
	else
		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;

	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *     status from PHY should be ok before transmitting
	 *     the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *     after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

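/* Write one entry of the extended VLAN filter table. The entry goes through
 * GMAC_VLAN_TAG_DATA and is committed indirectly by writing the offset and
 * the OB (operation busy) bit into GMAC_VLAN_TAG, then polling until the
 * hardware clears OB.
 */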
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int i, timeout = 10;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	for (i = 0; i < timeout; i++) {
		val = readl(ioaddr + GMAC_VLAN_TAG);
		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
			return 0;
		udelay(1);
	}

	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

	return -EBUSY;
}

static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	if (hw->promisc) {
		netdev_err(dev,
			   "Adding VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	if (hw->promisc) {
		netdev_err(dev,
			   "Deleting VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

static void dwmac4_vlan_promisc_enable(struct net_device *dev,
				       struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, 0);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		if (value & GMAC_VLAN_VTHM) {
			value &= ~GMAC_VLAN_VTHM;
			writel(value, ioaddr + GMAC_VLAN_TAG);
		}
	}
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

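/* Program the MAC packet filter: promiscuous mode (with optional routing of
 * VLAN-filter-fail packets to a dedicated queue), pass-all-multicast, the
 * multicast hash filter, perfect unicast filtering and, when offered by the
 * netdev features, VLAN tag filtering. The VLAN promiscuous state is tracked
 * in hw->promisc so the VLAN filters can be restored when promiscuous mode
 * is left.
 */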
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);

	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
		if (!hw->promisc) {
			hw->promisc = 1;
			dwmac4_vlan_promisc_enable(dev, hw);
		}
	} else {
		if (hw->promisc) {
			hw->promisc = 0;
			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
		}
	}
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Unused events (e.g. MMC interrupts) are not handled. */
	if (unlikely(intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

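/* The NRVF field of GMAC_HW_FEATURE3 reports how many extended VLAN tag
 * filters the core provides; an unknown encoding falls back to the single
 * filter held in GMAC_VLAN_TAG.
 */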
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
	u32 val, num_vlan;

	val = readl(ioaddr + GMAC_HW_FEATURE3);
	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:
		num_vlan = 1;
		break;
	case 1:
		num_vlan = 4;
		break;
	case 2:
		num_vlan = 8;
		break;
	case 3:
		num_vlan = 16;
		break;
	case 4:
		num_vlan = 24;
		break;
	case 5:
		num_vlan = 32;
		break;
	default:
		num_vlan = 1;
	}

	return num_vlan;
}

int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

	return 0;
}