// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

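/* Basic MAC setup: apply GMAC_CORE_INIT to MAC_Configuration, force the
 * transmitter and speed bits when a fixed speed is requested through hw->ps,
 * and enable the default MAC, PCS and, when supported, FPE interrupts.
 */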
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	/* Enable FPE interrupt */
	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
		value |= GMAC_INT_FPE_EN;

	writel(value, ioaddr + GMAC_INT_EN);

	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

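/* Program the RX queue priority mapping. Queues 0-3 are configured through
 * GMAC_RXQ_CTRL2 and queues 4-7 through GMAC_RXQ_CTRL3, using the same
 * per-queue PSRQ field layout in both registers.
 */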
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
						GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

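/* Map an MTL RX queue onto a DMA channel. Queues 0-3 live in
 * MTL_RXQ_DMA_MAP0 and queues 4-7 in MTL_RXQ_DMA_MAP1; queues 0 and 4 are
 * programmed through the dedicated Q0/Q4 MDMACH field of their register.
 */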
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else if (queue > 4) {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

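/* Program the Credit Based Shaper parameters of an AVB TX queue: enable
 * the AV algorithm and credit control, then write the send slope, idle
 * slope (shared with the TX queue weight register) and high/low credits.
 */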
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

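/* Enable or disable the RX checksum offload engine (IPC) according to
 * hw->rx_csum and report whether the bit actually stuck in
 * MAC_Configuration.
 */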
static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

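/* Configure Wake-on-LAN through the PMT register: magic packet and/or
 * global unicast wake-up, keeping the receiver enabled before power-down
 * so wake-up frames can still be detected.
 */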
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter in LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = et & STMMAC_ET_MAX;
	int regval;

	/* Program LPI entry timer value into register */
	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);

	/* Enable/disable LPI entry timer */
	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (et)
		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
	else
		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;

	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

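/* Write one entry of the extended VLAN filter: load MAC_VLAN_Tag_Data,
 * start the indirect access by setting the OB bit in MAC_VLAN_Tag, and
 * poll (up to ~10us) until the hardware clears OB again.
 */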
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int i, timeout = 10;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	for (i = 0; i < timeout; i++) {
		val = readl(ioaddr + GMAC_VLAN_TAG);
		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
			return 0;
		udelay(1);
	}

	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

	return -EBUSY;
}

static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	if (hw->promisc) {
		netdev_err(dev,
			   "Adding VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	if (hw->promisc) {
		netdev_err(dev,
			   "Deleting VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

static void dwmac4_vlan_promisc_enable(struct net_device *dev,
				       struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, 0);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		if (value & GMAC_VLAN_VTHM) {
			value &= ~GMAC_VLAN_VTHM;
			writel(value, ioaddr + GMAC_VLAN_TAG);
		}
	}
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

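/* Program the MAC packet filter from the netdev state: promiscuous mode
 * (optionally steering VLAN-tag-failed packets to a dedicated RX queue),
 * all-multi or hash-based multicast filtering, perfect unicast filtering
 * and, when offered by the stack, VLAN filtering.
 */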
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);

	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
		if (!hw->promisc) {
			hw->promisc = 1;
			dwmac4_vlan_promisc_enable(dev, hw);
		}
	} else {
		if (hw->promisc) {
			hw->promisc = 0;
			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
		}
	}
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

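/* Handle the MAC-level interrupt sources (MMC, PMT, LPI and PCS/RGMII
 * events), update the extra statistics and return the CORE_IRQ_* LPI
 * entry/exit flags to the caller.
 */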
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if (unlikely(intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			      >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

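/* Update VLAN filtering in MAC_VLAN_Tag: use hash matching when a hash
 * value is provided, a single perfect match otherwise, or disable VLAN
 * matching entirely when neither is set.
 */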
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

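/* Configure one L3 (IP address) filter entry and globally enable L3/L4
 * filtering in the packet filter. For IPv6 only one of the SA/DA
 * comparators can be active at a time.
 */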
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

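/* Decode the NRVF field of MAC_HW_Feature3 into the number of extended
 * VLAN tag filter entries; a value of 1 means only the single VLAN filter
 * in MAC_VLAN_Tag is available.
 */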
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
	u32 val, num_vlan;

	val = readl(ioaddr + GMAC_HW_FEATURE3);
	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:
		num_vlan = 1;
		break;
	case 1:
		num_vlan = 4;
		break;
	case 2:
		num_vlan = 8;
		break;
	case 3:
		num_vlan = 16;
		break;
	case 4:
		num_vlan = 24;
		break;
	case 5:
		num_vlan = 32;
		break;
	default:
		num_vlan = 1;
	}

	return num_vlan;
}

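/* Fill in the mac_device_info fields shared by the DWMAC4/5 variants:
 * filter sizes, link speed configuration bits, MDIO register layout and
 * the number of supported VLAN filter entries.
 */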
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

	return 0;
}