1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54 * with fine resolution and binary rollover. This avoids non-monotonic behavior
55 * (clock jumps) when changing timestamping settings at runtime.
56 */
57 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58 PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO 5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX 256
82 #define STMMAC_TX_XSK_AVAIL 16
83 #define STMMAC_RX_FILL_BATCH 16
84
85 #define STMMAC_XDP_PASS 0
86 #define STMMAC_XDP_CONSUMED BIT(0)
87 #define STMMAC_XDP_TX BIT(1)
88 #define STMMAC_XDP_REDIRECT BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK 256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER 1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121 * but allow user to force to use the chain instead of the ring
122 */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
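/**
 * stmmac_bus_clks_config - enable or disable the platform bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: on enable, prepare/enable stmmac_clk and pclk and then
 * invoke the optional platform clks_config() callback, unwinding on
 * failure; on disable, release the same resources.
 */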
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 int ret = 0;
152
153 if (enabled) {
154 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 if (ret)
156 return ret;
157 ret = clk_prepare_enable(priv->plat->pclk);
158 if (ret) {
159 clk_disable_unprepare(priv->plat->stmmac_clk);
160 return ret;
161 }
162 if (priv->plat->clks_config) {
163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 if (ret) {
165 clk_disable_unprepare(priv->plat->stmmac_clk);
166 clk_disable_unprepare(priv->plat->pclk);
167 return ret;
168 }
169 }
170 } else {
171 clk_disable_unprepare(priv->plat->stmmac_clk);
172 clk_disable_unprepare(priv->plat->pclk);
173 if (priv->plat->clks_config)
174 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 }
176
177 return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
180
181 /**
182 * stmmac_verify_args - verify the driver parameters.
183 * Description: it checks the driver parameters and sets a default in case of
184 * errors.
185 */
186 static void stmmac_verify_args(void)
187 {
188 if (unlikely(watchdog < 0))
189 watchdog = TX_TIMEO;
190 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191 buf_sz = DEFAULT_BUFSIZE;
192 if (unlikely(flow_ctrl > 1))
193 flow_ctrl = FLOW_AUTO;
194 else if (likely(flow_ctrl < 0))
195 flow_ctrl = FLOW_OFF;
196 if (unlikely((pause < 0) || (pause > 0xffff)))
197 pause = PAUSE_TIME;
198 if (eee_timer < 0)
199 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
200 }
201
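/**
 * __stmmac_disable_all_queues - Disable NAPI for all channels
 * @priv: driver private structure
 * Description: disable the rx/tx NAPI instances of every channel, or
 * the combined rxtx NAPI for AF_XDP zero-copy queues.
 */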
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203 {
204 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
205 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207 u32 queue;
208
209 for (queue = 0; queue < maxq; queue++) {
210 struct stmmac_channel *ch = &priv->channel[queue];
211
212 if (stmmac_xdp_is_enabled(priv) &&
213 test_bit(queue, priv->af_xdp_zc_qps)) {
214 napi_disable(&ch->rxtx_napi);
215 continue;
216 }
217
218 if (queue < rx_queues_cnt)
219 napi_disable(&ch->rx_napi);
220 if (queue < tx_queues_cnt)
221 napi_disable(&ch->tx_napi);
222 }
223 }
224
225 /**
226 * stmmac_disable_all_queues - Disable all queues
227 * @priv: driver private structure
228 */
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 struct stmmac_rx_queue *rx_q;
233 u32 queue;
234
235 /* synchronize_rcu() needed for pending XDP buffers to drain */
236 for (queue = 0; queue < rx_queues_cnt; queue++) {
237 rx_q = &priv->dma_conf.rx_queue[queue];
238 if (rx_q->xsk_pool) {
239 synchronize_rcu();
240 break;
241 }
242 }
243
244 __stmmac_disable_all_queues(priv);
245 }
246
247 /**
248 * stmmac_enable_all_queues - Enable all queues
249 * @priv: driver private structure
250 */
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252 {
253 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
254 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
255 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256 u32 queue;
257
258 for (queue = 0; queue < maxq; queue++) {
259 struct stmmac_channel *ch = &priv->channel[queue];
260
261 if (stmmac_xdp_is_enabled(priv) &&
262 test_bit(queue, priv->af_xdp_zc_qps)) {
263 napi_enable(&ch->rxtx_napi);
264 continue;
265 }
266
267 if (queue < rx_queues_cnt)
268 napi_enable(&ch->rx_napi);
269 if (queue < tx_queues_cnt)
270 napi_enable(&ch->tx_napi);
271 }
272 }
273
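/* Schedule the service task unless the interface is going down or the
 * task is already scheduled.
 */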
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
275 {
276 if (!test_bit(STMMAC_DOWN, &priv->state) &&
277 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
278 queue_work(priv->wq, &priv->service_task);
279 }
280
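/* Fatal error handling: take the carrier off, request a reset and let
 * the service task perform it.
 */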
281 static void stmmac_global_err(struct stmmac_priv *priv)
282 {
283 netif_carrier_off(priv->dev);
284 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
285 stmmac_service_event_schedule(priv);
286 }
287
288 /**
289 * stmmac_clk_csr_set - dynamically set the MDC clock
290 * @priv: driver private structure
291 * Description: this is to dynamically set the MDC clock according to the csr
292 * clock input.
293 * Note:
294 * If a specific clk_csr value is passed from the platform
295 * this means that the CSR Clock Range selection cannot be
296 * changed at run-time and it is fixed (as reported in the driver
297 * documentation). Otherwise the driver will try to set the MDC
298 * clock dynamically according to the actual clock input.
299 */
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
301 {
302 u32 clk_rate;
303
304 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
305
306 /* Platform provided default clk_csr would be assumed valid
307 * for all other cases except for the below mentioned ones.
308 * For values higher than the IEEE 802.3 specified frequency
309 * we cannot estimate the proper divider because the frequency of
310 * clk_csr_i is not known. So we do not change the default
311 * divider.
312 */
313 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
314 if (clk_rate < CSR_F_35M)
315 priv->clk_csr = STMMAC_CSR_20_35M;
316 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
317 priv->clk_csr = STMMAC_CSR_35_60M;
318 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
319 priv->clk_csr = STMMAC_CSR_60_100M;
320 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
321 priv->clk_csr = STMMAC_CSR_100_150M;
322 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
323 priv->clk_csr = STMMAC_CSR_150_250M;
324 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
325 priv->clk_csr = STMMAC_CSR_250_300M;
326 }
327
328 if (priv->plat->has_sun8i) {
329 if (clk_rate > 160000000)
330 priv->clk_csr = 0x03;
331 else if (clk_rate > 80000000)
332 priv->clk_csr = 0x02;
333 else if (clk_rate > 40000000)
334 priv->clk_csr = 0x01;
335 else
336 priv->clk_csr = 0;
337 }
338
339 if (priv->plat->has_xgmac) {
340 if (clk_rate > 400000000)
341 priv->clk_csr = 0x5;
342 else if (clk_rate > 350000000)
343 priv->clk_csr = 0x4;
344 else if (clk_rate > 300000000)
345 priv->clk_csr = 0x3;
346 else if (clk_rate > 250000000)
347 priv->clk_csr = 0x2;
348 else if (clk_rate > 150000000)
349 priv->clk_csr = 0x1;
350 else
351 priv->clk_csr = 0x0;
352 }
353 }
354
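/* Hex-dump a packet buffer (debug helper) */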
355 static void print_pkt(unsigned char *buf, int len)
356 {
357 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
358 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
359 }
360
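/**
 * stmmac_tx_avail - Get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */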
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
362 {
363 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
364 u32 avail;
365
366 if (tx_q->dirty_tx > tx_q->cur_tx)
367 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
368 else
369 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
370
371 return avail;
372 }
373
374 /**
375 * stmmac_rx_dirty - Get RX queue dirty
376 * @priv: driver private structure
377 * @queue: RX queue index
378 */
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380 {
381 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382 u32 dirty;
383
384 if (rx_q->dirty_rx <= rx_q->cur_rx)
385 dirty = rx_q->cur_rx - rx_q->dirty_rx;
386 else
387 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388
389 return dirty;
390 }
391
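/* Configure the hardware LPI entry timer: when it is enabled the SW EEE
 * timer is not used, and vice versa.
 */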
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393 {
394 int tx_lpi_timer;
395
396 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
397 priv->eee_sw_timer_en = en ? 0 : 1;
398 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
399 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400 }
401
402 /**
403 * stmmac_enable_eee_mode - check and enter in LPI mode
404 * @priv: driver private structure
405 * Description: verify that all TX queues have finished their work and,
406 * if so, enter LPI mode when EEE is enabled.
407 */
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409 {
410 u32 tx_cnt = priv->plat->tx_queues_to_use;
411 u32 queue;
412
413 /* check if all TX queues have the work finished */
414 for (queue = 0; queue < tx_cnt; queue++) {
415 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416
417 if (tx_q->dirty_tx != tx_q->cur_tx)
418 return -EBUSY; /* still unfinished work */
419 }
420
421 /* Check and enter in LPI mode */
422 if (!priv->tx_path_in_lpi_mode)
423 stmmac_set_eee_mode(priv, priv->hw,
424 priv->plat->en_tx_lpi_clockgating);
425 return 0;
426 }
427
428 /**
429 * stmmac_disable_eee_mode - disable and exit from LPI mode
430 * @priv: driver private structure
431 * Description: exit LPI mode and disable EEE when the LPI state is active.
432 * This is called from the xmit path.
433 */
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
435 {
436 if (!priv->eee_sw_timer_en) {
437 stmmac_lpi_entry_timer_config(priv, 0);
438 return;
439 }
440
441 stmmac_reset_eee_mode(priv, priv->hw);
442 del_timer_sync(&priv->eee_ctrl_timer);
443 priv->tx_path_in_lpi_mode = false;
444 }
445
446 /**
447 * stmmac_eee_ctrl_timer - EEE TX SW timer.
448 * @t: timer_list struct containing private info
449 * Description:
450 * if there is no data transfer and if we are not in LPI state,
451 * then the MAC transmitter can be moved to LPI state.
452 */
453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
454 {
455 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
456
457 if (stmmac_enable_eee_mode(priv))
458 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
459 }
460
461 /**
462 * stmmac_eee_init - init EEE
463 * @priv: driver private structure
464 * Description:
465 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
466 * can also manage EEE, this function enables the LPI state and starts the
467 * related timer.
468 */
469 bool stmmac_eee_init(struct stmmac_priv *priv)
470 {
471 int eee_tw_timer = priv->eee_tw_timer;
472
473 /* When using a PCS we cannot access the PHY registers at this stage,
474 * so we do not support extra features like EEE.
475 */
476 if (priv->hw->pcs == STMMAC_PCS_TBI ||
477 priv->hw->pcs == STMMAC_PCS_RTBI)
478 return false;
479
480 /* Check if MAC core supports the EEE feature. */
481 if (!priv->dma_cap.eee)
482 return false;
483
484 mutex_lock(&priv->lock);
485
486 /* Check if it needs to be deactivated */
487 if (!priv->eee_active) {
488 if (priv->eee_enabled) {
489 netdev_dbg(priv->dev, "disable EEE\n");
490 stmmac_lpi_entry_timer_config(priv, 0);
491 del_timer_sync(&priv->eee_ctrl_timer);
492 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
493 if (priv->hw->xpcs)
494 xpcs_config_eee(priv->hw->xpcs,
495 priv->plat->mult_fact_100ns,
496 false);
497 }
498 mutex_unlock(&priv->lock);
499 return false;
500 }
501
502 if (priv->eee_active && !priv->eee_enabled) {
503 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
504 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
505 eee_tw_timer);
506 if (priv->hw->xpcs)
507 xpcs_config_eee(priv->hw->xpcs,
508 priv->plat->mult_fact_100ns,
509 true);
510 }
511
512 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
513 del_timer_sync(&priv->eee_ctrl_timer);
514 priv->tx_path_in_lpi_mode = false;
515 stmmac_lpi_entry_timer_config(priv, 1);
516 } else {
517 stmmac_lpi_entry_timer_config(priv, 0);
518 mod_timer(&priv->eee_ctrl_timer,
519 STMMAC_LPI_T(priv->tx_lpi_timer));
520 }
521
522 mutex_unlock(&priv->lock);
523 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524 return true;
525 }
526
527 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
528 * @priv: driver private structure
529 * @p : descriptor pointer
530 * @skb : the socket buffer
531 * Description :
532 * This function reads the timestamp from the descriptor, performs some
533 * sanity checks and passes it to the stack.
534 */
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
536 struct dma_desc *p, struct sk_buff *skb)
537 {
538 struct skb_shared_hwtstamps shhwtstamp;
539 bool found = false;
540 u64 ns = 0;
541
542 if (!priv->hwts_tx_en)
543 return;
544
545 /* exit if skb doesn't support hw tstamp */
546 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
547 return;
548
549 /* check tx tstamp status */
550 if (stmmac_get_tx_timestamp_status(priv, p)) {
551 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
552 found = true;
553 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
554 found = true;
555 }
556
557 if (found) {
558 ns -= priv->plat->cdc_error_adj;
559
560 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
561 shhwtstamp.hwtstamp = ns_to_ktime(ns);
562
563 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
564 /* pass tstamp to stack */
565 skb_tstamp_tx(skb, &shhwtstamp);
566 }
567 }
568
569 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
570 * @priv: driver private structure
571 * @p : descriptor pointer
572 * @np : next descriptor pointer
573 * @skb : the socket buffer
574 * Description :
575 * This function will read received packet's timestamp from the descriptor
576 * and pass it to the stack. It also performs some sanity checks.
577 */
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579 struct dma_desc *np, struct sk_buff *skb)
580 {
581 struct skb_shared_hwtstamps *shhwtstamp = NULL;
582 struct dma_desc *desc = p;
583 u64 ns = 0;
584
585 if (!priv->hwts_rx_en)
586 return;
587 /* For GMAC4, the valid timestamp is from CTX next desc. */
588 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
589 desc = np;
590
591 /* Check if timestamp is available */
592 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
593 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
594
595 ns -= priv->plat->cdc_error_adj;
596
597 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598 shhwtstamp = skb_hwtstamps(skb);
599 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600 shhwtstamp->hwtstamp = ns_to_ktime(ns);
601 } else {
602 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603 }
604 }
605
606 /**
607 * stmmac_hwtstamp_set - control hardware timestamping.
608 * @dev: device pointer.
609 * @ifr: An IOCTL-specific structure that can contain a pointer to
610 * a proprietary structure used to pass information to the driver.
611 * Description:
612 * This function configures the MAC to enable/disable both outgoing(TX)
613 * and incoming(RX) packets time stamping based on user input.
614 * Return Value:
615 * 0 on success and an appropriate -ve integer on failure.
616 */
617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
618 {
619 struct stmmac_priv *priv = netdev_priv(dev);
620 struct hwtstamp_config config;
621 u32 ptp_v2 = 0;
622 u32 tstamp_all = 0;
623 u32 ptp_over_ipv4_udp = 0;
624 u32 ptp_over_ipv6_udp = 0;
625 u32 ptp_over_ethernet = 0;
626 u32 snap_type_sel = 0;
627 u32 ts_master_en = 0;
628 u32 ts_event_en = 0;
629
630 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
631 netdev_alert(priv->dev, "No support for HW time stamping\n");
632 priv->hwts_tx_en = 0;
633 priv->hwts_rx_en = 0;
634
635 return -EOPNOTSUPP;
636 }
637
638 if (copy_from_user(&config, ifr->ifr_data,
639 sizeof(config)))
640 return -EFAULT;
641
642 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643 __func__, config.flags, config.tx_type, config.rx_filter);
644
645 if (config.tx_type != HWTSTAMP_TX_OFF &&
646 config.tx_type != HWTSTAMP_TX_ON)
647 return -ERANGE;
648
649 if (priv->adv_ts) {
650 switch (config.rx_filter) {
651 case HWTSTAMP_FILTER_NONE:
652 /* time stamp no incoming packet at all */
653 config.rx_filter = HWTSTAMP_FILTER_NONE;
654 break;
655
656 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
657 /* PTP v1, UDP, any kind of event packet */
658 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
659 /* 'xmac' hardware can support Sync, Pdelay_Req and
660 * Pdelay_resp by setting bit14 and bits17/16 to 01
661 * This leaves Delay_Req timestamps out.
662 * Enable all events *and* general purpose message
663 * timestamping
664 */
665 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
666 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668 break;
669
670 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
671 /* PTP v1, UDP, Sync packet */
672 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
673 /* take time stamp for SYNC messages only */
674 ts_event_en = PTP_TCR_TSEVNTENA;
675
676 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 break;
679
680 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
681 /* PTP v1, UDP, Delay_req packet */
682 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
683 /* take time stamp for Delay_Req messages only */
684 ts_master_en = PTP_TCR_TSMSTRENA;
685 ts_event_en = PTP_TCR_TSEVNTENA;
686
687 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689 break;
690
691 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
692 /* PTP v2, UDP, any kind of event packet */
693 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
694 ptp_v2 = PTP_TCR_TSVER2ENA;
695 /* take time stamp for all event messages */
696 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697
698 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700 break;
701
702 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
703 /* PTP v2, UDP, Sync packet */
704 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
705 ptp_v2 = PTP_TCR_TSVER2ENA;
706 /* take time stamp for SYNC messages only */
707 ts_event_en = PTP_TCR_TSEVNTENA;
708
709 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711 break;
712
713 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
714 /* PTP v2, UDP, Delay_req packet */
715 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
716 ptp_v2 = PTP_TCR_TSVER2ENA;
717 /* take time stamp for Delay_Req messages only */
718 ts_master_en = PTP_TCR_TSMSTRENA;
719 ts_event_en = PTP_TCR_TSEVNTENA;
720
721 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
722 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
723 break;
724
725 case HWTSTAMP_FILTER_PTP_V2_EVENT:
726 /* PTP v2/802.AS1 any layer, any kind of event packet */
727 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
728 ptp_v2 = PTP_TCR_TSVER2ENA;
729 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
730 if (priv->synopsys_id < DWMAC_CORE_4_10)
731 ts_event_en = PTP_TCR_TSEVNTENA;
732 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
733 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
734 ptp_over_ethernet = PTP_TCR_TSIPENA;
735 break;
736
737 case HWTSTAMP_FILTER_PTP_V2_SYNC:
738 /* PTP v2/802.AS1, any layer, Sync packet */
739 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
740 ptp_v2 = PTP_TCR_TSVER2ENA;
741 /* take time stamp for SYNC messages only */
742 ts_event_en = PTP_TCR_TSEVNTENA;
743
744 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
745 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
746 ptp_over_ethernet = PTP_TCR_TSIPENA;
747 break;
748
749 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
750 /* PTP v2/802.AS1, any layer, Delay_req packet */
751 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
752 ptp_v2 = PTP_TCR_TSVER2ENA;
753 /* take time stamp for Delay_Req messages only */
754 ts_master_en = PTP_TCR_TSMSTRENA;
755 ts_event_en = PTP_TCR_TSEVNTENA;
756
757 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
758 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
759 ptp_over_ethernet = PTP_TCR_TSIPENA;
760 break;
761
762 case HWTSTAMP_FILTER_NTP_ALL:
763 case HWTSTAMP_FILTER_ALL:
764 /* time stamp any incoming packet */
765 config.rx_filter = HWTSTAMP_FILTER_ALL;
766 tstamp_all = PTP_TCR_TSENALL;
767 break;
768
769 default:
770 return -ERANGE;
771 }
772 } else {
773 switch (config.rx_filter) {
774 case HWTSTAMP_FILTER_NONE:
775 config.rx_filter = HWTSTAMP_FILTER_NONE;
776 break;
777 default:
778 /* PTP v1, UDP, any kind of event packet */
779 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
780 break;
781 }
782 }
783 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
784 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
785
786 priv->systime_flags = STMMAC_HWTS_ACTIVE;
787
788 if (priv->hwts_tx_en || priv->hwts_rx_en) {
789 priv->systime_flags |= tstamp_all | ptp_v2 |
790 ptp_over_ethernet | ptp_over_ipv6_udp |
791 ptp_over_ipv4_udp | ts_event_en |
792 ts_master_en | snap_type_sel;
793 }
794
795 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
796
797 memcpy(&priv->tstamp_config, &config, sizeof(config));
798
799 return copy_to_user(ifr->ifr_data, &config,
800 sizeof(config)) ? -EFAULT : 0;
801 }
802
803 /**
804 * stmmac_hwtstamp_get - read hardware timestamping.
805 * @dev: device pointer.
806 * @ifr: An IOCTL-specific structure that can contain a pointer to
807 * a proprietary structure used to pass information to the driver.
808 * Description:
809 * This function obtains the current hardware timestamping settings
810 * as requested.
811 */
812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813 {
814 struct stmmac_priv *priv = netdev_priv(dev);
815 struct hwtstamp_config *config = &priv->tstamp_config;
816
817 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818 return -EOPNOTSUPP;
819
820 return copy_to_user(ifr->ifr_data, config,
821 sizeof(*config)) ? -EFAULT : 0;
822 }
823
824 /**
825 * stmmac_init_tstamp_counter - init hardware timestamping counter
826 * @priv: driver private structure
827 * @systime_flags: timestamping flags
828 * Description:
829 * Initialize hardware counter for packet timestamping.
830 * This is valid as long as the interface is open and not suspended.
831 * Will be rerun after resuming from suspend, in which case the timestamping
832 * flags updated by stmmac_hwtstamp_set() also need to be restored.
833 */
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835 {
836 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837 struct timespec64 now;
838 u32 sec_inc = 0;
839 u64 temp = 0;
840
841 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842 return -EOPNOTSUPP;
843
844 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845 priv->systime_flags = systime_flags;
846
847 /* program Sub Second Increment reg */
848 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849 priv->plat->clk_ptp_rate,
850 xmac, &sec_inc);
851 temp = div_u64(1000000000ULL, sec_inc);
852
853 /* Store sub second increment for later use */
854 priv->sub_second_inc = sec_inc;
855
856 /* calculate default added value:
857 * formula is :
858 * addend = (2^32)/freq_div_ratio;
859 * where, freq_div_ratio = 1e9ns/sec_inc
860 */
861 temp = (u64)(temp << 32);
862 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864
865 /* initialize system time */
866 ktime_get_real_ts64(&now);
867
868 /* lower 32 bits of tv_sec are safe until y2106 */
869 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870
871 return 0;
872 }
873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
874
875 /**
876 * stmmac_init_ptp - init PTP
877 * @priv: driver private structure
878 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
879 * This is done by looking at the HW cap. register.
880 * This function also registers the ptp driver.
881 */
882 static int stmmac_init_ptp(struct stmmac_priv *priv)
883 {
884 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885 int ret;
886
887 if (priv->plat->ptp_clk_freq_config)
888 priv->plat->ptp_clk_freq_config(priv);
889
890 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891 if (ret)
892 return ret;
893
894 priv->adv_ts = 0;
895 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
896 if (xmac && priv->dma_cap.atime_stamp)
897 priv->adv_ts = 1;
898 /* Dwmac 3.x core with extend_desc can support adv_ts */
899 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900 priv->adv_ts = 1;
901
902 if (priv->dma_cap.time_stamp)
903 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
904
905 if (priv->adv_ts)
906 netdev_info(priv->dev,
907 "IEEE 1588-2008 Advanced Timestamp supported\n");
908
909 priv->hwts_tx_en = 0;
910 priv->hwts_rx_en = 0;
911
912 return 0;
913 }
914
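/* Release PTP resources: disable the PTP reference clock and unregister
 * the PTP clock driver.
 */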
915 static void stmmac_release_ptp(struct stmmac_priv *priv)
916 {
917 clk_disable_unprepare(priv->plat->clk_ptp_ref);
918 stmmac_ptp_unregister(priv);
919 }
920
921 /**
922 * stmmac_mac_flow_ctrl - Configure flow control in all queues
923 * @priv: driver private structure
924 * @duplex: duplex passed to the next function
925 * Description: It is used for configuring the flow control in all queues
926 */
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
928 {
929 u32 tx_cnt = priv->plat->tx_queues_to_use;
930
931 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
932 priv->pause, tx_cnt);
933 }
934
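/* phylink .mac_select_pcs callback: return the XPCS instance, if the
 * core has one.
 */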
935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
936 phy_interface_t interface)
937 {
938 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
939
940 if (!priv->hw->xpcs)
941 return NULL;
942
943 return &priv->hw->xpcs->pcs;
944 }
945
946 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
947 const struct phylink_link_state *state)
948 {
949 /* Nothing to do, xpcs_config() handles everything */
950 }
951
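/* On link up, kick off the FPE verification handshake (if enabled);
 * on link down, reset the local and link-partner FPE states.
 */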
952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
953 {
954 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
955 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
956 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
957 bool *hs_enable = &fpe_cfg->hs_enable;
958
959 if (is_up && *hs_enable) {
960 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
961 } else {
962 *lo_state = FPE_STATE_OFF;
963 *lp_state = FPE_STATE_OFF;
964 }
965 }
966
967 static void stmmac_mac_link_down(struct phylink_config *config,
968 unsigned int mode, phy_interface_t interface)
969 {
970 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971
972 stmmac_mac_set(priv, priv->ioaddr, false);
973 priv->eee_active = false;
974 priv->tx_lpi_enabled = false;
975 priv->eee_enabled = stmmac_eee_init(priv);
976 stmmac_set_eee_pls(priv, priv->hw, false);
977
978 if (priv->dma_cap.fpesel)
979 stmmac_fpe_link_state_handle(priv, false);
980 }
981
982 static void stmmac_mac_link_up(struct phylink_config *config,
983 struct phy_device *phy,
984 unsigned int mode, phy_interface_t interface,
985 int speed, int duplex,
986 bool tx_pause, bool rx_pause)
987 {
988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989 u32 old_ctrl, ctrl;
990
991 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
992 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
993
994 if (interface == PHY_INTERFACE_MODE_USXGMII) {
995 switch (speed) {
996 case SPEED_10000:
997 ctrl |= priv->hw->link.xgmii.speed10000;
998 break;
999 case SPEED_5000:
1000 ctrl |= priv->hw->link.xgmii.speed5000;
1001 break;
1002 case SPEED_2500:
1003 ctrl |= priv->hw->link.xgmii.speed2500;
1004 break;
1005 default:
1006 return;
1007 }
1008 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1009 switch (speed) {
1010 case SPEED_100000:
1011 ctrl |= priv->hw->link.xlgmii.speed100000;
1012 break;
1013 case SPEED_50000:
1014 ctrl |= priv->hw->link.xlgmii.speed50000;
1015 break;
1016 case SPEED_40000:
1017 ctrl |= priv->hw->link.xlgmii.speed40000;
1018 break;
1019 case SPEED_25000:
1020 ctrl |= priv->hw->link.xlgmii.speed25000;
1021 break;
1022 case SPEED_10000:
1023 ctrl |= priv->hw->link.xgmii.speed10000;
1024 break;
1025 case SPEED_2500:
1026 ctrl |= priv->hw->link.speed2500;
1027 break;
1028 case SPEED_1000:
1029 ctrl |= priv->hw->link.speed1000;
1030 break;
1031 default:
1032 return;
1033 }
1034 } else {
1035 switch (speed) {
1036 case SPEED_2500:
1037 ctrl |= priv->hw->link.speed2500;
1038 break;
1039 case SPEED_1000:
1040 ctrl |= priv->hw->link.speed1000;
1041 break;
1042 case SPEED_100:
1043 ctrl |= priv->hw->link.speed100;
1044 break;
1045 case SPEED_10:
1046 ctrl |= priv->hw->link.speed10;
1047 break;
1048 default:
1049 return;
1050 }
1051 }
1052
1053 priv->speed = speed;
1054
1055 if (priv->plat->fix_mac_speed)
1056 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1057
1058 if (!duplex)
1059 ctrl &= ~priv->hw->link.duplex;
1060 else
1061 ctrl |= priv->hw->link.duplex;
1062
1063 /* Flow Control operation */
1064 if (rx_pause && tx_pause)
1065 priv->flow_ctrl = FLOW_AUTO;
1066 else if (rx_pause && !tx_pause)
1067 priv->flow_ctrl = FLOW_RX;
1068 else if (!rx_pause && tx_pause)
1069 priv->flow_ctrl = FLOW_TX;
1070 else
1071 priv->flow_ctrl = FLOW_OFF;
1072
1073 stmmac_mac_flow_ctrl(priv, duplex);
1074
1075 if (ctrl != old_ctrl)
1076 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1077
1078 stmmac_mac_set(priv, priv->ioaddr, true);
1079 if (phy && priv->dma_cap.eee) {
1080 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1081 priv->eee_enabled = stmmac_eee_init(priv);
1082 priv->tx_lpi_enabled = priv->eee_enabled;
1083 stmmac_set_eee_pls(priv, priv->hw, true);
1084 }
1085
1086 if (priv->dma_cap.fpesel)
1087 stmmac_fpe_link_state_handle(priv, true);
1088 }
1089
1090 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1091 .validate = phylink_generic_validate,
1092 .mac_select_pcs = stmmac_mac_select_pcs,
1093 .mac_config = stmmac_mac_config,
1094 .mac_link_down = stmmac_mac_link_down,
1095 .mac_link_up = stmmac_mac_link_up,
1096 };
1097
1098 /**
1099 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1100 * @priv: driver private structure
1101 * Description: this is to verify if the HW supports the PCS.
1102 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1103 * configured for the TBI, RTBI, or SGMII PHY interface.
1104 */
1105 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1106 {
1107 int interface = priv->plat->interface;
1108
1109 if (priv->dma_cap.pcs) {
1110 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1111 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1112 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1113 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1114 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1115 priv->hw->pcs = STMMAC_PCS_RGMII;
1116 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1117 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1118 priv->hw->pcs = STMMAC_PCS_SGMII;
1119 }
1120 }
1121 }
1122
1123 /**
1124 * stmmac_init_phy - PHY initialization
1125 * @dev: net device structure
1126 * Description: it initializes the driver's PHY state, and attaches the PHY
1127 * to the mac driver.
1128 * Return value:
1129 * 0 on success
1130 */
1131 static int stmmac_init_phy(struct net_device *dev)
1132 {
1133 struct stmmac_priv *priv = netdev_priv(dev);
1134 struct fwnode_handle *fwnode;
1135 int ret;
1136
1137 fwnode = of_fwnode_handle(priv->plat->phylink_node);
1138 if (!fwnode)
1139 fwnode = dev_fwnode(priv->device);
1140
1141 if (fwnode)
1142 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1143
1144 /* Some DT bindings do not set up the PHY handle. Let's try to
1145 * manually parse it
1146 */
1147 if (!fwnode || ret) {
1148 int addr = priv->plat->phy_addr;
1149 struct phy_device *phydev;
1150
1151 if (addr < 0) {
1152 netdev_err(priv->dev, "no phy found\n");
1153 return -ENODEV;
1154 }
1155
1156 phydev = mdiobus_get_phy(priv->mii, addr);
1157 if (!phydev) {
1158 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1159 return -ENODEV;
1160 }
1161
1162 ret = phylink_connect_phy(priv->phylink, phydev);
1163 }
1164
1165 if (!priv->plat->pmt) {
1166 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1167
1168 phylink_ethtool_get_wol(priv->phylink, &wol);
1169 device_set_wakeup_capable(priv->device, !!wol.supported);
1170 }
1171
1172 return ret;
1173 }
1174
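/**
 * stmmac_phy_setup - create and configure the phylink instance
 * @priv: driver private structure
 * Description: fill in the phylink configuration (supported interfaces
 * and MAC capabilities derived from the core features and the platform
 * max_speed) and create the phylink instance.
 */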
1175 static int stmmac_phy_setup(struct stmmac_priv *priv)
1176 {
1177 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1178 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1179 int max_speed = priv->plat->max_speed;
1180 int mode = priv->plat->phy_interface;
1181 struct phylink *phylink;
1182
1183 priv->phylink_config.dev = &priv->dev->dev;
1184 priv->phylink_config.type = PHYLINK_NETDEV;
1185 if (priv->plat->mdio_bus_data)
1186 priv->phylink_config.ovr_an_inband =
1187 mdio_bus_data->xpcs_an_inband;
1188
1189 if (!fwnode)
1190 fwnode = dev_fwnode(priv->device);
1191
1192 /* Set the platform/firmware specified interface mode */
1193 __set_bit(mode, priv->phylink_config.supported_interfaces);
1194
1195 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1196 if (priv->hw->xpcs)
1197 xpcs_get_interfaces(priv->hw->xpcs,
1198 priv->phylink_config.supported_interfaces);
1199
1200 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1201 MAC_10 | MAC_100;
1202
1203 if (!max_speed || max_speed >= 1000)
1204 priv->phylink_config.mac_capabilities |= MAC_1000;
1205
1206 if (priv->plat->has_gmac4) {
1207 if (!max_speed || max_speed >= 2500)
1208 priv->phylink_config.mac_capabilities |= MAC_2500FD;
1209 } else if (priv->plat->has_xgmac) {
1210 if (!max_speed || max_speed >= 2500)
1211 priv->phylink_config.mac_capabilities |= MAC_2500FD;
1212 if (!max_speed || max_speed >= 5000)
1213 priv->phylink_config.mac_capabilities |= MAC_5000FD;
1214 if (!max_speed || max_speed >= 10000)
1215 priv->phylink_config.mac_capabilities |= MAC_10000FD;
1216 if (!max_speed || max_speed >= 25000)
1217 priv->phylink_config.mac_capabilities |= MAC_25000FD;
1218 if (!max_speed || max_speed >= 40000)
1219 priv->phylink_config.mac_capabilities |= MAC_40000FD;
1220 if (!max_speed || max_speed >= 50000)
1221 priv->phylink_config.mac_capabilities |= MAC_50000FD;
1222 if (!max_speed || max_speed >= 100000)
1223 priv->phylink_config.mac_capabilities |= MAC_100000FD;
1224 }
1225
1226 /* Half-Duplex can only work with single queue */
1227 if (priv->plat->tx_queues_to_use > 1)
1228 priv->phylink_config.mac_capabilities &=
1229 ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1230 priv->phylink_config.mac_managed_pm = true;
1231
1232 phylink = phylink_create(&priv->phylink_config, fwnode,
1233 mode, &stmmac_phylink_mac_ops);
1234 if (IS_ERR(phylink))
1235 return PTR_ERR(phylink);
1236
1237 priv->phylink = phylink;
1238 return 0;
1239 }
1240
1241 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1242 struct stmmac_dma_conf *dma_conf)
1243 {
1244 u32 rx_cnt = priv->plat->rx_queues_to_use;
1245 unsigned int desc_size;
1246 void *head_rx;
1247 u32 queue;
1248
1249 /* Display RX rings */
1250 for (queue = 0; queue < rx_cnt; queue++) {
1251 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1252
1253 pr_info("\tRX Queue %u rings\n", queue);
1254
1255 if (priv->extend_desc) {
1256 head_rx = (void *)rx_q->dma_erx;
1257 desc_size = sizeof(struct dma_extended_desc);
1258 } else {
1259 head_rx = (void *)rx_q->dma_rx;
1260 desc_size = sizeof(struct dma_desc);
1261 }
1262
1263 /* Display RX ring */
1264 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1265 rx_q->dma_rx_phy, desc_size);
1266 }
1267 }
1268
1269 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1270 struct stmmac_dma_conf *dma_conf)
1271 {
1272 u32 tx_cnt = priv->plat->tx_queues_to_use;
1273 unsigned int desc_size;
1274 void *head_tx;
1275 u32 queue;
1276
1277 /* Display TX rings */
1278 for (queue = 0; queue < tx_cnt; queue++) {
1279 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1280
1281 pr_info("\tTX Queue %d rings\n", queue);
1282
1283 if (priv->extend_desc) {
1284 head_tx = (void *)tx_q->dma_etx;
1285 desc_size = sizeof(struct dma_extended_desc);
1286 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1287 head_tx = (void *)tx_q->dma_entx;
1288 desc_size = sizeof(struct dma_edesc);
1289 } else {
1290 head_tx = (void *)tx_q->dma_tx;
1291 desc_size = sizeof(struct dma_desc);
1292 }
1293
1294 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1295 tx_q->dma_tx_phy, desc_size);
1296 }
1297 }
1298
1299 static void stmmac_display_rings(struct stmmac_priv *priv,
1300 struct stmmac_dma_conf *dma_conf)
1301 {
1302 /* Display RX ring */
1303 stmmac_display_rx_rings(priv, dma_conf);
1304
1305 /* Display TX ring */
1306 stmmac_display_tx_rings(priv, dma_conf);
1307 }
1308
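/* Select the DMA buffer size that fits the given MTU (capped at 16KiB) */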
1309 static int stmmac_set_bfsize(int mtu, int bufsize)
1310 {
1311 int ret = bufsize;
1312
1313 if (mtu >= BUF_SIZE_8KiB)
1314 ret = BUF_SIZE_16KiB;
1315 else if (mtu >= BUF_SIZE_4KiB)
1316 ret = BUF_SIZE_8KiB;
1317 else if (mtu >= BUF_SIZE_2KiB)
1318 ret = BUF_SIZE_4KiB;
1319 else if (mtu > DEFAULT_BUFSIZE)
1320 ret = BUF_SIZE_2KiB;
1321 else
1322 ret = DEFAULT_BUFSIZE;
1323
1324 return ret;
1325 }
1326
1327 /**
1328 * stmmac_clear_rx_descriptors - clear RX descriptors
1329 * @priv: driver private structure
1330 * @dma_conf: structure to take the dma data
1331 * @queue: RX queue index
1332 * Description: this function is called to clear the RX descriptors
1333 * whether basic or extended descriptors are in use.
1334 */
1335 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1336 struct stmmac_dma_conf *dma_conf,
1337 u32 queue)
1338 {
1339 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1340 int i;
1341
1342 /* Clear the RX descriptors */
1343 for (i = 0; i < dma_conf->dma_rx_size; i++)
1344 if (priv->extend_desc)
1345 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1346 priv->use_riwt, priv->mode,
1347 (i == dma_conf->dma_rx_size - 1),
1348 dma_conf->dma_buf_sz);
1349 else
1350 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1351 priv->use_riwt, priv->mode,
1352 (i == dma_conf->dma_rx_size - 1),
1353 dma_conf->dma_buf_sz);
1354 }
1355
1356 /**
1357 * stmmac_clear_tx_descriptors - clear tx descriptors
1358 * @priv: driver private structure
1359 * @dma_conf: structure to take the dma data
1360 * @queue: TX queue index.
1361 * Description: this function is called to clear the TX descriptors
1362 * whether basic or extended descriptors are in use.
1363 */
1364 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1365 struct stmmac_dma_conf *dma_conf,
1366 u32 queue)
1367 {
1368 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1369 int i;
1370
1371 /* Clear the TX descriptors */
1372 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1373 int last = (i == (dma_conf->dma_tx_size - 1));
1374 struct dma_desc *p;
1375
1376 if (priv->extend_desc)
1377 p = &tx_q->dma_etx[i].basic;
1378 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1379 p = &tx_q->dma_entx[i].basic;
1380 else
1381 p = &tx_q->dma_tx[i];
1382
1383 stmmac_init_tx_desc(priv, p, priv->mode, last);
1384 }
1385 }
1386
1387 /**
1388 * stmmac_clear_descriptors - clear descriptors
1389 * @priv: driver private structure
1390 * @dma_conf: structure to take the dma data
1391 * Description: this function is called to clear the TX and RX descriptors
1392 * whether basic or extended descriptors are in use.
1393 */
1394 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1395 struct stmmac_dma_conf *dma_conf)
1396 {
1397 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1398 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1399 u32 queue;
1400
1401 /* Clear the RX descriptors */
1402 for (queue = 0; queue < rx_queue_cnt; queue++)
1403 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1404
1405 /* Clear the TX descriptors */
1406 for (queue = 0; queue < tx_queue_cnt; queue++)
1407 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1408 }
1409
1410 /**
1411 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1412 * @priv: driver private structure
1413 * @dma_conf: structure to take the dma data
1414 * @p: descriptor pointer
1415 * @i: descriptor index
1416 * @flags: gfp flag
1417 * @queue: RX queue index
1418 * Description: this function is called to allocate a receive buffer, perform
1419 * the DMA mapping and init the descriptor.
1420 */
1421 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1422 struct stmmac_dma_conf *dma_conf,
1423 struct dma_desc *p,
1424 int i, gfp_t flags, u32 queue)
1425 {
1426 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1427 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1428 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1429
1430 if (priv->dma_cap.addr64 <= 32)
1431 gfp |= GFP_DMA32;
1432
1433 if (!buf->page) {
1434 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1435 if (!buf->page)
1436 return -ENOMEM;
1437 buf->page_offset = stmmac_rx_offset(priv);
1438 }
1439
1440 if (priv->sph && !buf->sec_page) {
1441 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1442 if (!buf->sec_page)
1443 return -ENOMEM;
1444
1445 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1446 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1447 } else {
1448 buf->sec_page = NULL;
1449 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1450 }
1451
1452 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1453
1454 stmmac_set_desc_addr(priv, p, buf->addr);
1455 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1456 stmmac_init_desc3(priv, p);
1457
1458 return 0;
1459 }
1460
1461 /**
1462 * stmmac_free_rx_buffer - free RX dma buffers
1463 * @priv: private structure
1464 * @rx_q: RX queue
1465 * @i: buffer index.
1466 */
1467 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1468 struct stmmac_rx_queue *rx_q,
1469 int i)
1470 {
1471 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1472
1473 if (buf->page)
1474 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1475 buf->page = NULL;
1476
1477 if (buf->sec_page)
1478 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1479 buf->sec_page = NULL;
1480 }
1481
1482 /**
1483 * stmmac_free_tx_buffer - free TX dma buffers
1484 * @priv: private structure
1485 * @dma_conf: structure to take the dma data
1486 * @queue: TX queue index
1487 * @i: buffer index.
1488 */
1489 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1490 struct stmmac_dma_conf *dma_conf,
1491 u32 queue, int i)
1492 {
1493 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1494
1495 if (tx_q->tx_skbuff_dma[i].buf &&
1496 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1497 if (tx_q->tx_skbuff_dma[i].map_as_page)
1498 dma_unmap_page(priv->device,
1499 tx_q->tx_skbuff_dma[i].buf,
1500 tx_q->tx_skbuff_dma[i].len,
1501 DMA_TO_DEVICE);
1502 else
1503 dma_unmap_single(priv->device,
1504 tx_q->tx_skbuff_dma[i].buf,
1505 tx_q->tx_skbuff_dma[i].len,
1506 DMA_TO_DEVICE);
1507 }
1508
1509 if (tx_q->xdpf[i] &&
1510 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1511 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1512 xdp_return_frame(tx_q->xdpf[i]);
1513 tx_q->xdpf[i] = NULL;
1514 }
1515
1516 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1517 tx_q->xsk_frames_done++;
1518
1519 if (tx_q->tx_skbuff[i] &&
1520 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1521 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1522 tx_q->tx_skbuff[i] = NULL;
1523 }
1524
1525 tx_q->tx_skbuff_dma[i].buf = 0;
1526 tx_q->tx_skbuff_dma[i].map_as_page = false;
1527 }
1528
1529 /**
1530 * dma_free_rx_skbufs - free RX dma buffers
1531 * @priv: private structure
1532 * @dma_conf: structure to take the dma data
1533 * @queue: RX queue index
1534 */
1535 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1536 struct stmmac_dma_conf *dma_conf,
1537 u32 queue)
1538 {
1539 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1540 int i;
1541
1542 for (i = 0; i < dma_conf->dma_rx_size; i++)
1543 stmmac_free_rx_buffer(priv, rx_q, i);
1544 }
1545
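/**
 * stmmac_alloc_rx_buffers - allocate page pool RX buffers for a queue
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: allocate and map a page pool buffer for every RX
 * descriptor of the queue.
 */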
1546 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1547 struct stmmac_dma_conf *dma_conf,
1548 u32 queue, gfp_t flags)
1549 {
1550 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1551 int i;
1552
1553 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1554 struct dma_desc *p;
1555 int ret;
1556
1557 if (priv->extend_desc)
1558 p = &((rx_q->dma_erx + i)->basic);
1559 else
1560 p = rx_q->dma_rx + i;
1561
1562 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1563 queue);
1564 if (ret)
1565 return ret;
1566
1567 rx_q->buf_alloc_num++;
1568 }
1569
1570 return 0;
1571 }
1572
1573 /**
1574 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1575 * @priv: private structure
1576 * @dma_conf: structure to take the dma data
1577 * @queue: RX queue index
1578 */
1579 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1580 struct stmmac_dma_conf *dma_conf,
1581 u32 queue)
1582 {
1583 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1584 int i;
1585
1586 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1587 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1588
1589 if (!buf->xdp)
1590 continue;
1591
1592 xsk_buff_free(buf->xdp);
1593 buf->xdp = NULL;
1594 }
1595 }
1596
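/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: fill every RX descriptor of the queue with an XSK
 * zero-copy buffer and program its DMA address.
 */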
1597 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1598 struct stmmac_dma_conf *dma_conf,
1599 u32 queue)
1600 {
1601 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1602 int i;
1603
1604 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1605 struct stmmac_rx_buffer *buf;
1606 dma_addr_t dma_addr;
1607 struct dma_desc *p;
1608
1609 if (priv->extend_desc)
1610 p = (struct dma_desc *)(rx_q->dma_erx + i);
1611 else
1612 p = rx_q->dma_rx + i;
1613
1614 buf = &rx_q->buf_pool[i];
1615
1616 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1617 if (!buf->xdp)
1618 return -ENOMEM;
1619
1620 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1621 stmmac_set_desc_addr(priv, p, dma_addr);
1622 rx_q->buf_alloc_num++;
1623 }
1624
1625 return 0;
1626 }
1627
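/* Return the XSK buffer pool bound to @queue, or NULL when XDP
 * zero-copy is not enabled for it.
 */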
1628 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1629 {
1630 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1631 return NULL;
1632
1633 return xsk_get_pool_from_qid(priv->dev, queue);
1634 }
1635
1636 /**
1637 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1638 * @priv: driver private structure
1639 * @dma_conf: structure to take the dma data
1640 * @queue: RX queue index
1641 * @flags: gfp flag.
1642 * Description: this function initializes the DMA RX descriptors
1643 * and allocates the socket buffers. It supports the chained and ring
1644 * modes.
1645 */
1646 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1647 struct stmmac_dma_conf *dma_conf,
1648 u32 queue, gfp_t flags)
1649 {
1650 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1651 int ret;
1652
1653 netif_dbg(priv, probe, priv->dev,
1654 "(%s) dma_rx_phy=0x%08x\n", __func__,
1655 (u32)rx_q->dma_rx_phy);
1656
1657 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1658
1659 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1660
1661 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1662
1663 if (rx_q->xsk_pool) {
1664 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1665 MEM_TYPE_XSK_BUFF_POOL,
1666 NULL));
1667 netdev_info(priv->dev,
1668 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1669 rx_q->queue_index);
1670 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1671 } else {
1672 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1673 MEM_TYPE_PAGE_POOL,
1674 rx_q->page_pool));
1675 netdev_info(priv->dev,
1676 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1677 rx_q->queue_index);
1678 }
1679
1680 if (rx_q->xsk_pool) {
1681 /* RX XDP ZC buffer pool may not be populated, e.g.
1682 * xdpsock TX-only.
1683 */
1684 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1685 } else {
1686 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1687 if (ret < 0)
1688 return -ENOMEM;
1689 }
1690
1691 /* Setup the chained descriptor addresses */
1692 if (priv->mode == STMMAC_CHAIN_MODE) {
1693 if (priv->extend_desc)
1694 stmmac_mode_init(priv, rx_q->dma_erx,
1695 rx_q->dma_rx_phy,
1696 dma_conf->dma_rx_size, 1);
1697 else
1698 stmmac_mode_init(priv, rx_q->dma_rx,
1699 rx_q->dma_rx_phy,
1700 dma_conf->dma_rx_size, 0);
1701 }
1702
1703 return 0;
1704 }
1705
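/**
 * init_dma_rx_desc_rings - init the RX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize every RX queue; on failure free the buffers
 * already allocated for the previous queues.
 */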
1706 static int init_dma_rx_desc_rings(struct net_device *dev,
1707 struct stmmac_dma_conf *dma_conf,
1708 gfp_t flags)
1709 {
1710 struct stmmac_priv *priv = netdev_priv(dev);
1711 u32 rx_count = priv->plat->rx_queues_to_use;
1712 int queue;
1713 int ret;
1714
1715 /* RX INITIALIZATION */
1716 netif_dbg(priv, probe, priv->dev,
1717 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1718
1719 for (queue = 0; queue < rx_count; queue++) {
1720 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1721 if (ret)
1722 goto err_init_rx_buffers;
1723 }
1724
1725 return 0;
1726
1727 err_init_rx_buffers:
1728 while (queue >= 0) {
1729 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1730
1731 if (rx_q->xsk_pool)
1732 dma_free_rx_xskbufs(priv, dma_conf, queue);
1733 else
1734 dma_free_rx_skbufs(priv, dma_conf, queue);
1735
1736 rx_q->buf_alloc_num = 0;
1737 rx_q->xsk_pool = NULL;
1738
1739 queue--;
1740 }
1741
1742 return ret;
1743 }
1744
1745 /**
1746 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1747 * @priv: driver private structure
1748 * @dma_conf: structure to take the dma data
1749 * @queue: TX queue index
1750 * Description: this function initializes the DMA TX descriptors
1751 * and clears the TX buffer bookkeeping entries. It supports the
1752 * chained and ring modes.
1753 */
1754 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1755 struct stmmac_dma_conf *dma_conf,
1756 u32 queue)
1757 {
1758 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1759 int i;
1760
1761 netif_dbg(priv, probe, priv->dev,
1762 "(%s) dma_tx_phy=0x%08x\n", __func__,
1763 (u32)tx_q->dma_tx_phy);
1764
1765 /* Setup the chained descriptor addresses */
1766 if (priv->mode == STMMAC_CHAIN_MODE) {
1767 if (priv->extend_desc)
1768 stmmac_mode_init(priv, tx_q->dma_etx,
1769 tx_q->dma_tx_phy,
1770 dma_conf->dma_tx_size, 1);
1771 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1772 stmmac_mode_init(priv, tx_q->dma_tx,
1773 tx_q->dma_tx_phy,
1774 dma_conf->dma_tx_size, 0);
1775 }
1776
1777 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1778
1779 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1780 struct dma_desc *p;
1781
1782 if (priv->extend_desc)
1783 p = &((tx_q->dma_etx + i)->basic);
1784 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1785 p = &((tx_q->dma_entx + i)->basic);
1786 else
1787 p = tx_q->dma_tx + i;
1788
1789 stmmac_clear_desc(priv, p);
1790
1791 tx_q->tx_skbuff_dma[i].buf = 0;
1792 tx_q->tx_skbuff_dma[i].map_as_page = false;
1793 tx_q->tx_skbuff_dma[i].len = 0;
1794 tx_q->tx_skbuff_dma[i].last_segment = false;
1795 tx_q->tx_skbuff[i] = NULL;
1796 }
1797
1798 return 0;
1799 }
1800
1801 static int init_dma_tx_desc_rings(struct net_device *dev,
1802 struct stmmac_dma_conf *dma_conf)
1803 {
1804 struct stmmac_priv *priv = netdev_priv(dev);
1805 u32 tx_queue_cnt;
1806 u32 queue;
1807
1808 tx_queue_cnt = priv->plat->tx_queues_to_use;
1809
1810 for (queue = 0; queue < tx_queue_cnt; queue++)
1811 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1812
1813 return 0;
1814 }
1815
1816 /**
1817 * init_dma_desc_rings - init the RX/TX descriptor rings
1818 * @dev: net device structure
1819 * @dma_conf: structure to take the dma data
1820 * @flags: gfp flag.
1821 * Description: this function initializes the DMA RX/TX descriptors
1822 * and allocates the socket buffers. It supports the chained and ring
1823 * modes.
1824 */
1825 static int init_dma_desc_rings(struct net_device *dev,
1826 struct stmmac_dma_conf *dma_conf,
1827 gfp_t flags)
1828 {
1829 struct stmmac_priv *priv = netdev_priv(dev);
1830 int ret;
1831
1832 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1833 if (ret)
1834 return ret;
1835
1836 ret = init_dma_tx_desc_rings(dev, dma_conf);
1837
1838 stmmac_clear_descriptors(priv, dma_conf);
1839
1840 if (netif_msg_hw(priv))
1841 stmmac_display_rings(priv, dma_conf);
1842
1843 return ret;
1844 }
1845
1846 /**
1847 * dma_free_tx_skbufs - free TX dma buffers
1848 * @priv: private structure
1849 * @dma_conf: structure to take the dma data
1850 * @queue: TX queue index
1851 */
1852 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1853 struct stmmac_dma_conf *dma_conf,
1854 u32 queue)
1855 {
1856 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1857 int i;
1858
1859 tx_q->xsk_frames_done = 0;
1860
1861 for (i = 0; i < dma_conf->dma_tx_size; i++)
1862 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1863
1864 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1865 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1866 tx_q->xsk_frames_done = 0;
1867 tx_q->xsk_pool = NULL;
1868 }
1869 }
1870
1871 /**
1872 * stmmac_free_tx_skbufs - free TX skb buffers
1873 * @priv: private structure
1874 */
1875 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1876 {
1877 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1878 u32 queue;
1879
1880 for (queue = 0; queue < tx_queue_cnt; queue++)
1881 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1882 }
1883
1884 /**
1885 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1886 * @priv: private structure
1887 * @dma_conf: structure to take the dma data
1888 * @queue: RX queue index
1889 */
1890 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1891 struct stmmac_dma_conf *dma_conf,
1892 u32 queue)
1893 {
1894 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1895
1896 /* Release the DMA RX socket buffers */
1897 if (rx_q->xsk_pool)
1898 dma_free_rx_xskbufs(priv, dma_conf, queue);
1899 else
1900 dma_free_rx_skbufs(priv, dma_conf, queue);
1901
1902 rx_q->buf_alloc_num = 0;
1903 rx_q->xsk_pool = NULL;
1904
1905 /* Free DMA regions of consistent memory previously allocated */
1906 if (!priv->extend_desc)
1907 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1908 sizeof(struct dma_desc),
1909 rx_q->dma_rx, rx_q->dma_rx_phy);
1910 else
1911 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1912 sizeof(struct dma_extended_desc),
1913 rx_q->dma_erx, rx_q->dma_rx_phy);
1914
1915 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1916 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1917
1918 kfree(rx_q->buf_pool);
1919 if (rx_q->page_pool)
1920 page_pool_destroy(rx_q->page_pool);
1921 }
1922
1923 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1924 struct stmmac_dma_conf *dma_conf)
1925 {
1926 u32 rx_count = priv->plat->rx_queues_to_use;
1927 u32 queue;
1928
1929 /* Free RX queue resources */
1930 for (queue = 0; queue < rx_count; queue++)
1931 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1932 }
1933
1934 /**
1935 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1936 * @priv: private structure
1937 * @dma_conf: structure to take the dma data
1938 * @queue: TX queue index
1939 */
1940 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1941 struct stmmac_dma_conf *dma_conf,
1942 u32 queue)
1943 {
1944 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1945 size_t size;
1946 void *addr;
1947
1948 /* Release the DMA TX socket buffers */
1949 dma_free_tx_skbufs(priv, dma_conf, queue);
1950
1951 if (priv->extend_desc) {
1952 size = sizeof(struct dma_extended_desc);
1953 addr = tx_q->dma_etx;
1954 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1955 size = sizeof(struct dma_edesc);
1956 addr = tx_q->dma_entx;
1957 } else {
1958 size = sizeof(struct dma_desc);
1959 addr = tx_q->dma_tx;
1960 }
1961
1962 size *= dma_conf->dma_tx_size;
1963
1964 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1965
1966 kfree(tx_q->tx_skbuff_dma);
1967 kfree(tx_q->tx_skbuff);
1968 }
1969
1970 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 struct stmmac_dma_conf *dma_conf)
1972 {
1973 u32 tx_count = priv->plat->tx_queues_to_use;
1974 u32 queue;
1975
1976 /* Free TX queue resources */
1977 for (queue = 0; queue < tx_count; queue++)
1978 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1979 }
1980
1981 /**
1982 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1983 * @priv: private structure
1984 * @dma_conf: structure to take the dma data
1985 * @queue: RX queue index
1986 * Description: according to which descriptor can be used (extended or basic),
1987 * this function allocates the RX resources for this queue: the page pool
1988 * backing the receive buffers, the buffer bookkeeping array and the DMA
1989 * descriptor ring, and registers the XDP RX queue info.
1990 */
1991 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1992 struct stmmac_dma_conf *dma_conf,
1993 u32 queue)
1994 {
1995 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1996 struct stmmac_channel *ch = &priv->channel[queue];
1997 bool xdp_prog = stmmac_xdp_is_enabled(priv);
1998 struct page_pool_params pp_params = { 0 };
1999 unsigned int num_pages;
2000 unsigned int napi_id;
2001 int ret;
2002
2003 rx_q->queue_index = queue;
2004 rx_q->priv_data = priv;
2005
2006 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2007 pp_params.pool_size = dma_conf->dma_rx_size;
2008 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2009 pp_params.order = ilog2(num_pages);
2010 pp_params.nid = dev_to_node(priv->device);
2011 pp_params.dev = priv->device;
2012 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2013 pp_params.offset = stmmac_rx_offset(priv);
2014 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2015
2016 rx_q->page_pool = page_pool_create(&pp_params);
2017 if (IS_ERR(rx_q->page_pool)) {
2018 ret = PTR_ERR(rx_q->page_pool);
2019 rx_q->page_pool = NULL;
2020 return ret;
2021 }
2022
2023 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2024 sizeof(*rx_q->buf_pool),
2025 GFP_KERNEL);
2026 if (!rx_q->buf_pool)
2027 return -ENOMEM;
2028
2029 if (priv->extend_desc) {
2030 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2031 dma_conf->dma_rx_size *
2032 sizeof(struct dma_extended_desc),
2033 &rx_q->dma_rx_phy,
2034 GFP_KERNEL);
2035 if (!rx_q->dma_erx)
2036 return -ENOMEM;
2037
2038 } else {
2039 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2040 dma_conf->dma_rx_size *
2041 sizeof(struct dma_desc),
2042 &rx_q->dma_rx_phy,
2043 GFP_KERNEL);
2044 if (!rx_q->dma_rx)
2045 return -ENOMEM;
2046 }
2047
2048 if (stmmac_xdp_is_enabled(priv) &&
2049 test_bit(queue, priv->af_xdp_zc_qps))
2050 napi_id = ch->rxtx_napi.napi_id;
2051 else
2052 napi_id = ch->rx_napi.napi_id;
2053
2054 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2055 rx_q->queue_index,
2056 napi_id);
2057 if (ret) {
2058 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2059 return -EINVAL;
2060 }
2061
2062 return 0;
2063 }
2064
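/**
 * alloc_dma_rx_desc_resources - alloc RX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the per-queue RX resources and release whatever was
 * already allocated if any queue fails.
 */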
2065 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2066 struct stmmac_dma_conf *dma_conf)
2067 {
2068 u32 rx_count = priv->plat->rx_queues_to_use;
2069 u32 queue;
2070 int ret;
2071
2072 /* RX queues buffers and DMA */
2073 for (queue = 0; queue < rx_count; queue++) {
2074 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2075 if (ret)
2076 goto err_dma;
2077 }
2078
2079 return 0;
2080
2081 err_dma:
2082 free_dma_rx_desc_resources(priv, dma_conf);
2083
2084 return ret;
2085 }
2086
2087 /**
2088 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2089 * @priv: private structure
2090 * @dma_conf: structure to take the dma data
2091 * @queue: TX queue index
2092 * Description: according to which descriptor can be used (extended or basic),
2093 * this function allocates the TX resources for this queue: the tx_skbuff
2094 * and tx_skbuff_dma bookkeeping arrays and the DMA descriptor ring used
2095 * by the transmit path.
2096 */
2097 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2098 struct stmmac_dma_conf *dma_conf,
2099 u32 queue)
2100 {
2101 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2102 size_t size;
2103 void *addr;
2104
2105 tx_q->queue_index = queue;
2106 tx_q->priv_data = priv;
2107
2108 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2109 sizeof(*tx_q->tx_skbuff_dma),
2110 GFP_KERNEL);
2111 if (!tx_q->tx_skbuff_dma)
2112 return -ENOMEM;
2113
2114 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2115 sizeof(struct sk_buff *),
2116 GFP_KERNEL);
2117 if (!tx_q->tx_skbuff)
2118 return -ENOMEM;
2119
2120 if (priv->extend_desc)
2121 size = sizeof(struct dma_extended_desc);
2122 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2123 size = sizeof(struct dma_edesc);
2124 else
2125 size = sizeof(struct dma_desc);
2126
2127 size *= dma_conf->dma_tx_size;
2128
2129 addr = dma_alloc_coherent(priv->device, size,
2130 &tx_q->dma_tx_phy, GFP_KERNEL);
2131 if (!addr)
2132 return -ENOMEM;
2133
2134 if (priv->extend_desc)
2135 tx_q->dma_etx = addr;
2136 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2137 tx_q->dma_entx = addr;
2138 else
2139 tx_q->dma_tx = addr;
2140
2141 return 0;
2142 }
2143
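/**
 * alloc_dma_tx_desc_resources - alloc TX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the per-queue TX resources and release whatever was
 * already allocated if any queue fails.
 */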
2144 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2145 struct stmmac_dma_conf *dma_conf)
2146 {
2147 u32 tx_count = priv->plat->tx_queues_to_use;
2148 u32 queue;
2149 int ret;
2150
2151 /* TX queues buffers and DMA */
2152 for (queue = 0; queue < tx_count; queue++) {
2153 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2154 if (ret)
2155 goto err_dma;
2156 }
2157
2158 return 0;
2159
2160 err_dma:
2161 free_dma_tx_desc_resources(priv, dma_conf);
2162 return ret;
2163 }
2164
2165 /**
2166 * alloc_dma_desc_resources - alloc TX/RX resources.
2167 * @priv: private structure
2168 * @dma_conf: structure to take the dma data
2169 * Description: according to which descriptor can be used (extended or basic),
2170 * this function allocates the resources for the TX and RX paths. In case of
2171 * reception, for example, it pre-allocates the RX buffers in order to
2172 * allow the zero-copy mechanism.
2173 */
2174 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2175 struct stmmac_dma_conf *dma_conf)
2176 {
2177 /* RX Allocation */
2178 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2179
2180 if (ret)
2181 return ret;
2182
2183 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2184
2185 return ret;
2186 }
2187
2188 /**
2189 * free_dma_desc_resources - free dma desc resources
2190 * @priv: private structure
2191 * @dma_conf: structure to take the dma data
2192 */
2193 static void free_dma_desc_resources(struct stmmac_priv *priv,
2194 struct stmmac_dma_conf *dma_conf)
2195 {
2196 /* Release the DMA TX socket buffers */
2197 free_dma_tx_desc_resources(priv, dma_conf);
2198
2199 /* Release the DMA RX socket buffers later
2200 * to ensure all pending XDP_TX buffers are returned.
2201 */
2202 free_dma_rx_desc_resources(priv, dma_conf);
2203 }
2204
2205 /**
2206 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2207 * @priv: driver private structure
2208 * Description: It is used for enabling the rx queues in the MAC
2209 */
2210 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2211 {
2212 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2213 int queue;
2214 u8 mode;
2215
2216 for (queue = 0; queue < rx_queues_count; queue++) {
2217 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2218 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2219 }
2220 }
2221
2222 /**
2223 * stmmac_start_rx_dma - start RX DMA channel
2224 * @priv: driver private structure
2225 * @chan: RX channel index
2226 * Description:
2227 * This starts an RX DMA channel
2228 */
2229 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2230 {
2231 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2232 stmmac_start_rx(priv, priv->ioaddr, chan);
2233 }
2234
2235 /**
2236 * stmmac_start_tx_dma - start TX DMA channel
2237 * @priv: driver private structure
2238 * @chan: TX channel index
2239 * Description:
2240 * This starts a TX DMA channel
2241 */
2242 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2243 {
2244 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2245 stmmac_start_tx(priv, priv->ioaddr, chan);
2246 }
2247
2248 /**
2249 * stmmac_stop_rx_dma - stop RX DMA channel
2250 * @priv: driver private structure
2251 * @chan: RX channel index
2252 * Description:
2253 * This stops an RX DMA channel
2254 */
2255 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2256 {
2257 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2258 stmmac_stop_rx(priv, priv->ioaddr, chan);
2259 }
2260
2261 /**
2262 * stmmac_stop_tx_dma - stop TX DMA channel
2263 * @priv: driver private structure
2264 * @chan: TX channel index
2265 * Description:
2266 * This stops a TX DMA channel
2267 */
2268 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2269 {
2270 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2271 stmmac_stop_tx(priv, priv->ioaddr, chan);
2272 }
2273
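/* Enable RX and TX DMA interrupts on every DMA CSR channel, taking the
 * per-channel lock to avoid racing with the NAPI poll routines.
 */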
2274 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2275 {
2276 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2277 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2278 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2279 u32 chan;
2280
2281 for (chan = 0; chan < dma_csr_ch; chan++) {
2282 struct stmmac_channel *ch = &priv->channel[chan];
2283 unsigned long flags;
2284
2285 spin_lock_irqsave(&ch->lock, flags);
2286 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2287 spin_unlock_irqrestore(&ch->lock, flags);
2288 }
2289 }
2290
2291 /**
2292 * stmmac_start_all_dma - start all RX and TX DMA channels
2293 * @priv: driver private structure
2294 * Description:
2295 * This starts all the RX and TX DMA channels
2296 */
2297 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2298 {
2299 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2300 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2301 u32 chan = 0;
2302
2303 for (chan = 0; chan < rx_channels_count; chan++)
2304 stmmac_start_rx_dma(priv, chan);
2305
2306 for (chan = 0; chan < tx_channels_count; chan++)
2307 stmmac_start_tx_dma(priv, chan);
2308 }
2309
2310 /**
2311 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2312 * @priv: driver private structure
2313 * Description:
2314 * This stops the RX and TX DMA channels
2315 */
2316 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2317 {
2318 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2319 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2320 u32 chan = 0;
2321
2322 for (chan = 0; chan < rx_channels_count; chan++)
2323 stmmac_stop_rx_dma(priv, chan);
2324
2325 for (chan = 0; chan < tx_channels_count; chan++)
2326 stmmac_stop_tx_dma(priv, chan);
2327 }
2328
2329 /**
2330 * stmmac_dma_operation_mode - HW DMA operation mode
2331 * @priv: driver private structure
2332 * Description: it is used for configuring the DMA operation mode register in
2333 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2334 */
2335 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2336 {
2337 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2338 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2339 int rxfifosz = priv->plat->rx_fifo_size;
2340 int txfifosz = priv->plat->tx_fifo_size;
2341 u32 txmode = 0;
2342 u32 rxmode = 0;
2343 u32 chan = 0;
2344 u8 qmode = 0;
2345
2346 if (rxfifosz == 0)
2347 rxfifosz = priv->dma_cap.rx_fifo_size;
2348 if (txfifosz == 0)
2349 txfifosz = priv->dma_cap.tx_fifo_size;
2350
2351 /* Adjust for real per queue fifo size */
2352 rxfifosz /= rx_channels_count;
2353 txfifosz /= tx_channels_count;
2354
2355 if (priv->plat->force_thresh_dma_mode) {
2356 txmode = tc;
2357 rxmode = tc;
2358 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2359 /*
2360 * In case of GMAC, Store-and-Forward mode can be enabled
2361 * to perform the TX checksum offload (COE) in HW. This depends on:
2362 * 1) TX COE being actually supported;
2363 * 2) there being no buggy Jumbo frame support that requires
2364 * not inserting the csum in the TDES.
2365 */
2366 txmode = SF_DMA_MODE;
2367 rxmode = SF_DMA_MODE;
2368 priv->xstats.threshold = SF_DMA_MODE;
2369 } else {
2370 txmode = tc;
2371 rxmode = SF_DMA_MODE;
2372 }
2373
2374 /* configure all channels */
2375 for (chan = 0; chan < rx_channels_count; chan++) {
2376 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2377 u32 buf_size;
2378
2379 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2380
2381 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2382 rxfifosz, qmode);
2383
2384 if (rx_q->xsk_pool) {
2385 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2386 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2387 buf_size,
2388 chan);
2389 } else {
2390 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2391 priv->dma_conf.dma_buf_sz,
2392 chan);
2393 }
2394 }
2395
2396 for (chan = 0; chan < tx_channels_count; chan++) {
2397 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2398
2399 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2400 txfifosz, qmode);
2401 }
2402 }
2403
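/**
 * stmmac_xdp_xmit_zc - transmit pending AF_XDP (XSK) descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peek descriptors from the XSK pool and place them on the TX
 * ring, stopping early when the ring runs low or the carrier is down.
 * Returns true when the budget was not exhausted and no XSK work is left
 * pending, false otherwise.
 */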
2404 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2405 {
2406 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2407 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2408 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2409 unsigned int entry = tx_q->cur_tx;
2410 struct dma_desc *tx_desc = NULL;
2411 struct xdp_desc xdp_desc;
2412 bool work_done = true;
2413
2414 /* Avoids TX time-out as we are sharing with slow path */
2415 txq_trans_cond_update(nq);
2416
2417 budget = min(budget, stmmac_tx_avail(priv, queue));
2418
2419 while (budget-- > 0) {
2420 dma_addr_t dma_addr;
2421 bool set_ic;
2422
2423 /* We are sharing with slow path and stop XSK TX desc submission when
2424 * the available TX ring space is less than the threshold.
2425 */
2426 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2427 !netif_carrier_ok(priv->dev)) {
2428 work_done = false;
2429 break;
2430 }
2431
2432 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2433 break;
2434
2435 if (likely(priv->extend_desc))
2436 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2437 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2438 tx_desc = &tx_q->dma_entx[entry].basic;
2439 else
2440 tx_desc = tx_q->dma_tx + entry;
2441
2442 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2443 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2444
2445 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2446
2447 /* To return the XDP buffer to the XSK pool, we simply call
2448 * xsk_tx_completed(), so we don't need to fill up
2449 * 'buf' and 'xdpf'.
2450 */
2451 tx_q->tx_skbuff_dma[entry].buf = 0;
2452 tx_q->xdpf[entry] = NULL;
2453
2454 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2455 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2456 tx_q->tx_skbuff_dma[entry].last_segment = true;
2457 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2458
2459 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2460
2461 tx_q->tx_count_frames++;
2462
2463 if (!priv->tx_coal_frames[queue])
2464 set_ic = false;
2465 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2466 set_ic = true;
2467 else
2468 set_ic = false;
2469
2470 if (set_ic) {
2471 tx_q->tx_count_frames = 0;
2472 stmmac_set_tx_ic(priv, tx_desc);
2473 priv->xstats.tx_set_ic_bit++;
2474 }
2475
2476 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2477 true, priv->mode, true, true,
2478 xdp_desc.len);
2479
2480 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2481
2482 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2483 entry = tx_q->cur_tx;
2484 }
2485
2486 if (tx_desc) {
2487 stmmac_flush_tx_descriptors(priv, queue);
2488 xsk_tx_release(pool);
2489 }
2490
2491 /* Return true only if both conditions are met:
2492 * a) TX budget is still available
2493 * b) work_done == true, i.e. the XSK TX desc peek found no more
2494 * pending XSK TX frames to transmit
2495 */
2496 return !!budget && work_done;
2497 }
2498
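/* Bump the TX DMA threshold (tc) by 64, up to a limit of 256, when the
 * hardware reports an error that asks for it, and reprogram the channel
 * operation mode accordingly.
 */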
2499 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2500 {
2501 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2502 tc += 64;
2503
2504 if (priv->plat->force_thresh_dma_mode)
2505 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2506 else
2507 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2508 chan);
2509
2510 priv->xstats.threshold = tc;
2511 }
2512 }
2513
2514 /**
2515 * stmmac_tx_clean - to manage the transmission completion
2516 * @priv: driver private structure
2517 * @budget: napi budget limiting this functions packet handling
2518 * @queue: TX queue index
2519 * Description: it reclaims the transmit resources after transmission completes.
2520 */
2521 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2522 {
2523 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2524 unsigned int bytes_compl = 0, pkts_compl = 0;
2525 unsigned int entry, xmits = 0, count = 0;
2526
2527 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2528
2529 priv->xstats.tx_clean++;
2530
2531 tx_q->xsk_frames_done = 0;
2532
2533 entry = tx_q->dirty_tx;
2534
2535 /* Try to clean all TX complete frame in 1 shot */
2536 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2537 struct xdp_frame *xdpf;
2538 struct sk_buff *skb;
2539 struct dma_desc *p;
2540 int status;
2541
2542 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2543 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2544 xdpf = tx_q->xdpf[entry];
2545 skb = NULL;
2546 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2547 xdpf = NULL;
2548 skb = tx_q->tx_skbuff[entry];
2549 } else {
2550 xdpf = NULL;
2551 skb = NULL;
2552 }
2553
2554 if (priv->extend_desc)
2555 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2556 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2557 p = &tx_q->dma_entx[entry].basic;
2558 else
2559 p = tx_q->dma_tx + entry;
2560
2561 status = stmmac_tx_status(priv, &priv->dev->stats,
2562 &priv->xstats, p, priv->ioaddr);
2563 /* Check if the descriptor is owned by the DMA */
2564 if (unlikely(status & tx_dma_own))
2565 break;
2566
2567 count++;
2568
2569 /* Make sure descriptor fields are read after reading
2570 * the own bit.
2571 */
2572 dma_rmb();
2573
2574 /* Just consider the last segment and ...*/
2575 if (likely(!(status & tx_not_ls))) {
2576 /* ... verify the status error condition */
2577 if (unlikely(status & tx_err)) {
2578 priv->dev->stats.tx_errors++;
2579 if (unlikely(status & tx_err_bump_tc))
2580 stmmac_bump_dma_threshold(priv, queue);
2581 } else {
2582 priv->dev->stats.tx_packets++;
2583 priv->xstats.tx_pkt_n++;
2584 priv->xstats.txq_stats[queue].tx_pkt_n++;
2585 }
2586 if (skb)
2587 stmmac_get_tx_hwtstamp(priv, p, skb);
2588 }
2589
2590 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2591 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2592 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2593 dma_unmap_page(priv->device,
2594 tx_q->tx_skbuff_dma[entry].buf,
2595 tx_q->tx_skbuff_dma[entry].len,
2596 DMA_TO_DEVICE);
2597 else
2598 dma_unmap_single(priv->device,
2599 tx_q->tx_skbuff_dma[entry].buf,
2600 tx_q->tx_skbuff_dma[entry].len,
2601 DMA_TO_DEVICE);
2602 tx_q->tx_skbuff_dma[entry].buf = 0;
2603 tx_q->tx_skbuff_dma[entry].len = 0;
2604 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2605 }
2606
2607 stmmac_clean_desc3(priv, tx_q, p);
2608
2609 tx_q->tx_skbuff_dma[entry].last_segment = false;
2610 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2611
2612 if (xdpf &&
2613 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2614 xdp_return_frame_rx_napi(xdpf);
2615 tx_q->xdpf[entry] = NULL;
2616 }
2617
2618 if (xdpf &&
2619 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2620 xdp_return_frame(xdpf);
2621 tx_q->xdpf[entry] = NULL;
2622 }
2623
2624 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2625 tx_q->xsk_frames_done++;
2626
2627 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2628 if (likely(skb)) {
2629 pkts_compl++;
2630 bytes_compl += skb->len;
2631 dev_consume_skb_any(skb);
2632 tx_q->tx_skbuff[entry] = NULL;
2633 }
2634 }
2635
2636 stmmac_release_tx_desc(priv, p, priv->mode);
2637
2638 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2639 }
2640 tx_q->dirty_tx = entry;
2641
2642 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2643 pkts_compl, bytes_compl);
2644
2645 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2646 queue))) &&
2647 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2648
2649 netif_dbg(priv, tx_done, priv->dev,
2650 "%s: restart transmit\n", __func__);
2651 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2652 }
2653
2654 if (tx_q->xsk_pool) {
2655 bool work_done;
2656
2657 if (tx_q->xsk_frames_done)
2658 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2659
2660 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2661 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2662
2663 /* For XSK TX, we try to send as many as possible.
2664 * If XSK work done (XSK TX desc empty and budget still
2665 * available), return "budget - 1" to reenable TX IRQ.
2666 * Else, return "budget" to make NAPI continue polling.
2667 */
2668 work_done = stmmac_xdp_xmit_zc(priv, queue,
2669 STMMAC_XSK_TX_BUDGET_MAX);
2670 if (work_done)
2671 xmits = budget - 1;
2672 else
2673 xmits = budget;
2674 }
2675
2676 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2677 priv->eee_sw_timer_en) {
2678 if (stmmac_enable_eee_mode(priv))
2679 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2680 }
2681
2682 /* We still have pending packets, let's call for a new scheduling */
2683 if (tx_q->dirty_tx != tx_q->cur_tx)
2684 hrtimer_start(&tx_q->txtimer,
2685 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2686 HRTIMER_MODE_REL);
2687
2688 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2689
2690 /* Combine decisions from TX clean and XSK TX */
2691 return max(count, xmits);
2692 }
2693
2694 /**
2695 * stmmac_tx_err - to manage the tx error
2696 * @priv: driver private structure
2697 * @chan: channel index
2698 * Description: it cleans the descriptors and restarts the transmission
2699 * in case of transmission errors.
2700 */
2701 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2702 {
2703 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2704
2705 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2706
2707 stmmac_stop_tx_dma(priv, chan);
2708 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2709 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2710 stmmac_reset_tx_queue(priv, chan);
2711 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2712 tx_q->dma_tx_phy, chan);
2713 stmmac_start_tx_dma(priv, chan);
2714
2715 priv->dev->stats.tx_errors++;
2716 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2717 }
2718
2719 /**
2720 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2721 * @priv: driver private structure
2722 * @txmode: TX operating mode
2723 * @rxmode: RX operating mode
2724 * @chan: channel index
2725 * Description: it is used for configuring of the DMA operation mode in
2726 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2727 * mode.
2728 */
2729 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2730 u32 rxmode, u32 chan)
2731 {
2732 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2733 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2734 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2735 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2736 int rxfifosz = priv->plat->rx_fifo_size;
2737 int txfifosz = priv->plat->tx_fifo_size;
2738
2739 if (rxfifosz == 0)
2740 rxfifosz = priv->dma_cap.rx_fifo_size;
2741 if (txfifosz == 0)
2742 txfifosz = priv->dma_cap.tx_fifo_size;
2743
2744 /* Adjust for real per queue fifo size */
2745 rxfifosz /= rx_channels_count;
2746 txfifosz /= tx_channels_count;
2747
2748 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2749 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2750 }
2751
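/* Read the safety feature interrupt status and, if an error is reported,
 * trigger the global error recovery. Returns true when an error was handled.
 */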
2752 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2753 {
2754 int ret;
2755
2756 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2757 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2758 if (ret && (ret != -EINVAL)) {
2759 stmmac_global_err(priv);
2760 return true;
2761 }
2762
2763 return false;
2764 }
2765
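/* Check the DMA interrupt status of @chan and schedule the RX and/or TX NAPI
 * instance (the combined rxtx NAPI when an XSK pool is attached), disabling
 * the corresponding DMA interrupts until the poll routine re-enables them.
 */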
2766 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2767 {
2768 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2769 &priv->xstats, chan, dir);
2770 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2771 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2772 struct stmmac_channel *ch = &priv->channel[chan];
2773 struct napi_struct *rx_napi;
2774 struct napi_struct *tx_napi;
2775 unsigned long flags;
2776
2777 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2778 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2779
2780 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2781 if (napi_schedule_prep(rx_napi)) {
2782 spin_lock_irqsave(&ch->lock, flags);
2783 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2784 spin_unlock_irqrestore(&ch->lock, flags);
2785 __napi_schedule(rx_napi);
2786 }
2787 }
2788
2789 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2790 if (napi_schedule_prep(tx_napi)) {
2791 spin_lock_irqsave(&ch->lock, flags);
2792 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2793 spin_unlock_irqrestore(&ch->lock, flags);
2794 __napi_schedule(tx_napi);
2795 }
2796 }
2797
2798 return status;
2799 }
2800
2801 /**
2802 * stmmac_dma_interrupt - DMA ISR
2803 * @priv: driver private structure
2804 * Description: this is the DMA ISR. It is called by the main ISR.
2805 * It calls the dwmac dma routine and schedules the poll method in case
2806 * some work can be done.
2807 */
2808 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2809 {
2810 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2811 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2812 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2813 tx_channel_count : rx_channel_count;
2814 u32 chan;
2815 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2816
2817 /* Make sure we never check beyond our status buffer. */
2818 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2819 channels_to_check = ARRAY_SIZE(status);
2820
2821 for (chan = 0; chan < channels_to_check; chan++)
2822 status[chan] = stmmac_napi_check(priv, chan,
2823 DMA_DIR_RXTX);
2824
2825 for (chan = 0; chan < tx_channel_count; chan++) {
2826 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2827 /* Try to bump up the dma threshold on this failure */
2828 stmmac_bump_dma_threshold(priv, chan);
2829 } else if (unlikely(status[chan] == tx_hard_error)) {
2830 stmmac_tx_err(priv, chan);
2831 }
2832 }
2833 }
2834
2835 /**
2836 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2837 * @priv: driver private structure
2838 * Description: this masks the MMC interrupts, since the counters are managed in SW.
2839 */
2840 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2841 {
2842 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2843 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2844
2845 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2846
2847 if (priv->dma_cap.rmon) {
2848 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2849 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2850 } else
2851 netdev_info(priv->dev, "No MAC Management Counters available\n");
2852 }
2853
2854 /**
2855 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2856 * @priv: driver private structure
2857 * Description:
2858 * new GMAC chip generations have a new register to indicate the
2859 * presence of the optional feature/functions.
2860 * This can also be used to override the value passed through the
2861 * platform and necessary for old MAC10/100 and GMAC chips.
2862 */
2863 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2864 {
2865 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2866 }
2867
2868 /**
2869 * stmmac_check_ether_addr - check if the MAC addr is valid
2870 * @priv: driver private structure
2871 * Description:
2872 * it verifies that the MAC address is valid; if it is not, a random
2873 * MAC address is generated
2874 */
2875 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2876 {
2877 u8 addr[ETH_ALEN];
2878
2879 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2880 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2881 if (is_valid_ether_addr(addr))
2882 eth_hw_addr_set(priv->dev, addr);
2883 else
2884 eth_hw_addr_random(priv->dev);
2885 dev_info(priv->device, "device MAC address %pM\n",
2886 priv->dev->dev_addr);
2887 }
2888 }
2889
2890 /**
2891 * stmmac_init_dma_engine - DMA init.
2892 * @priv: driver private structure
2893 * Description:
2894 * It inits the DMA invoking the specific MAC/GMAC callback.
2895 * Some DMA parameters can be passed from the platform;
2896 * if they are not passed, a default is kept for the MAC or GMAC.
2897 */
2898 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2899 {
2900 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2901 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2902 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2903 struct stmmac_rx_queue *rx_q;
2904 struct stmmac_tx_queue *tx_q;
2905 u32 chan = 0;
2906 int atds = 0;
2907 int ret = 0;
2908
2909 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2910 dev_err(priv->device, "Invalid DMA configuration\n");
2911 return -EINVAL;
2912 }
2913
2914 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2915 atds = 1;
2916
2917 ret = stmmac_reset(priv, priv->ioaddr);
2918 if (ret) {
2919 dev_err(priv->device, "Failed to reset the dma\n");
2920 return ret;
2921 }
2922
2923 /* DMA Configuration */
2924 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2925
2926 if (priv->plat->axi)
2927 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2928
2929 /* DMA CSR Channel configuration */
2930 for (chan = 0; chan < dma_csr_ch; chan++) {
2931 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2932 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2933 }
2934
2935 /* DMA RX Channel Configuration */
2936 for (chan = 0; chan < rx_channels_count; chan++) {
2937 rx_q = &priv->dma_conf.rx_queue[chan];
2938
2939 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2940 rx_q->dma_rx_phy, chan);
2941
2942 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2943 (rx_q->buf_alloc_num *
2944 sizeof(struct dma_desc));
2945 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2946 rx_q->rx_tail_addr, chan);
2947 }
2948
2949 /* DMA TX Channel Configuration */
2950 for (chan = 0; chan < tx_channels_count; chan++) {
2951 tx_q = &priv->dma_conf.tx_queue[chan];
2952
2953 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2954 tx_q->dma_tx_phy, chan);
2955
2956 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2957 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2958 tx_q->tx_tail_addr, chan);
2959 }
2960
2961 return ret;
2962 }
2963
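/* (Re)arm the TX coalescing timer for @queue. */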
2964 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2965 {
2966 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2967
2968 hrtimer_start(&tx_q->txtimer,
2969 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2970 HRTIMER_MODE_REL);
2971 }
2972
2973 /**
2974 * stmmac_tx_timer - mitigation sw timer for tx.
2975 * @t: pointer to the expiring hrtimer (embedded in the TX queue)
2976 * Description:
2977 * This is the timer handler to directly invoke the stmmac_tx_clean.
2978 */
2979 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2980 {
2981 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2982 struct stmmac_priv *priv = tx_q->priv_data;
2983 struct stmmac_channel *ch;
2984 struct napi_struct *napi;
2985
2986 ch = &priv->channel[tx_q->queue_index];
2987 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2988
2989 if (likely(napi_schedule_prep(napi))) {
2990 unsigned long flags;
2991
2992 spin_lock_irqsave(&ch->lock, flags);
2993 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2994 spin_unlock_irqrestore(&ch->lock, flags);
2995 __napi_schedule(napi);
2996 }
2997
2998 return HRTIMER_NORESTART;
2999 }
3000
3001 /**
3002 * stmmac_init_coalesce - init mitigation options.
3003 * @priv: driver private structure
3004 * Description:
3005 * This inits the coalesce parameters: i.e. timer rate,
3006 * timer handler and default threshold used for enabling the
3007 * interrupt on completion bit.
3008 */
3009 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3010 {
3011 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3012 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3013 u32 chan;
3014
3015 for (chan = 0; chan < tx_channel_count; chan++) {
3016 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3017
3018 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3019 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3020
3021 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3022 tx_q->txtimer.function = stmmac_tx_timer;
3023 }
3024
3025 for (chan = 0; chan < rx_channel_count; chan++)
3026 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3027 }
3028
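/* Program the TX and RX descriptor ring lengths into the DMA channels. */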
3029 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3030 {
3031 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3032 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3033 u32 chan;
3034
3035 /* set TX ring length */
3036 for (chan = 0; chan < tx_channels_count; chan++)
3037 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3038 (priv->dma_conf.dma_tx_size - 1), chan);
3039
3040 /* set RX ring length */
3041 for (chan = 0; chan < rx_channels_count; chan++)
3042 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3043 (priv->dma_conf.dma_rx_size - 1), chan);
3044 }
3045
3046 /**
3047 * stmmac_set_tx_queue_weight - Set TX queue weight
3048 * @priv: driver private structure
3049 * Description: It is used for setting TX queues weight
3050 */
3051 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3052 {
3053 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3054 u32 weight;
3055 u32 queue;
3056
3057 for (queue = 0; queue < tx_queues_count; queue++) {
3058 weight = priv->plat->tx_queues_cfg[queue].weight;
3059 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3060 }
3061 }
3062
3063 /**
3064 * stmmac_configure_cbs - Configure CBS in TX queue
3065 * @priv: driver private structure
3066 * Description: It is used for configuring CBS in AVB TX queues
3067 */
3068 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3069 {
3070 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3071 u32 mode_to_use;
3072 u32 queue;
3073
3074 /* queue 0 is reserved for legacy traffic */
3075 for (queue = 1; queue < tx_queues_count; queue++) {
3076 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3077 if (mode_to_use == MTL_QUEUE_DCB)
3078 continue;
3079
3080 stmmac_config_cbs(priv, priv->hw,
3081 priv->plat->tx_queues_cfg[queue].send_slope,
3082 priv->plat->tx_queues_cfg[queue].idle_slope,
3083 priv->plat->tx_queues_cfg[queue].high_credit,
3084 priv->plat->tx_queues_cfg[queue].low_credit,
3085 queue);
3086 }
3087 }
3088
3089 /**
3090 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3091 * @priv: driver private structure
3092 * Description: It is used for mapping RX queues to RX dma channels
3093 */
3094 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3095 {
3096 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3097 u32 queue;
3098 u32 chan;
3099
3100 for (queue = 0; queue < rx_queues_count; queue++) {
3101 chan = priv->plat->rx_queues_cfg[queue].chan;
3102 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3103 }
3104 }
3105
3106 /**
3107 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3108 * @priv: driver private structure
3109 * Description: It is used for configuring the RX Queue Priority
3110 */
3111 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3112 {
3113 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3114 u32 queue;
3115 u32 prio;
3116
3117 for (queue = 0; queue < rx_queues_count; queue++) {
3118 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3119 continue;
3120
3121 prio = priv->plat->rx_queues_cfg[queue].prio;
3122 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3123 }
3124 }
3125
3126 /**
3127 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3128 * @priv: driver private structure
3129 * Description: It is used for configuring the TX Queue Priority
3130 */
3131 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3132 {
3133 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3134 u32 queue;
3135 u32 prio;
3136
3137 for (queue = 0; queue < tx_queues_count; queue++) {
3138 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3139 continue;
3140
3141 prio = priv->plat->tx_queues_cfg[queue].prio;
3142 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3143 }
3144 }
3145
3146 /**
3147 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3148 * @priv: driver private structure
3149 * Description: It is used for configuring the RX queue routing
3150 */
3151 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3152 {
3153 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3154 u32 queue;
3155 u8 packet;
3156
3157 for (queue = 0; queue < rx_queues_count; queue++) {
3158 /* no specific packet type routing specified for the queue */
3159 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3160 continue;
3161
3162 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3163 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3164 }
3165 }
3166
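/**
 * stmmac_mac_config_rss - Configure Receive Side Scaling
 * @priv: driver private structure
 * Description: enable RSS only when both the hardware and the platform
 * support it and NETIF_F_RXHASH is set, then program the RSS configuration.
 */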
3167 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3168 {
3169 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3170 priv->rss.enable = false;
3171 return;
3172 }
3173
3174 if (priv->dev->features & NETIF_F_RXHASH)
3175 priv->rss.enable = true;
3176 else
3177 priv->rss.enable = false;
3178
3179 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3180 priv->plat->rx_queues_to_use);
3181 }
3182
3183 /**
3184 * stmmac_mtl_configuration - Configure MTL
3185 * @priv: driver private structure
3186 * Description: It is used for configuring MTL
3187 */
3188 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3189 {
3190 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3191 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3192
3193 if (tx_queues_count > 1)
3194 stmmac_set_tx_queue_weight(priv);
3195
3196 /* Configure MTL RX algorithms */
3197 if (rx_queues_count > 1)
3198 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3199 priv->plat->rx_sched_algorithm);
3200
3201 /* Configure MTL TX algorithms */
3202 if (tx_queues_count > 1)
3203 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3204 priv->plat->tx_sched_algorithm);
3205
3206 /* Configure CBS in AVB TX queues */
3207 if (tx_queues_count > 1)
3208 stmmac_configure_cbs(priv);
3209
3210 /* Map RX MTL to DMA channels */
3211 stmmac_rx_queue_dma_chan_map(priv);
3212
3213 /* Enable MAC RX Queues */
3214 stmmac_mac_enable_rx_queues(priv);
3215
3216 /* Set RX priorities */
3217 if (rx_queues_count > 1)
3218 stmmac_mac_config_rx_queues_prio(priv);
3219
3220 /* Set TX priorities */
3221 if (tx_queues_count > 1)
3222 stmmac_mac_config_tx_queues_prio(priv);
3223
3224 /* Set RX routing */
3225 if (rx_queues_count > 1)
3226 stmmac_mac_config_rx_queues_routing(priv);
3227
3228 /* Receive Side Scaling */
3229 if (rx_queues_count > 1)
3230 stmmac_mac_config_rss(priv);
3231 }
3232
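/* Enable the MAC safety features when the hardware advertises them through
 * the ASP capability field.
 */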
3233 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3234 {
3235 if (priv->dma_cap.asp) {
3236 netdev_info(priv->dev, "Enabling Safety Features\n");
3237 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3238 priv->plat->safety_feat_cfg);
3239 } else {
3240 netdev_info(priv->dev, "No Safety Features support found\n");
3241 }
3242 }
3243
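/* Create the singlethread workqueue used to run the Frame Preemption (FPE)
 * handshake task. Returns 0 on success or -ENOMEM on failure.
 */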
3244 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3245 {
3246 char *name;
3247
3248 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3249 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3250
3251 name = priv->wq_name;
3252 sprintf(name, "%s-fpe", priv->dev->name);
3253
3254 priv->fpe_wq = create_singlethread_workqueue(name);
3255 if (!priv->fpe_wq) {
3256 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3257
3258 return -ENOMEM;
3259 }
3260 netdev_info(priv->dev, "FPE workqueue start");
3261
3262 return 0;
3263 }
3264
3265 /**
3266 * stmmac_hw_setup - setup mac in a usable state.
3267 * @dev : pointer to the device structure.
3268 * @ptp_register: register PTP if set
3269 * Description:
3270 * this is the main function to set up the HW in a usable state: the DMA
3271 * engine is reset, the core registers are configured (e.g. AXI,
3272 * checksum features, timers) and the DMA is ready to start receiving and
3273 * transmitting.
3274 * Return value:
3275 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3276 * file on failure.
3277 */
3278 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3279 {
3280 struct stmmac_priv *priv = netdev_priv(dev);
3281 u32 rx_cnt = priv->plat->rx_queues_to_use;
3282 u32 tx_cnt = priv->plat->tx_queues_to_use;
3283 bool sph_en;
3284 u32 chan;
3285 int ret;
3286
3287 /* DMA initialization and SW reset */
3288 ret = stmmac_init_dma_engine(priv);
3289 if (ret < 0) {
3290 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3291 __func__);
3292 return ret;
3293 }
3294
3295 /* Copy the MAC addr into the HW */
3296 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3297
3298 /* PS and related bits will be programmed according to the speed */
3299 if (priv->hw->pcs) {
3300 int speed = priv->plat->mac_port_sel_speed;
3301
3302 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3303 (speed == SPEED_1000)) {
3304 priv->hw->ps = speed;
3305 } else {
3306 dev_warn(priv->device, "invalid port speed\n");
3307 priv->hw->ps = 0;
3308 }
3309 }
3310
3311 /* Initialize the MAC Core */
3312 stmmac_core_init(priv, priv->hw, dev);
3313
3314 /* Initialize MTL */
3315 stmmac_mtl_configuration(priv);
3316
3317 /* Initialize Safety Features */
3318 stmmac_safety_feat_configuration(priv);
3319
3320 ret = stmmac_rx_ipc(priv, priv->hw);
3321 if (!ret) {
3322 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3323 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3324 priv->hw->rx_csum = 0;
3325 }
3326
3327 /* Enable the MAC Rx/Tx */
3328 stmmac_mac_set(priv, priv->ioaddr, true);
3329
3330 /* Set the HW DMA mode and the COE */
3331 stmmac_dma_operation_mode(priv);
3332
3333 stmmac_mmc_setup(priv);
3334
3335 if (ptp_register) {
3336 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3337 if (ret < 0)
3338 netdev_warn(priv->dev,
3339 "failed to enable PTP reference clock: %pe\n",
3340 ERR_PTR(ret));
3341 }
3342
3343 ret = stmmac_init_ptp(priv);
3344 if (ret == -EOPNOTSUPP)
3345 netdev_info(priv->dev, "PTP not supported by HW\n");
3346 else if (ret)
3347 netdev_warn(priv->dev, "PTP init failed\n");
3348 else if (ptp_register)
3349 stmmac_ptp_register(priv);
3350
3351 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3352
3353 /* Convert the timer from msec to usec */
3354 if (!priv->tx_lpi_timer)
3355 priv->tx_lpi_timer = eee_timer * 1000;
3356
3357 if (priv->use_riwt) {
3358 u32 queue;
3359
3360 for (queue = 0; queue < rx_cnt; queue++) {
3361 if (!priv->rx_riwt[queue])
3362 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3363
3364 stmmac_rx_watchdog(priv, priv->ioaddr,
3365 priv->rx_riwt[queue], queue);
3366 }
3367 }
3368
3369 if (priv->hw->pcs)
3370 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3371
3372 /* set TX and RX rings length */
3373 stmmac_set_rings_length(priv);
3374
3375 /* Enable TSO */
3376 if (priv->tso) {
3377 for (chan = 0; chan < tx_cnt; chan++) {
3378 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3379
3380 /* TSO and TBS cannot co-exist */
3381 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3382 continue;
3383
3384 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3385 }
3386 }
3387
3388 /* Enable Split Header */
3389 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3390 for (chan = 0; chan < rx_cnt; chan++)
3391 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3392
3393
3394 /* VLAN Tag Insertion */
3395 if (priv->dma_cap.vlins)
3396 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3397
3398 /* TBS */
3399 for (chan = 0; chan < tx_cnt; chan++) {
3400 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3401 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3402
3403 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3404 }
3405
3406 /* Configure real RX and TX queues */
3407 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3408 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3409
3410 /* Start the ball rolling... */
3411 stmmac_start_all_dma(priv);
3412
3413 if (priv->dma_cap.fpesel) {
3414 stmmac_fpe_start_wq(priv);
3415
3416 if (priv->plat->fpe_cfg->enable)
3417 stmmac_fpe_handshake(priv, true);
3418 }
3419
3420 return 0;
3421 }
3422
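/* Undo the clock setup done by stmmac_hw_setup(): disable and unprepare the
 * PTP reference clock.
 */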
3423 static void stmmac_hw_teardown(struct net_device *dev)
3424 {
3425 struct stmmac_priv *priv = netdev_priv(dev);
3426
3427 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3428 }
3429
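/**
 * stmmac_free_irq - free the IRQ lines requested for the device
 * @dev: net device structure
 * @irq_err: stage at which IRQ requesting stopped (REQ_IRQ_ERR_ALL frees all)
 * @irq_idx: number of per-queue IRQs already requested at the failing stage
 * Description: release the per-queue and auxiliary IRQ lines in reverse
 * order, falling through the stages so that only lines actually requested
 * are freed.
 */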
3430 static void stmmac_free_irq(struct net_device *dev,
3431 enum request_irq_err irq_err, int irq_idx)
3432 {
3433 struct stmmac_priv *priv = netdev_priv(dev);
3434 int j;
3435
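/* Error unwinding: the error code selects the entry point and each case
 * falls through to free the IRQs that were requested before the failure,
 * in reverse order of allocation.
 */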
3436 switch (irq_err) {
3437 case REQ_IRQ_ERR_ALL:
3438 irq_idx = priv->plat->tx_queues_to_use;
3439 fallthrough;
3440 case REQ_IRQ_ERR_TX:
3441 for (j = irq_idx - 1; j >= 0; j--) {
3442 if (priv->tx_irq[j] > 0) {
3443 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3444 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3445 }
3446 }
3447 irq_idx = priv->plat->rx_queues_to_use;
3448 fallthrough;
3449 case REQ_IRQ_ERR_RX:
3450 for (j = irq_idx - 1; j >= 0; j--) {
3451 if (priv->rx_irq[j] > 0) {
3452 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3453 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3454 }
3455 }
3456
3457 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3458 free_irq(priv->sfty_ue_irq, dev);
3459 fallthrough;
3460 case REQ_IRQ_ERR_SFTY_UE:
3461 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3462 free_irq(priv->sfty_ce_irq, dev);
3463 fallthrough;
3464 case REQ_IRQ_ERR_SFTY_CE:
3465 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3466 free_irq(priv->lpi_irq, dev);
3467 fallthrough;
3468 case REQ_IRQ_ERR_LPI:
3469 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3470 free_irq(priv->wol_irq, dev);
3471 fallthrough;
3472 case REQ_IRQ_ERR_WOL:
3473 free_irq(dev->irq, dev);
3474 fallthrough;
3475 case REQ_IRQ_ERR_MAC:
3476 case REQ_IRQ_ERR_NO:
3477 /* If the MAC IRQ request failed, there are no more IRQs to free */
3478 break;
3479 }
3480 }
3481
3482 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3483 {
3484 struct stmmac_priv *priv = netdev_priv(dev);
3485 enum request_irq_err irq_err;
3486 cpumask_t cpu_mask;
3487 int irq_idx = 0;
3488 char *int_name;
3489 int ret;
3490 int i;
3491
3492 /* For common interrupt */
3493 int_name = priv->int_name_mac;
3494 sprintf(int_name, "%s:%s", dev->name, "mac");
3495 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3496 0, int_name, dev);
3497 if (unlikely(ret < 0)) {
3498 netdev_err(priv->dev,
3499 "%s: alloc mac MSI %d (error: %d)\n",
3500 __func__, dev->irq, ret);
3501 irq_err = REQ_IRQ_ERR_MAC;
3502 goto irq_error;
3503 }
3504
3505 /* Request the Wake IRQ in case a separate
3506 * line is used for WoL
3507 */
3508 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3509 int_name = priv->int_name_wol;
3510 sprintf(int_name, "%s:%s", dev->name, "wol");
3511 ret = request_irq(priv->wol_irq,
3512 stmmac_mac_interrupt,
3513 0, int_name, dev);
3514 if (unlikely(ret < 0)) {
3515 netdev_err(priv->dev,
3516 "%s: alloc wol MSI %d (error: %d)\n",
3517 __func__, priv->wol_irq, ret);
3518 irq_err = REQ_IRQ_ERR_WOL;
3519 goto irq_error;
3520 }
3521 }
3522
3523 /* Request the LPI IRQ in case a separate
3524 * line is used for LPI
3525 */
3526 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3527 int_name = priv->int_name_lpi;
3528 sprintf(int_name, "%s:%s", dev->name, "lpi");
3529 ret = request_irq(priv->lpi_irq,
3530 stmmac_mac_interrupt,
3531 0, int_name, dev);
3532 if (unlikely(ret < 0)) {
3533 netdev_err(priv->dev,
3534 "%s: alloc lpi MSI %d (error: %d)\n",
3535 __func__, priv->lpi_irq, ret);
3536 irq_err = REQ_IRQ_ERR_LPI;
3537 goto irq_error;
3538 }
3539 }
3540
3541 /* Request the Safety Feature Correctable Error line in
3542 * case a separate line is used for it
3543 */
3544 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3545 int_name = priv->int_name_sfty_ce;
3546 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3547 ret = request_irq(priv->sfty_ce_irq,
3548 stmmac_safety_interrupt,
3549 0, int_name, dev);
3550 if (unlikely(ret < 0)) {
3551 netdev_err(priv->dev,
3552 "%s: alloc sfty ce MSI %d (error: %d)\n",
3553 __func__, priv->sfty_ce_irq, ret);
3554 irq_err = REQ_IRQ_ERR_SFTY_CE;
3555 goto irq_error;
3556 }
3557 }
3558
3559 /* Request the Safety Feature Uncorrectable Error line in
3560 * case a separate line is used for it
3561 */
3562 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3563 int_name = priv->int_name_sfty_ue;
3564 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3565 ret = request_irq(priv->sfty_ue_irq,
3566 stmmac_safety_interrupt,
3567 0, int_name, dev);
3568 if (unlikely(ret < 0)) {
3569 netdev_err(priv->dev,
3570 "%s: alloc sfty ue MSI %d (error: %d)\n",
3571 __func__, priv->sfty_ue_irq, ret);
3572 irq_err = REQ_IRQ_ERR_SFTY_UE;
3573 goto irq_error;
3574 }
3575 }
3576
3577 /* Request Rx MSI irq */
3578 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3579 if (i >= MTL_MAX_RX_QUEUES)
3580 break;
3581 if (priv->rx_irq[i] == 0)
3582 continue;
3583
3584 int_name = priv->int_name_rx_irq[i];
3585 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3586 ret = request_irq(priv->rx_irq[i],
3587 stmmac_msi_intr_rx,
3588 0, int_name, &priv->dma_conf.rx_queue[i]);
3589 if (unlikely(ret < 0)) {
3590 netdev_err(priv->dev,
3591 "%s: alloc rx-%d MSI %d (error: %d)\n",
3592 __func__, i, priv->rx_irq[i], ret);
3593 irq_err = REQ_IRQ_ERR_RX;
3594 irq_idx = i;
3595 goto irq_error;
3596 }
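/* Spread the per-queue RX IRQs across the online CPUs (round-robin) */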
3597 cpumask_clear(&cpu_mask);
3598 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3599 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3600 }
3601
3602 /* Request Tx MSI irq */
3603 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3604 if (i >= MTL_MAX_TX_QUEUES)
3605 break;
3606 if (priv->tx_irq[i] == 0)
3607 continue;
3608
3609 int_name = priv->int_name_tx_irq[i];
3610 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3611 ret = request_irq(priv->tx_irq[i],
3612 stmmac_msi_intr_tx,
3613 0, int_name, &priv->dma_conf.tx_queue[i]);
3614 if (unlikely(ret < 0)) {
3615 netdev_err(priv->dev,
3616 "%s: alloc tx-%d MSI %d (error: %d)\n",
3617 __func__, i, priv->tx_irq[i], ret);
3618 irq_err = REQ_IRQ_ERR_TX;
3619 irq_idx = i;
3620 goto irq_error;
3621 }
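/* Likewise, distribute the per-queue TX IRQs across the online CPUs */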
3622 cpumask_clear(&cpu_mask);
3623 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3624 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3625 }
3626
3627 return 0;
3628
3629 irq_error:
3630 stmmac_free_irq(dev, irq_err, irq_idx);
3631 return ret;
3632 }
3633
3634 static int stmmac_request_irq_single(struct net_device *dev)
3635 {
3636 struct stmmac_priv *priv = netdev_priv(dev);
3637 enum request_irq_err irq_err;
3638 int ret;
3639
3640 ret = request_irq(dev->irq, stmmac_interrupt,
3641 IRQF_SHARED, dev->name, dev);
3642 if (unlikely(ret < 0)) {
3643 netdev_err(priv->dev,
3644 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3645 __func__, dev->irq, ret);
3646 irq_err = REQ_IRQ_ERR_MAC;
3647 goto irq_error;
3648 }
3649
3650 /* Request the Wake IRQ in case a separate
3651 * line is used for WoL
3652 */
3653 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3654 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3655 IRQF_SHARED, dev->name, dev);
3656 if (unlikely(ret < 0)) {
3657 netdev_err(priv->dev,
3658 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3659 __func__, priv->wol_irq, ret);
3660 irq_err = REQ_IRQ_ERR_WOL;
3661 goto irq_error;
3662 }
3663 }
3664
3665 /* Request the LPI IRQ in case a separate line is used for LPI */
3666 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3667 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3668 IRQF_SHARED, dev->name, dev);
3669 if (unlikely(ret < 0)) {
3670 netdev_err(priv->dev,
3671 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3672 __func__, priv->lpi_irq, ret);
3673 irq_err = REQ_IRQ_ERR_LPI;
3674 goto irq_error;
3675 }
3676 }
3677
3678 return 0;
3679
3680 irq_error:
3681 stmmac_free_irq(dev, irq_err, 0);
3682 return ret;
3683 }
3684
3685 static int stmmac_request_irq(struct net_device *dev)
3686 {
3687 struct stmmac_priv *priv = netdev_priv(dev);
3688 int ret;
3689
3690 /* Request the IRQ lines */
3691 if (priv->plat->multi_msi_en)
3692 ret = stmmac_request_irq_multi_msi(dev);
3693 else
3694 ret = stmmac_request_irq_single(dev);
3695
3696 return ret;
3697 }
3698
3699 /**
3700 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3701 * @priv: driver private structure
3702 * @mtu: MTU to setup the dma queue and buf with
3703 * Description: Allocate and generate a dma_conf based on the provided MTU.
3704 * Allocate the Tx/Rx DMA queue and init them.
3705 * Return value:
3706 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3707 */
3708 static struct stmmac_dma_conf *
3709 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3710 {
3711 struct stmmac_dma_conf *dma_conf;
3712 int chan, bfsize, ret;
3713
3714 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3715 if (!dma_conf) {
3716 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3717 __func__);
3718 return ERR_PTR(-ENOMEM);
3719 }
3720
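/* Try a 16KiB buffer first (for large MTUs on HW that supports it);
 * otherwise derive the buffer size from the MTU.
 */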
3721 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3722 if (bfsize < 0)
3723 bfsize = 0;
3724
3725 if (bfsize < BUF_SIZE_16KiB)
3726 bfsize = stmmac_set_bfsize(mtu, 0);
3727
3728 dma_conf->dma_buf_sz = bfsize;
3729 /* Choose the Tx/Rx ring sizes from the ones already defined in the
3730 * priv struct, if any.
3731 */
3732 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3733 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3734
3735 if (!dma_conf->dma_tx_size)
3736 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3737 if (!dma_conf->dma_rx_size)
3738 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3739
3740 /* Earlier check for TBS */
3741 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3742 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3743 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3744
3745 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3746 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3747 }
3748
3749 ret = alloc_dma_desc_resources(priv, dma_conf);
3750 if (ret < 0) {
3751 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3752 __func__);
3753 goto alloc_error;
3754 }
3755
3756 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3757 if (ret < 0) {
3758 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3759 __func__);
3760 goto init_error;
3761 }
3762
3763 return dma_conf;
3764
3765 init_error:
3766 free_dma_desc_resources(priv, dma_conf);
3767 alloc_error:
3768 kfree(dma_conf);
3769 return ERR_PTR(ret);
3770 }
3771
3772 /**
3773 * __stmmac_open - open entry point of the driver
3774 * @dev : pointer to the device structure.
3775 * @dma_conf : structure to take the dma data
3776 * Description:
3777 * This function is the open entry point of the driver.
3778 * Return value:
3779 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3780 * file on failure.
3781 */
3782 static int __stmmac_open(struct net_device *dev,
3783 struct stmmac_dma_conf *dma_conf)
3784 {
3785 struct stmmac_priv *priv = netdev_priv(dev);
3786 int mode = priv->plat->phy_interface;
3787 u32 chan;
3788 int ret;
3789
3790 ret = pm_runtime_resume_and_get(priv->device);
3791 if (ret < 0)
3792 return ret;
3793
3794 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3795 priv->hw->pcs != STMMAC_PCS_RTBI &&
3796 (!priv->hw->xpcs ||
3797 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3798 ret = stmmac_init_phy(dev);
3799 if (ret) {
3800 netdev_err(priv->dev,
3801 "%s: Cannot attach to PHY (error: %d)\n",
3802 __func__, ret);
3803 goto init_phy_error;
3804 }
3805 }
3806
3807 /* Extra statistics */
3808 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3809 priv->xstats.threshold = tc;
3810
3811 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3812
3813 buf_sz = dma_conf->dma_buf_sz;
3814 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3815
3816 stmmac_reset_queues_param(priv);
3817
3818 if (priv->plat->serdes_powerup) {
3819 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3820 if (ret < 0) {
3821 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3822 __func__);
3823 goto init_error;
3824 }
3825 }
3826
3827 ret = stmmac_hw_setup(dev, true);
3828 if (ret < 0) {
3829 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3830 goto init_error;
3831 }
3832
3833 stmmac_init_coalesce(priv);
3834
3835 phylink_start(priv->phylink);
3836 /* We may have called phylink_speed_down before */
3837 phylink_speed_up(priv->phylink);
3838
3839 ret = stmmac_request_irq(dev);
3840 if (ret)
3841 goto irq_error;
3842
3843 stmmac_enable_all_queues(priv);
3844 netif_tx_start_all_queues(priv->dev);
3845 stmmac_enable_all_dma_irq(priv);
3846
3847 return 0;
3848
3849 irq_error:
3850 phylink_stop(priv->phylink);
3851
3852 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3853 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3854
3855 stmmac_hw_teardown(dev);
3856 init_error:
3857 free_dma_desc_resources(priv, &priv->dma_conf);
3858 phylink_disconnect_phy(priv->phylink);
3859 init_phy_error:
3860 pm_runtime_put(priv->device);
3861 return ret;
3862 }
3863
3864 static int stmmac_open(struct net_device *dev)
3865 {
3866 struct stmmac_priv *priv = netdev_priv(dev);
3867 struct stmmac_dma_conf *dma_conf;
3868 int ret;
3869
3870 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3871 if (IS_ERR(dma_conf))
3872 return PTR_ERR(dma_conf);
3873
3874 ret = __stmmac_open(dev, dma_conf);
3875 kfree(dma_conf);
3876 return ret;
3877 }
3878
3879 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3880 {
3881 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3882
3883 if (priv->fpe_wq)
3884 destroy_workqueue(priv->fpe_wq);
3885
3886 netdev_info(priv->dev, "FPE workqueue stop");
3887 }
3888
3889 /**
3890 * stmmac_release - close entry point of the driver
3891 * @dev : device pointer.
3892 * Description:
3893 * This is the stop entry point of the driver.
3894 */
3895 static int stmmac_release(struct net_device *dev)
3896 {
3897 struct stmmac_priv *priv = netdev_priv(dev);
3898 u32 chan;
3899
3900 if (device_may_wakeup(priv->device))
3901 phylink_speed_down(priv->phylink, false);
3902 /* Stop and disconnect the PHY */
3903 phylink_stop(priv->phylink);
3904 phylink_disconnect_phy(priv->phylink);
3905
3906 stmmac_disable_all_queues(priv);
3907
3908 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3909 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3910
3911 netif_tx_disable(dev);
3912
3913 /* Free the IRQ lines */
3914 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3915
3916 if (priv->eee_enabled) {
3917 priv->tx_path_in_lpi_mode = false;
3918 del_timer_sync(&priv->eee_ctrl_timer);
3919 }
3920
3921 /* Stop TX/RX DMA and clear the descriptors */
3922 stmmac_stop_all_dma(priv);
3923
3924 /* Release and free the Rx/Tx resources */
3925 free_dma_desc_resources(priv, &priv->dma_conf);
3926
3927 /* Disable the MAC Rx/Tx */
3928 stmmac_mac_set(priv, priv->ioaddr, false);
3929
3930 /* Power down the SerDes, if present */
3931 if (priv->plat->serdes_powerdown)
3932 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3933
3934 netif_carrier_off(dev);
3935
3936 stmmac_release_ptp(priv);
3937
3938 pm_runtime_put(priv->device);
3939
3940 if (priv->dma_cap.fpesel)
3941 stmmac_fpe_stop_wq(priv);
3942
3943 return 0;
3944 }
3945
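/* Offload VLAN tag insertion to the HW: program the tag (and, for 802.1AD
 * double-tagged frames, the inner tag) into a TX descriptor and hand it to
 * the DMA. Returns true when a descriptor has been consumed.
 */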
3946 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3947 struct stmmac_tx_queue *tx_q)
3948 {
3949 u16 tag = 0x0, inner_tag = 0x0;
3950 u32 inner_type = 0x0;
3951 struct dma_desc *p;
3952
3953 if (!priv->dma_cap.vlins)
3954 return false;
3955 if (!skb_vlan_tag_present(skb))
3956 return false;
3957 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3958 inner_tag = skb_vlan_tag_get(skb);
3959 inner_type = STMMAC_VLAN_INSERT;
3960 }
3961
3962 tag = skb_vlan_tag_get(skb);
3963
3964 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3965 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3966 else
3967 p = &tx_q->dma_tx[tx_q->cur_tx];
3968
3969 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3970 return false;
3971
3972 stmmac_set_tx_owner(priv, p);
3973 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3974 return true;
3975 }
3976
3977 /**
3978 * stmmac_tso_allocator - allocate and fill descriptors for the TSO payload
3979 * @priv: driver private structure
3980 * @des: buffer start address
3981 * @total_len: total length to fill in descriptors
3982 * @last_segment: condition for the last descriptor
3983 * @queue: TX queue index
3984 * Description:
3985 * This function fills the descriptors and requests new descriptors according
3986 * to the buffer length to fill.
3987 */
3988 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3989 int total_len, bool last_segment, u32 queue)
3990 {
3991 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3992 struct dma_desc *desc;
3993 u32 buff_size;
3994 int tmp_len;
3995
3996 tmp_len = total_len;
3997
3998 while (tmp_len > 0) {
3999 dma_addr_t curr_addr;
4000
4001 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4002 priv->dma_conf.dma_tx_size);
4003 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4004
4005 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4006 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4007 else
4008 desc = &tx_q->dma_tx[tx_q->cur_tx];
4009
4010 curr_addr = des + (total_len - tmp_len);
4011 if (priv->dma_cap.addr64 <= 32)
4012 desc->des0 = cpu_to_le32(curr_addr);
4013 else
4014 stmmac_set_desc_addr(priv, desc, curr_addr);
4015
4016 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4017 TSO_MAX_BUFF_SIZE : tmp_len;
4018
4019 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4020 0, 1,
4021 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4022 0, 0);
4023
4024 tmp_len -= TSO_MAX_BUFF_SIZE;
4025 }
4026 }
4027
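/* Make the newly prepared TX descriptors visible to the HW and advance the
 * queue's tail pointer so the DMA engine starts fetching them.
 */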
4028 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4029 {
4030 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4031 int desc_size;
4032
4033 if (likely(priv->extend_desc))
4034 desc_size = sizeof(struct dma_extended_desc);
4035 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4036 desc_size = sizeof(struct dma_edesc);
4037 else
4038 desc_size = sizeof(struct dma_desc);
4039
4040 /* The OWN bit must be the last thing set when preparing the
4041 * descriptor, and a barrier is then needed to make sure that
4042 * everything is coherent before handing control to the DMA engine.
4043 */
4044 wmb();
4045
4046 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4047 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4048 }
4049
4050 /**
4051 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4052 * @skb : the socket buffer
4053 * @dev : device pointer
4054 * Description: this is the transmit function that is called on TSO frames
4055 * (support available on GMAC4 and newer chips).
4056 * The diagram below shows the ring programming in case of TSO frames:
4057 *
4058 * First Descriptor
4059 * --------
4060 * | DES0 |---> buffer1 = L2/L3/L4 header
4061 * | DES1 |---> TCP Payload (can continue on next descr...)
4062 * | DES2 |---> buffer 1 and 2 len
4063 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4064 * --------
4065 * |
4066 * ...
4067 * |
4068 * --------
4069 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4070 * | DES1 | --|
4071 * | DES2 | --> buffer 1 and 2 len
4072 * | DES3 |
4073 * --------
4074 *
4075 * The MSS is fixed while TSO is enabled, so the TDES3 context field is not reprogrammed for every frame.
4076 */
4077 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4078 {
4079 struct dma_desc *desc, *first, *mss_desc = NULL;
4080 struct stmmac_priv *priv = netdev_priv(dev);
4081 int nfrags = skb_shinfo(skb)->nr_frags;
4082 u32 queue = skb_get_queue_mapping(skb);
4083 unsigned int first_entry, tx_packets;
4084 int tmp_pay_len = 0, first_tx;
4085 struct stmmac_tx_queue *tx_q;
4086 bool has_vlan, set_ic;
4087 u8 proto_hdr_len, hdr;
4088 u32 pay_len, mss;
4089 dma_addr_t des;
4090 int i;
4091
4092 tx_q = &priv->dma_conf.tx_queue[queue];
4093 first_tx = tx_q->cur_tx;
4094
4095 /* Compute header lengths */
4096 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4097 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4098 hdr = sizeof(struct udphdr);
4099 } else {
4100 proto_hdr_len = skb_tcp_all_headers(skb);
4101 hdr = tcp_hdrlen(skb);
4102 }
4103
4104 /* Descriptor availability based on the threshold should be safe enough */
4105 if (unlikely(stmmac_tx_avail(priv, queue) <
4106 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4107 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4108 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4109 queue));
4110 /* This is a hard error, log it. */
4111 netdev_err(priv->dev,
4112 "%s: Tx Ring full when queue awake\n",
4113 __func__);
4114 }
4115 return NETDEV_TX_BUSY;
4116 }
4117
4118 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4119
4120 mss = skb_shinfo(skb)->gso_size;
4121
4122 /* set new MSS value if needed */
4123 if (mss != tx_q->mss) {
4124 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4125 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4126 else
4127 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4128
4129 stmmac_set_mss(priv, mss_desc, mss);
4130 tx_q->mss = mss;
4131 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4132 priv->dma_conf.dma_tx_size);
4133 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4134 }
4135
4136 if (netif_msg_tx_queued(priv)) {
4137 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4138 __func__, hdr, proto_hdr_len, pay_len, mss);
4139 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4140 skb->data_len);
4141 }
4142
4143 /* Check if VLAN can be inserted by HW */
4144 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4145
4146 first_entry = tx_q->cur_tx;
4147 WARN_ON(tx_q->tx_skbuff[first_entry]);
4148
4149 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4150 desc = &tx_q->dma_entx[first_entry].basic;
4151 else
4152 desc = &tx_q->dma_tx[first_entry];
4153 first = desc;
4154
4155 if (has_vlan)
4156 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4157
4158 /* first descriptor: fill Headers on Buf1 */
4159 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4160 DMA_TO_DEVICE);
4161 if (dma_mapping_error(priv->device, des))
4162 goto dma_map_err;
4163
4164 tx_q->tx_skbuff_dma[first_entry].buf = des;
4165 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4166 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4167 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4168
4169 if (priv->dma_cap.addr64 <= 32) {
4170 first->des0 = cpu_to_le32(des);
4171
4172 /* Fill start of payload in buff2 of first descriptor */
4173 if (pay_len)
4174 first->des1 = cpu_to_le32(des + proto_hdr_len);
4175
4176 /* If needed take extra descriptors to fill the remaining payload */
4177 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4178 } else {
4179 stmmac_set_desc_addr(priv, first, des);
4180 tmp_pay_len = pay_len;
4181 des += proto_hdr_len;
4182 pay_len = 0;
4183 }
4184
4185 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4186
4187 /* Prepare fragments */
4188 for (i = 0; i < nfrags; i++) {
4189 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4190
4191 des = skb_frag_dma_map(priv->device, frag, 0,
4192 skb_frag_size(frag),
4193 DMA_TO_DEVICE);
4194 if (dma_mapping_error(priv->device, des))
4195 goto dma_map_err;
4196
4197 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4198 (i == nfrags - 1), queue);
4199
4200 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4201 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4202 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4203 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4204 }
4205
4206 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4207
4208 /* Only the last descriptor gets to point to the skb. */
4209 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4210 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4211
4212 /* Manage tx mitigation */
4213 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4214 tx_q->tx_count_frames += tx_packets;
4215
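/* Decide whether to request a completion interrupt for this frame: always
 * when a HW TX timestamp is requested, otherwise only once every
 * tx_coal_frames packets.
 */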
4216 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4217 set_ic = true;
4218 else if (!priv->tx_coal_frames[queue])
4219 set_ic = false;
4220 else if (tx_packets > priv->tx_coal_frames[queue])
4221 set_ic = true;
4222 else if ((tx_q->tx_count_frames %
4223 priv->tx_coal_frames[queue]) < tx_packets)
4224 set_ic = true;
4225 else
4226 set_ic = false;
4227
4228 if (set_ic) {
4229 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4230 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4231 else
4232 desc = &tx_q->dma_tx[tx_q->cur_tx];
4233
4234 tx_q->tx_count_frames = 0;
4235 stmmac_set_tx_ic(priv, desc);
4236 priv->xstats.tx_set_ic_bit++;
4237 }
4238
4239 /* We've used all descriptors we need for this skb, however,
4240 * advance cur_tx so that it references a fresh descriptor.
4241 * ndo_start_xmit will fill this descriptor the next time it's
4242 * called and stmmac_tx_clean may clean up to this descriptor.
4243 */
4244 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4245
4246 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4247 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4248 __func__);
4249 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4250 }
4251
4252 dev->stats.tx_bytes += skb->len;
4253 priv->xstats.tx_tso_frames++;
4254 priv->xstats.tx_tso_nfrags += nfrags;
4255
4256 if (priv->sarc_type)
4257 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4258
4259 skb_tx_timestamp(skb);
4260
4261 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4262 priv->hwts_tx_en)) {
4263 /* declare that device is doing timestamping */
4264 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4265 stmmac_enable_tx_timestamp(priv, first);
4266 }
4267
4268 /* Complete the first descriptor before granting the DMA */
4269 stmmac_prepare_tso_tx_desc(priv, first, 1,
4270 proto_hdr_len,
4271 pay_len,
4272 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4273 hdr / 4, (skb->len - proto_hdr_len));
4274
4275 /* If context desc is used to change MSS */
4276 if (mss_desc) {
4277 /* Make sure that first descriptor has been completely
4278 * written, including its own bit. This is because MSS is
4279 * actually before first descriptor, so we need to make
4280 * sure that MSS's own bit is the last thing written.
4281 */
4282 dma_wmb();
4283 stmmac_set_tx_owner(priv, mss_desc);
4284 }
4285
4286 if (netif_msg_pktdata(priv)) {
4287 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4288 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4289 tx_q->cur_tx, first, nfrags);
4290 pr_info(">>> frame to be transmitted: ");
4291 print_pkt(skb->data, skb_headlen(skb));
4292 }
4293
4294 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4295
4296 stmmac_flush_tx_descriptors(priv, queue);
4297 stmmac_tx_timer_arm(priv, queue);
4298
4299 return NETDEV_TX_OK;
4300
4301 dma_map_err:
4302 dev_err(priv->device, "Tx dma map failed\n");
4303 dev_kfree_skb(skb);
4304 priv->dev->stats.tx_dropped++;
4305 return NETDEV_TX_OK;
4306 }
4307
4308 /**
4309 * stmmac_xmit - Tx entry point of the driver
4310 * @skb : the socket buffer
4311 * @dev : device pointer
4312 * Description : this is the tx entry point of the driver.
4313 * It programs the chain or the ring and supports oversized frames
4314 * and the SG feature.
4315 */
4316 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4317 {
4318 unsigned int first_entry, tx_packets, enh_desc;
4319 struct stmmac_priv *priv = netdev_priv(dev);
4320 unsigned int nopaged_len = skb_headlen(skb);
4321 int i, csum_insertion = 0, is_jumbo = 0;
4322 u32 queue = skb_get_queue_mapping(skb);
4323 int nfrags = skb_shinfo(skb)->nr_frags;
4324 int gso = skb_shinfo(skb)->gso_type;
4325 struct dma_edesc *tbs_desc = NULL;
4326 struct dma_desc *desc, *first;
4327 struct stmmac_tx_queue *tx_q;
4328 bool has_vlan, set_ic;
4329 int entry, first_tx;
4330 dma_addr_t des;
4331
4332 tx_q = &priv->dma_conf.tx_queue[queue];
4333 first_tx = tx_q->cur_tx;
4334
4335 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4336 stmmac_disable_eee_mode(priv);
4337
4338 /* Manage oversized TCP frames for GMAC4 device */
4339 if (skb_is_gso(skb) && priv->tso) {
4340 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4341 return stmmac_tso_xmit(skb, dev);
4342 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4343 return stmmac_tso_xmit(skb, dev);
4344 }
4345
4346 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4347 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4348 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4349 queue));
4350 /* This is a hard error, log it. */
4351 netdev_err(priv->dev,
4352 "%s: Tx Ring full when queue awake\n",
4353 __func__);
4354 }
4355 return NETDEV_TX_BUSY;
4356 }
4357
4358 /* Check if VLAN can be inserted by HW */
4359 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4360
4361 entry = tx_q->cur_tx;
4362 first_entry = entry;
4363 WARN_ON(tx_q->tx_skbuff[first_entry]);
4364
4365 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4366
4367 if (likely(priv->extend_desc))
4368 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4369 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4370 desc = &tx_q->dma_entx[entry].basic;
4371 else
4372 desc = tx_q->dma_tx + entry;
4373
4374 first = desc;
4375
4376 if (has_vlan)
4377 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4378
4379 enh_desc = priv->plat->enh_desc;
4380 /* To program the descriptors according to the size of the frame */
4381 if (enh_desc)
4382 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4383
4384 if (unlikely(is_jumbo)) {
4385 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4386 if (unlikely(entry < 0) && (entry != -EINVAL))
4387 goto dma_map_err;
4388 }
4389
4390 for (i = 0; i < nfrags; i++) {
4391 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4392 int len = skb_frag_size(frag);
4393 bool last_segment = (i == (nfrags - 1));
4394
4395 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4396 WARN_ON(tx_q->tx_skbuff[entry]);
4397
4398 if (likely(priv->extend_desc))
4399 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4400 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4401 desc = &tx_q->dma_entx[entry].basic;
4402 else
4403 desc = tx_q->dma_tx + entry;
4404
4405 des = skb_frag_dma_map(priv->device, frag, 0, len,
4406 DMA_TO_DEVICE);
4407 if (dma_mapping_error(priv->device, des))
4408 goto dma_map_err; /* should reuse desc w/o issues */
4409
4410 tx_q->tx_skbuff_dma[entry].buf = des;
4411
4412 stmmac_set_desc_addr(priv, desc, des);
4413
4414 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4415 tx_q->tx_skbuff_dma[entry].len = len;
4416 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4417 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4418
4419 /* Prepare the descriptor and set the own bit too */
4420 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4421 priv->mode, 1, last_segment, skb->len);
4422 }
4423
4424 /* Only the last descriptor gets to point to the skb. */
4425 tx_q->tx_skbuff[entry] = skb;
4426 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4427
4428 /* According to the coalesce parameter the IC bit for the latest
4429 * segment is reset and the timer re-started to clean the tx status.
4430 * This approach takes care of the fragments: desc is the first
4431 * element in case of no SG.
4432 */
4433 tx_packets = (entry + 1) - first_tx;
4434 tx_q->tx_count_frames += tx_packets;
4435
4436 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4437 set_ic = true;
4438 else if (!priv->tx_coal_frames[queue])
4439 set_ic = false;
4440 else if (tx_packets > priv->tx_coal_frames[queue])
4441 set_ic = true;
4442 else if ((tx_q->tx_count_frames %
4443 priv->tx_coal_frames[queue]) < tx_packets)
4444 set_ic = true;
4445 else
4446 set_ic = false;
4447
4448 if (set_ic) {
4449 if (likely(priv->extend_desc))
4450 desc = &tx_q->dma_etx[entry].basic;
4451 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4452 desc = &tx_q->dma_entx[entry].basic;
4453 else
4454 desc = &tx_q->dma_tx[entry];
4455
4456 tx_q->tx_count_frames = 0;
4457 stmmac_set_tx_ic(priv, desc);
4458 priv->xstats.tx_set_ic_bit++;
4459 }
4460
4461 /* We've used all descriptors we need for this skb, however,
4462 * advance cur_tx so that it references a fresh descriptor.
4463 * ndo_start_xmit will fill this descriptor the next time it's
4464 * called and stmmac_tx_clean may clean up to this descriptor.
4465 */
4466 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4467 tx_q->cur_tx = entry;
4468
4469 if (netif_msg_pktdata(priv)) {
4470 netdev_dbg(priv->dev,
4471 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4472 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4473 entry, first, nfrags);
4474
4475 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4476 print_pkt(skb->data, skb->len);
4477 }
4478
4479 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4480 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4481 __func__);
4482 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4483 }
4484
4485 dev->stats.tx_bytes += skb->len;
4486
4487 if (priv->sarc_type)
4488 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4489
4490 skb_tx_timestamp(skb);
4491
4492 /* Ready to fill the first descriptor and set the OWN bit w/o any
4493 * problems because all the descriptors are actually ready to be
4494 * passed to the DMA engine.
4495 */
4496 if (likely(!is_jumbo)) {
4497 bool last_segment = (nfrags == 0);
4498
4499 des = dma_map_single(priv->device, skb->data,
4500 nopaged_len, DMA_TO_DEVICE);
4501 if (dma_mapping_error(priv->device, des))
4502 goto dma_map_err;
4503
4504 tx_q->tx_skbuff_dma[first_entry].buf = des;
4505 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4506 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4507
4508 stmmac_set_desc_addr(priv, first, des);
4509
4510 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4511 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4512
4513 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4514 priv->hwts_tx_en)) {
4515 /* declare that device is doing timestamping */
4516 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4517 stmmac_enable_tx_timestamp(priv, first);
4518 }
4519
4520 /* Prepare the first descriptor setting the OWN bit too */
4521 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4522 csum_insertion, priv->mode, 0, last_segment,
4523 skb->len);
4524 }
4525
4526 if (tx_q->tbs & STMMAC_TBS_EN) {
4527 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4528
4529 tbs_desc = &tx_q->dma_entx[first_entry];
4530 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4531 }
4532
4533 stmmac_set_tx_owner(priv, first);
4534
4535 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4536
4537 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4538
4539 stmmac_flush_tx_descriptors(priv, queue);
4540 stmmac_tx_timer_arm(priv, queue);
4541
4542 return NETDEV_TX_OK;
4543
4544 dma_map_err:
4545 netdev_err(priv->dev, "Tx DMA map failed\n");
4546 dev_kfree_skb(skb);
4547 priv->dev->stats.tx_dropped++;
4548 return NETDEV_TX_OK;
4549 }
4550
4551 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4552 {
4553 struct vlan_ethhdr *veth;
4554 __be16 vlan_proto;
4555 u16 vlanid;
4556
4557 veth = (struct vlan_ethhdr *)skb->data;
4558 vlan_proto = veth->h_vlan_proto;
4559
4560 if ((vlan_proto == htons(ETH_P_8021Q) &&
4561 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4562 (vlan_proto == htons(ETH_P_8021AD) &&
4563 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4564 /* pop the vlan tag */
4565 vlanid = ntohs(veth->h_vlan_TCI);
4566 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4567 skb_pull(skb, VLAN_HLEN);
4568 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4569 }
4570 }
4571
4572 /**
4573 * stmmac_rx_refill - refill used skb preallocated buffers
4574 * @priv: driver private structure
4575 * @queue: RX queue index
4576 * Description : this is to refill the RX buffers used by the reception
4577 * process that is based on zero-copy.
4578 */
4579 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4580 {
4581 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4582 int dirty = stmmac_rx_dirty(priv, queue);
4583 unsigned int entry = rx_q->dirty_rx;
4584 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4585
4586 if (priv->dma_cap.addr64 <= 32)
4587 gfp |= GFP_DMA32;
4588
4589 while (dirty-- > 0) {
4590 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4591 struct dma_desc *p;
4592 bool use_rx_wd;
4593
4594 if (priv->extend_desc)
4595 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4596 else
4597 p = rx_q->dma_rx + entry;
4598
4599 if (!buf->page) {
4600 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4601 if (!buf->page)
4602 break;
4603 }
4604
4605 if (priv->sph && !buf->sec_page) {
4606 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4607 if (!buf->sec_page)
4608 break;
4609
4610 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4611 }
4612
4613 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4614
4615 stmmac_set_desc_addr(priv, p, buf->addr);
4616 if (priv->sph)
4617 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4618 else
4619 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4620 stmmac_refill_desc3(priv, rx_q, p);
4621
4622 rx_q->rx_count_frames++;
4623 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4624 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4625 rx_q->rx_count_frames = 0;
4626
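/* When RIWT is in use and frame coalescing has not elapsed, skip the
 * per-descriptor completion interrupt and let the RX watchdog signal
 * completion instead.
 */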
4627 use_rx_wd = !priv->rx_coal_frames[queue];
4628 use_rx_wd |= rx_q->rx_count_frames > 0;
4629 if (!priv->use_riwt)
4630 use_rx_wd = false;
4631
4632 dma_wmb();
4633 stmmac_set_rx_owner(priv, p, use_rx_wd);
4634
4635 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4636 }
4637 rx_q->dirty_rx = entry;
4638 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4639 (rx_q->dirty_rx * sizeof(struct dma_desc));
4640 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4641 }
4642
4643 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4644 struct dma_desc *p,
4645 int status, unsigned int len)
4646 {
4647 unsigned int plen = 0, hlen = 0;
4648 int coe = priv->hw->rx_csum;
4649
4650 /* Not first descriptor, buffer is always zero */
4651 if (priv->sph && len)
4652 return 0;
4653
4654 /* First descriptor, get split header length */
4655 stmmac_get_rx_header_len(priv, p, &hlen);
4656 if (priv->sph && hlen) {
4657 priv->xstats.rx_split_hdr_pkt_n++;
4658 return hlen;
4659 }
4660
4661 /* First descriptor, not last descriptor and not split header */
4662 if (status & rx_not_ls)
4663 return priv->dma_conf.dma_buf_sz;
4664
4665 plen = stmmac_get_rx_frame_len(priv, p, coe);
4666
4667 /* First descriptor and last descriptor and not split header */
4668 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4669 }
4670
4671 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4672 struct dma_desc *p,
4673 int status, unsigned int len)
4674 {
4675 int coe = priv->hw->rx_csum;
4676 unsigned int plen = 0;
4677
4678 /* Not split header, buffer is not available */
4679 if (!priv->sph)
4680 return 0;
4681
4682 /* Not last descriptor */
4683 if (status & rx_not_ls)
4684 return priv->dma_conf.dma_buf_sz;
4685
4686 plen = stmmac_get_rx_frame_len(priv, p, coe);
4687
4688 /* Last descriptor */
4689 return plen - len;
4690 }
4691
4692 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4693 struct xdp_frame *xdpf, bool dma_map)
4694 {
4695 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4696 unsigned int entry = tx_q->cur_tx;
4697 struct dma_desc *tx_desc;
4698 dma_addr_t dma_addr;
4699 bool set_ic;
4700
4701 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4702 return STMMAC_XDP_CONSUMED;
4703
4704 if (likely(priv->extend_desc))
4705 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4706 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4707 tx_desc = &tx_q->dma_entx[entry].basic;
4708 else
4709 tx_desc = tx_q->dma_tx + entry;
4710
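/* ndo_xdp_xmit frames must be freshly mapped; XDP_TX frames already live in
 * the RX page pool, so only a DMA sync of the payload is needed.
 */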
4711 if (dma_map) {
4712 dma_addr = dma_map_single(priv->device, xdpf->data,
4713 xdpf->len, DMA_TO_DEVICE);
4714 if (dma_mapping_error(priv->device, dma_addr))
4715 return STMMAC_XDP_CONSUMED;
4716
4717 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4718 } else {
4719 struct page *page = virt_to_page(xdpf->data);
4720
4721 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4722 xdpf->headroom;
4723 dma_sync_single_for_device(priv->device, dma_addr,
4724 xdpf->len, DMA_BIDIRECTIONAL);
4725
4726 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4727 }
4728
4729 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4730 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4731 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4732 tx_q->tx_skbuff_dma[entry].last_segment = true;
4733 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4734
4735 tx_q->xdpf[entry] = xdpf;
4736
4737 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4738
4739 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4740 true, priv->mode, true, true,
4741 xdpf->len);
4742
4743 tx_q->tx_count_frames++;
4744
4745 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4746 set_ic = true;
4747 else
4748 set_ic = false;
4749
4750 if (set_ic) {
4751 tx_q->tx_count_frames = 0;
4752 stmmac_set_tx_ic(priv, tx_desc);
4753 priv->xstats.tx_set_ic_bit++;
4754 }
4755
4756 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4757
4758 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4759 tx_q->cur_tx = entry;
4760
4761 return STMMAC_XDP_TX;
4762 }
4763
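/* Map the current CPU to a TX queue index, wrapping around when there are
 * fewer TX queues than CPUs, so XDP transmissions are spread over the
 * available queues.
 */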
4764 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4765 int cpu)
4766 {
4767 int index = cpu;
4768
4769 if (unlikely(index < 0))
4770 index = 0;
4771
4772 while (index >= priv->plat->tx_queues_to_use)
4773 index -= priv->plat->tx_queues_to_use;
4774
4775 return index;
4776 }
4777
4778 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4779 struct xdp_buff *xdp)
4780 {
4781 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4782 int cpu = smp_processor_id();
4783 struct netdev_queue *nq;
4784 int queue;
4785 int res;
4786
4787 if (unlikely(!xdpf))
4788 return STMMAC_XDP_CONSUMED;
4789
4790 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4791 nq = netdev_get_tx_queue(priv->dev, queue);
4792
4793 __netif_tx_lock(nq, cpu);
4794 /* Avoids TX time-out as we are sharing with slow path */
4795 txq_trans_cond_update(nq);
4796
4797 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4798 if (res == STMMAC_XDP_TX)
4799 stmmac_flush_tx_descriptors(priv, queue);
4800
4801 __netif_tx_unlock(nq);
4802
4803 return res;
4804 }
4805
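/* Run the attached XDP program and translate its verdict into the driver's
 * STMMAC_XDP_* result codes, transmitting or redirecting the buffer when
 * requested.
 */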
4806 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4807 struct bpf_prog *prog,
4808 struct xdp_buff *xdp)
4809 {
4810 u32 act;
4811 int res;
4812
4813 act = bpf_prog_run_xdp(prog, xdp);
4814 switch (act) {
4815 case XDP_PASS:
4816 res = STMMAC_XDP_PASS;
4817 break;
4818 case XDP_TX:
4819 res = stmmac_xdp_xmit_back(priv, xdp);
4820 break;
4821 case XDP_REDIRECT:
4822 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4823 res = STMMAC_XDP_CONSUMED;
4824 else
4825 res = STMMAC_XDP_REDIRECT;
4826 break;
4827 default:
4828 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4829 fallthrough;
4830 case XDP_ABORTED:
4831 trace_xdp_exception(priv->dev, prog, act);
4832 fallthrough;
4833 case XDP_DROP:
4834 res = STMMAC_XDP_CONSUMED;
4835 break;
4836 }
4837
4838 return res;
4839 }
4840
4841 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4842 struct xdp_buff *xdp)
4843 {
4844 struct bpf_prog *prog;
4845 int res;
4846
4847 prog = READ_ONCE(priv->xdp_prog);
4848 if (!prog) {
4849 res = STMMAC_XDP_PASS;
4850 goto out;
4851 }
4852
4853 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4854 out:
4855 return ERR_PTR(-res);
4856 }
4857
4858 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4859 int xdp_status)
4860 {
4861 int cpu = smp_processor_id();
4862 int queue;
4863
4864 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4865
4866 if (xdp_status & STMMAC_XDP_TX)
4867 stmmac_tx_timer_arm(priv, queue);
4868
4869 if (xdp_status & STMMAC_XDP_REDIRECT)
4870 xdp_do_flush();
4871 }
4872
4873 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4874 struct xdp_buff *xdp)
4875 {
4876 unsigned int metasize = xdp->data - xdp->data_meta;
4877 unsigned int datasize = xdp->data_end - xdp->data;
4878 struct sk_buff *skb;
4879
4880 skb = __napi_alloc_skb(&ch->rxtx_napi,
4881 xdp->data_end - xdp->data_hard_start,
4882 GFP_ATOMIC | __GFP_NOWARN);
4883 if (unlikely(!skb))
4884 return NULL;
4885
4886 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4887 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4888 if (metasize)
4889 skb_metadata_set(skb, metasize);
4890
4891 return skb;
4892 }
4893
4894 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4895 struct dma_desc *p, struct dma_desc *np,
4896 struct xdp_buff *xdp)
4897 {
4898 struct stmmac_channel *ch = &priv->channel[queue];
4899 unsigned int len = xdp->data_end - xdp->data;
4900 enum pkt_hash_types hash_type;
4901 int coe = priv->hw->rx_csum;
4902 struct sk_buff *skb;
4903 u32 hash;
4904
4905 skb = stmmac_construct_skb_zc(ch, xdp);
4906 if (!skb) {
4907 priv->dev->stats.rx_dropped++;
4908 return;
4909 }
4910
4911 stmmac_get_rx_hwtstamp(priv, p, np, skb);
4912 stmmac_rx_vlan(priv->dev, skb);
4913 skb->protocol = eth_type_trans(skb, priv->dev);
4914
4915 if (unlikely(!coe))
4916 skb_checksum_none_assert(skb);
4917 else
4918 skb->ip_summed = CHECKSUM_UNNECESSARY;
4919
4920 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4921 skb_set_hash(skb, hash, hash_type);
4922
4923 skb_record_rx_queue(skb, queue);
4924 napi_gro_receive(&ch->rxtx_napi, skb);
4925
4926 priv->dev->stats.rx_packets++;
4927 priv->dev->stats.rx_bytes += len;
4928 }
4929
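/* Refill the RX ring with buffers taken from the XSK pool, stopping early if
 * the pool runs dry. Returns false when not all requested buffers could be
 * allocated.
 */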
4930 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4931 {
4932 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4933 unsigned int entry = rx_q->dirty_rx;
4934 struct dma_desc *rx_desc = NULL;
4935 bool ret = true;
4936
4937 budget = min(budget, stmmac_rx_dirty(priv, queue));
4938
4939 while (budget-- > 0 && entry != rx_q->cur_rx) {
4940 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4941 dma_addr_t dma_addr;
4942 bool use_rx_wd;
4943
4944 if (!buf->xdp) {
4945 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4946 if (!buf->xdp) {
4947 ret = false;
4948 break;
4949 }
4950 }
4951
4952 if (priv->extend_desc)
4953 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4954 else
4955 rx_desc = rx_q->dma_rx + entry;
4956
4957 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4958 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4959 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4960 stmmac_refill_desc3(priv, rx_q, rx_desc);
4961
4962 rx_q->rx_count_frames++;
4963 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4964 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4965 rx_q->rx_count_frames = 0;
4966
4967 use_rx_wd = !priv->rx_coal_frames[queue];
4968 use_rx_wd |= rx_q->rx_count_frames > 0;
4969 if (!priv->use_riwt)
4970 use_rx_wd = false;
4971
4972 dma_wmb();
4973 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4974
4975 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4976 }
4977
4978 if (rx_desc) {
4979 rx_q->dirty_rx = entry;
4980 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4981 (rx_q->dirty_rx * sizeof(struct dma_desc));
4982 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4983 }
4984
4985 return ret;
4986 }
4987
4988 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4989 {
4990 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4991 unsigned int count = 0, error = 0, len = 0;
4992 int dirty = stmmac_rx_dirty(priv, queue);
4993 unsigned int next_entry = rx_q->cur_rx;
4994 unsigned int desc_size;
4995 struct bpf_prog *prog;
4996 bool failure = false;
4997 int xdp_status = 0;
4998 int status = 0;
4999
5000 if (netif_msg_rx_status(priv)) {
5001 void *rx_head;
5002
5003 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5004 if (priv->extend_desc) {
5005 rx_head = (void *)rx_q->dma_erx;
5006 desc_size = sizeof(struct dma_extended_desc);
5007 } else {
5008 rx_head = (void *)rx_q->dma_rx;
5009 desc_size = sizeof(struct dma_desc);
5010 }
5011
5012 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5013 rx_q->dma_rx_phy, desc_size);
5014 }
5015 while (count < limit) {
5016 struct stmmac_rx_buffer *buf;
5017 unsigned int buf1_len = 0;
5018 struct dma_desc *np, *p;
5019 int entry;
5020 int res;
5021
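/* If the previous NAPI run stopped mid-frame, resume from the saved
 * error/length state instead of starting a new frame.
 */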
5022 if (!count && rx_q->state_saved) {
5023 error = rx_q->state.error;
5024 len = rx_q->state.len;
5025 } else {
5026 rx_q->state_saved = false;
5027 error = 0;
5028 len = 0;
5029 }
5030
5031 if (count >= limit)
5032 break;
5033
5034 read_again:
5035 buf1_len = 0;
5036 entry = next_entry;
5037 buf = &rx_q->buf_pool[entry];
5038
5039 if (dirty >= STMMAC_RX_FILL_BATCH) {
5040 failure = failure ||
5041 !stmmac_rx_refill_zc(priv, queue, dirty);
5042 dirty = 0;
5043 }
5044
5045 if (priv->extend_desc)
5046 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5047 else
5048 p = rx_q->dma_rx + entry;
5049
5050 /* read the status of the incoming frame */
5051 status = stmmac_rx_status(priv, &priv->dev->stats,
5052 &priv->xstats, p);
5053 /* check if managed by the DMA otherwise go ahead */
5054 if (unlikely(status & dma_own))
5055 break;
5056
5057 /* Prefetch the next RX descriptor */
5058 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5059 priv->dma_conf.dma_rx_size);
5060 next_entry = rx_q->cur_rx;
5061
5062 if (priv->extend_desc)
5063 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5064 else
5065 np = rx_q->dma_rx + next_entry;
5066
5067 prefetch(np);
5068
5069 /* Ensure a valid XSK buffer before proceeding */
5070 if (!buf->xdp)
5071 break;
5072
5073 if (priv->extend_desc)
5074 stmmac_rx_extended_status(priv, &priv->dev->stats,
5075 &priv->xstats,
5076 rx_q->dma_erx + entry);
5077 if (unlikely(status == discard_frame)) {
5078 xsk_buff_free(buf->xdp);
5079 buf->xdp = NULL;
5080 dirty++;
5081 error = 1;
5082 if (!priv->hwts_rx_en)
5083 priv->dev->stats.rx_errors++;
5084 }
5085
5086 if (unlikely(error && (status & rx_not_ls)))
5087 goto read_again;
5088 if (unlikely(error)) {
5089 count++;
5090 continue;
5091 }
5092
5093 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5094 if (likely(status & rx_not_ls)) {
5095 xsk_buff_free(buf->xdp);
5096 buf->xdp = NULL;
5097 dirty++;
5098 count++;
5099 goto read_again;
5100 }
5101
5102 /* XDP ZC frames only support the primary buffer for now */
5103 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5104 len += buf1_len;
5105
5106 /* ACS is disabled; strip manually. */
5107 if (likely(!(status & rx_not_ls))) {
5108 buf1_len -= ETH_FCS_LEN;
5109 len -= ETH_FCS_LEN;
5110 }
5111
5112 /* The RX buffer is good and fits into an XSK pool buffer */
5113 buf->xdp->data_end = buf->xdp->data + buf1_len;
5114 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5115
5116 prog = READ_ONCE(priv->xdp_prog);
5117 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5118
5119 switch (res) {
5120 case STMMAC_XDP_PASS:
5121 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5122 xsk_buff_free(buf->xdp);
5123 break;
5124 case STMMAC_XDP_CONSUMED:
5125 xsk_buff_free(buf->xdp);
5126 priv->dev->stats.rx_dropped++;
5127 break;
5128 case STMMAC_XDP_TX:
5129 case STMMAC_XDP_REDIRECT:
5130 xdp_status |= res;
5131 break;
5132 }
5133
5134 buf->xdp = NULL;
5135 dirty++;
5136 count++;
5137 }
5138
5139 if (status & rx_not_ls) {
5140 rx_q->state_saved = true;
5141 rx_q->state.error = error;
5142 rx_q->state.len = len;
5143 }
5144
5145 stmmac_finalize_xdp_rx(priv, xdp_status);
5146
5147 priv->xstats.rx_pkt_n += count;
5148 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5149
5150 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5151 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5152 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5153 else
5154 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5155
5156 return (int)count;
5157 }
5158
5159 return failure ? limit : (int)count;
5160 }
5161
5162 /**
5163 * stmmac_rx - manage the receive process
5164 * @priv: driver private structure
5165 * @limit: napi budget
5166 * @queue: RX queue index.
5167 * Description : this is the function called by the napi poll method.
5168 * It gets all the frames inside the ring.
5169 */
5170 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5171 {
5172 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5173 struct stmmac_channel *ch = &priv->channel[queue];
5174 unsigned int count = 0, error = 0, len = 0;
5175 int status = 0, coe = priv->hw->rx_csum;
5176 unsigned int next_entry = rx_q->cur_rx;
5177 enum dma_data_direction dma_dir;
5178 unsigned int desc_size;
5179 struct sk_buff *skb = NULL;
5180 struct xdp_buff xdp;
5181 int xdp_status = 0;
5182 int buf_sz;
5183
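/* The RX buffers live in page pool pages, so round the configured buffer
 * size up to whole pages for xdp_init_buff().
 */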
5184 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5185 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5186
5187 if (netif_msg_rx_status(priv)) {
5188 void *rx_head;
5189
5190 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5191 if (priv->extend_desc) {
5192 rx_head = (void *)rx_q->dma_erx;
5193 desc_size = sizeof(struct dma_extended_desc);
5194 } else {
5195 rx_head = (void *)rx_q->dma_rx;
5196 desc_size = sizeof(struct dma_desc);
5197 }
5198
5199 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5200 rx_q->dma_rx_phy, desc_size);
5201 }
5202 while (count < limit) {
5203 unsigned int buf1_len = 0, buf2_len = 0;
5204 enum pkt_hash_types hash_type;
5205 struct stmmac_rx_buffer *buf;
5206 struct dma_desc *np, *p;
5207 int entry;
5208 u32 hash;
5209
5210 if (!count && rx_q->state_saved) {
5211 skb = rx_q->state.skb;
5212 error = rx_q->state.error;
5213 len = rx_q->state.len;
5214 } else {
5215 rx_q->state_saved = false;
5216 skb = NULL;
5217 error = 0;
5218 len = 0;
5219 }
5220
5221 if (count >= limit)
5222 break;
5223
5224 read_again:
5225 buf1_len = 0;
5226 buf2_len = 0;
5227 entry = next_entry;
5228 buf = &rx_q->buf_pool[entry];
5229
5230 if (priv->extend_desc)
5231 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5232 else
5233 p = rx_q->dma_rx + entry;
5234
5235 /* read the status of the incoming frame */
5236 status = stmmac_rx_status(priv, &priv->dev->stats,
5237 &priv->xstats, p);
5238 /* check if managed by the DMA otherwise go ahead */
5239 if (unlikely(status & dma_own))
5240 break;
5241
5242 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5243 priv->dma_conf.dma_rx_size);
5244 next_entry = rx_q->cur_rx;
5245
5246 if (priv->extend_desc)
5247 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5248 else
5249 np = rx_q->dma_rx + next_entry;
5250
5251 prefetch(np);
5252
5253 if (priv->extend_desc)
5254 stmmac_rx_extended_status(priv, &priv->dev->stats,
5255 &priv->xstats, rx_q->dma_erx + entry);
5256 if (unlikely(status == discard_frame)) {
5257 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5258 buf->page = NULL;
5259 error = 1;
5260 if (!priv->hwts_rx_en)
5261 priv->dev->stats.rx_errors++;
5262 }
5263
5264 if (unlikely(error && (status & rx_not_ls)))
5265 goto read_again;
5266 if (unlikely(error)) {
5267 dev_kfree_skb(skb);
5268 skb = NULL;
5269 count++;
5270 continue;
5271 }
5272
5273 /* Buffer is good. Go on. */
5274
5275 prefetch(page_address(buf->page) + buf->page_offset);
5276 if (buf->sec_page)
5277 prefetch(page_address(buf->sec_page));
5278
5279 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5280 len += buf1_len;
5281 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5282 len += buf2_len;
5283
5284 /* ACS is disabled; strip manually. */
5285 if (likely(!(status & rx_not_ls))) {
5286 if (buf2_len) {
5287 buf2_len -= ETH_FCS_LEN;
5288 len -= ETH_FCS_LEN;
5289 } else if (buf1_len) {
5290 buf1_len -= ETH_FCS_LEN;
5291 len -= ETH_FCS_LEN;
5292 }
5293 }
5294
5295 if (!skb) {
5296 unsigned int pre_len, sync_len;
5297
5298 dma_sync_single_for_cpu(priv->device, buf->addr,
5299 buf1_len, dma_dir);
5300
5301 xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5302 xdp_prepare_buff(&xdp, page_address(buf->page),
5303 buf->page_offset, buf1_len, false);
5304
5305 pre_len = xdp.data_end - xdp.data_hard_start -
5306 buf->page_offset;
5307 skb = stmmac_xdp_run_prog(priv, &xdp);
5308 /* Due to xdp_adjust_tail(): the DMA sync for_device
5309 * must cover the max length the CPU touched
5310 */
5311 sync_len = xdp.data_end - xdp.data_hard_start -
5312 buf->page_offset;
5313 sync_len = max(sync_len, pre_len);
5314
5315 /* Handle verdicts other than XDP_PASS */
5316 if (IS_ERR(skb)) {
5317 unsigned int xdp_res = -PTR_ERR(skb);
5318
5319 if (xdp_res & STMMAC_XDP_CONSUMED) {
5320 page_pool_put_page(rx_q->page_pool,
5321 virt_to_head_page(xdp.data),
5322 sync_len, true);
5323 buf->page = NULL;
5324 priv->dev->stats.rx_dropped++;
5325
5326 /* Clear skb, as it currently holds the
5327 * XDP verdict status set by the XDP program.
5328 */
5329 skb = NULL;
5330
5331 if (unlikely((status & rx_not_ls)))
5332 goto read_again;
5333
5334 count++;
5335 continue;
5336 } else if (xdp_res & (STMMAC_XDP_TX |
5337 STMMAC_XDP_REDIRECT)) {
5338 xdp_status |= xdp_res;
5339 buf->page = NULL;
5340 skb = NULL;
5341 count++;
5342 continue;
5343 }
5344 }
5345 }
5346
5347 if (!skb) {
5348 /* XDP program may expand or reduce tail */
5349 buf1_len = xdp.data_end - xdp.data;
5350
5351 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5352 if (!skb) {
5353 priv->dev->stats.rx_dropped++;
5354 count++;
5355 goto drain_data;
5356 }
5357
5358 /* XDP program may adjust header */
5359 skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5360 skb_put(skb, buf1_len);
5361
5362 /* Data payload copied into SKB, page ready for recycle */
5363 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5364 buf->page = NULL;
5365 } else if (buf1_len) {
5366 dma_sync_single_for_cpu(priv->device, buf->addr,
5367 buf1_len, dma_dir);
5368 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5369 buf->page, buf->page_offset, buf1_len,
5370 priv->dma_conf.dma_buf_sz);
5371
5372 /* Data payload appended into SKB */
5373 page_pool_release_page(rx_q->page_pool, buf->page);
5374 buf->page = NULL;
5375 }
5376
5377 if (buf2_len) {
5378 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5379 buf2_len, dma_dir);
5380 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5381 buf->sec_page, 0, buf2_len,
5382 priv->dma_conf.dma_buf_sz);
5383
5384 /* Data payload appended into SKB */
5385 page_pool_release_page(rx_q->page_pool, buf->sec_page);
5386 buf->sec_page = NULL;
5387 }
5388
5389 drain_data:
5390 if (likely(status & rx_not_ls))
5391 goto read_again;
5392 if (!skb)
5393 continue;
5394
5395 /* Got entire packet into SKB. Finish it. */
5396
5397 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5398 stmmac_rx_vlan(priv->dev, skb);
5399 skb->protocol = eth_type_trans(skb, priv->dev);
5400
5401 if (unlikely(!coe))
5402 skb_checksum_none_assert(skb);
5403 else
5404 skb->ip_summed = CHECKSUM_UNNECESSARY;
5405
5406 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5407 skb_set_hash(skb, hash, hash_type);
5408
5409 skb_record_rx_queue(skb, queue);
5410 napi_gro_receive(&ch->rx_napi, skb);
5411 skb = NULL;
5412
5413 priv->dev->stats.rx_packets++;
5414 priv->dev->stats.rx_bytes += len;
5415 count++;
5416 }
5417
5418 if (status & rx_not_ls || skb) {
5419 rx_q->state_saved = true;
5420 rx_q->state.skb = skb;
5421 rx_q->state.error = error;
5422 rx_q->state.len = len;
5423 }
5424
5425 stmmac_finalize_xdp_rx(priv, xdp_status);
5426
5427 stmmac_rx_refill(priv, queue);
5428
5429 priv->xstats.rx_pkt_n += count;
5430 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5431
5432 return count;
5433 }
5434
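/**
 * stmmac_napi_poll_rx - RX NAPI poll callback
 * @napi: NAPI context of the RX channel
 * @budget: maximum number of packets this poll may process
 * Description: processes received frames via stmmac_rx() and, once the
 * work done is below the budget, completes NAPI and re-enables the RX
 * DMA interrupt for this channel.
 */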
5435 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5436 {
5437 struct stmmac_channel *ch =
5438 container_of(napi, struct stmmac_channel, rx_napi);
5439 struct stmmac_priv *priv = ch->priv_data;
5440 u32 chan = ch->index;
5441 int work_done;
5442
5443 priv->xstats.napi_poll++;
5444
5445 work_done = stmmac_rx(priv, budget, chan);
5446 if (work_done < budget && napi_complete_done(napi, work_done)) {
5447 unsigned long flags;
5448
5449 spin_lock_irqsave(&ch->lock, flags);
5450 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5451 spin_unlock_irqrestore(&ch->lock, flags);
5452 }
5453
5454 return work_done;
5455 }
5456
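/**
 * stmmac_napi_poll_tx - TX NAPI poll callback
 * @napi: NAPI context of the TX channel
 * @budget: maximum amount of TX work this poll may do
 * Description: reclaims transmitted descriptors via stmmac_tx_clean()
 * and re-enables the TX DMA interrupt once the work fits in the budget.
 */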
5457 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5458 {
5459 struct stmmac_channel *ch =
5460 container_of(napi, struct stmmac_channel, tx_napi);
5461 struct stmmac_priv *priv = ch->priv_data;
5462 u32 chan = ch->index;
5463 int work_done;
5464
5465 priv->xstats.napi_poll++;
5466
5467 work_done = stmmac_tx_clean(priv, budget, chan);
5468 work_done = min(work_done, budget);
5469
5470 if (work_done < budget && napi_complete_done(napi, work_done)) {
5471 unsigned long flags;
5472
5473 spin_lock_irqsave(&ch->lock, flags);
5474 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5475 spin_unlock_irqrestore(&ch->lock, flags);
5476 }
5477
5478 return work_done;
5479 }
5480
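/**
 * stmmac_napi_poll_rxtx - combined RX/TX NAPI poll callback (XSK path)
 * @napi: NAPI context of the combined channel
 * @budget: NAPI budget
 * Description: cleans the TX ring and runs the zero-copy RX path; the
 * RX and TX DMA interrupts are only re-enabled once both directions
 * have completed their work within the budget.
 */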
5481 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5482 {
5483 struct stmmac_channel *ch =
5484 container_of(napi, struct stmmac_channel, rxtx_napi);
5485 struct stmmac_priv *priv = ch->priv_data;
5486 int rx_done, tx_done, rxtx_done;
5487 u32 chan = ch->index;
5488
5489 priv->xstats.napi_poll++;
5490
5491 tx_done = stmmac_tx_clean(priv, budget, chan);
5492 tx_done = min(tx_done, budget);
5493
5494 rx_done = stmmac_rx_zc(priv, budget, chan);
5495
5496 rxtx_done = max(tx_done, rx_done);
5497
5498 /* If either TX or RX work is not complete, return budget
5499 * and keep polling
5500 */
5501 if (rxtx_done >= budget)
5502 return budget;
5503
5504 /* all work done, exit the polling mode */
5505 if (napi_complete_done(napi, rxtx_done)) {
5506 unsigned long flags;
5507
5508 spin_lock_irqsave(&ch->lock, flags);
5509 /* Both RX and TX work are complete,
5510 * so enable both RX & TX IRQs.
5511 */
5512 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5513 spin_unlock_irqrestore(&ch->lock, flags);
5514 }
5515
5516 return min(rxtx_done, budget - 1);
5517 }
5518
5519 /**
5520 * stmmac_tx_timeout
5521 * @dev : Pointer to net device structure
5522 * @txqueue: the index of the hanging transmit queue
5523 * Description: this function is called when a packet transmission fails to
5524 * complete within a reasonable time. The driver will mark the error in the
5525 * netdev structure and arrange for the device to be reset to a sane state
5526 * in order to transmit a new packet.
5527 */
5528 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5529 {
5530 struct stmmac_priv *priv = netdev_priv(dev);
5531
5532 stmmac_global_err(priv);
5533 }
5534
5535 /**
5536 * stmmac_set_rx_mode - entry point for multicast addressing
5537 * @dev : pointer to the device structure
5538 * Description:
5539 * This function is a driver entry point which gets called by the kernel
5540 * whenever multicast addresses must be enabled/disabled.
5541 * Return value:
5542 * void.
5543 */
5544 static void stmmac_set_rx_mode(struct net_device *dev)
5545 {
5546 struct stmmac_priv *priv = netdev_priv(dev);
5547
5548 stmmac_set_filter(priv, priv->hw, dev);
5549 }
5550
5551 /**
5552 * stmmac_change_mtu - entry point to change MTU size for the device.
5553 * @dev : device pointer.
5554 * @new_mtu : the new MTU size for the device.
5555 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5556 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5557 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5558 * Return value:
5559 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5560 * file on failure.
5561 */
5562 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5563 {
5564 struct stmmac_priv *priv = netdev_priv(dev);
5565 int txfifosz = priv->plat->tx_fifo_size;
5566 struct stmmac_dma_conf *dma_conf;
5567 const int mtu = new_mtu;
5568 int ret;
5569
5570 if (txfifosz == 0)
5571 txfifosz = priv->dma_cap.tx_fifo_size;
5572
5573 txfifosz /= priv->plat->tx_queues_to_use;
5574
5575 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5576 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5577 return -EINVAL;
5578 }
5579
5580 new_mtu = STMMAC_ALIGN(new_mtu);
5581
5582 /* If condition true, FIFO is too small or MTU too large */
5583 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5584 return -EINVAL;
5585
5586 if (netif_running(dev)) {
5587 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5588 /* Try to allocate the new DMA conf with the new mtu */
5589 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5590 if (IS_ERR(dma_conf)) {
5591 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5592 mtu);
5593 return PTR_ERR(dma_conf);
5594 }
5595
5596 stmmac_release(dev);
5597
5598 ret = __stmmac_open(dev, dma_conf);
5599 kfree(dma_conf);
5600 if (ret) {
5601 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5602 return ret;
5603 }
5604
5605 stmmac_set_rx_mode(dev);
5606 }
5607
5608 dev->mtu = mtu;
5609 netdev_update_features(dev);
5610
5611 return 0;
5612 }
5613
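/**
 * stmmac_fix_features - adjust the requested netdev features to the HW
 * @dev: device pointer
 * @features: features requested by the stack
 * Description: drops the RX/TX checksum offload bits when the core cannot
 * provide them (or for buggy jumbo-frame support) and caches the TSO state.
 */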
5614 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5615 netdev_features_t features)
5616 {
5617 struct stmmac_priv *priv = netdev_priv(dev);
5618
5619 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5620 features &= ~NETIF_F_RXCSUM;
5621
5622 if (!priv->plat->tx_coe)
5623 features &= ~NETIF_F_CSUM_MASK;
5624
5625 /* Some GMAC devices have a bugged Jumbo frame support that
5626 * needs to have the Tx COE disabled for oversized frames
5627 * (due to limited buffer sizes). In this case we disable
5628 * the TX csum insertion in the TDES and not use SF.
5629 */
5630 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5631 features &= ~NETIF_F_CSUM_MASK;
5632
5633 /* Disable tso if asked by ethtool */
5634 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5635 if (features & NETIF_F_TSO)
5636 priv->tso = true;
5637 else
5638 priv->tso = false;
5639 }
5640
5641 return features;
5642 }
5643
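/**
 * stmmac_set_features - apply a new netdev feature set
 * @netdev: device pointer
 * @features: features to enable
 * Description: programs the RX checksum engine accordingly and refreshes
 * the Split Header setting on all RX channels when the capability exists.
 */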
5644 static int stmmac_set_features(struct net_device *netdev,
5645 netdev_features_t features)
5646 {
5647 struct stmmac_priv *priv = netdev_priv(netdev);
5648
5649 /* Keep the COE Type if checksum offload is supported */
5650 if (features & NETIF_F_RXCSUM)
5651 priv->hw->rx_csum = priv->plat->rx_coe;
5652 else
5653 priv->hw->rx_csum = 0;
5654 /* No check needed because rx_coe has been set before and it will be
5655 * fixed in case of issue.
5656 */
5657 stmmac_rx_ipc(priv, priv->hw);
5658
5659 if (priv->sph_cap) {
5660 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5661 u32 chan;
5662
5663 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5664 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5665 }
5666
5667 return 0;
5668 }
5669
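/**
 * stmmac_fpe_event_status - track Frame Preemption handshake events
 * @priv: driver private structure
 * @status: FPE event bits reported by the interrupt handler
 * Description: updates the local and link-partner FPE states from the
 * verify/response mPacket events and schedules the FPE workqueue task
 * to complete the handshake.
 */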
5670 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5671 {
5672 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5673 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5674 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5675 bool *hs_enable = &fpe_cfg->hs_enable;
5676
5677 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5678 return;
5679
5680 /* If LP has sent verify mPacket, LP is FPE capable */
5681 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5682 if (*lp_state < FPE_STATE_CAPABLE)
5683 *lp_state = FPE_STATE_CAPABLE;
5684
5685 /* If the user has requested FPE enable, respond quickly */
5686 if (*hs_enable)
5687 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5688 MPACKET_RESPONSE);
5689 }
5690
5691 /* If Local has sent verify mPacket, Local is FPE capable */
5692 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5693 if (*lo_state < FPE_STATE_CAPABLE)
5694 *lo_state = FPE_STATE_CAPABLE;
5695 }
5696
5697 /* If LP has sent response mPacket, LP is entering FPE ON */
5698 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5699 *lp_state = FPE_STATE_ENTERING_ON;
5700
5701 /* If Local has sent response mPacket, Local is entering FPE ON */
5702 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5703 *lo_state = FPE_STATE_ENTERING_ON;
5704
5705 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5706 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5707 priv->fpe_wq) {
5708 queue_work(priv->fpe_wq, &priv->fpe_task);
5709 }
5710 }
5711
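/**
 * stmmac_common_interrupt - handle MAC-level (non-DMA) interrupt sources
 * @priv: driver private structure
 * Description: services EST and FPE events, GMAC core interrupts (LPI
 * state), per-queue MTL interrupts, PCS link status and the timestamp
 * interrupt.
 */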
5712 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5713 {
5714 u32 rx_cnt = priv->plat->rx_queues_to_use;
5715 u32 tx_cnt = priv->plat->tx_queues_to_use;
5716 u32 queues_count;
5717 u32 queue;
5718 bool xmac;
5719
5720 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5721 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5722
5723 if (priv->irq_wake)
5724 pm_wakeup_event(priv->device, 0);
5725
5726 if (priv->dma_cap.estsel)
5727 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5728 &priv->xstats, tx_cnt);
5729
5730 if (priv->dma_cap.fpesel) {
5731 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5732 priv->dev);
5733
5734 stmmac_fpe_event_status(priv, status);
5735 }
5736
5737 /* To handle GMAC own interrupts */
5738 if ((priv->plat->has_gmac) || xmac) {
5739 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5740
5741 if (unlikely(status)) {
5742 /* For LPI we need to save the tx status */
5743 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5744 priv->tx_path_in_lpi_mode = true;
5745 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5746 priv->tx_path_in_lpi_mode = false;
5747 }
5748
5749 for (queue = 0; queue < queues_count; queue++) {
5750 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5751 queue);
5752 }
5753
5754 /* PCS link status */
5755 if (priv->hw->pcs) {
5756 if (priv->xstats.pcs_link)
5757 netif_carrier_on(priv->dev);
5758 else
5759 netif_carrier_off(priv->dev);
5760 }
5761
5762 stmmac_timestamp_interrupt(priv, priv);
5763 }
5764 }
5765
5766 /**
5767 * stmmac_interrupt - main ISR
5768 * @irq: interrupt number.
5769 * @dev_id: to pass the net device pointer.
5770 * Description: this is the main driver interrupt service routine.
5771 * It can call:
5772 * o DMA service routine (to manage incoming frame reception and transmission
5773 * status)
5774 * o Core interrupts to manage: remote wake-up, management counter, LPI
5775 * interrupts.
5776 */
5777 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5778 {
5779 struct net_device *dev = (struct net_device *)dev_id;
5780 struct stmmac_priv *priv = netdev_priv(dev);
5781
5782 /* Check if adapter is up */
5783 if (test_bit(STMMAC_DOWN, &priv->state))
5784 return IRQ_HANDLED;
5785
5786 /* Check if a fatal error happened */
5787 if (stmmac_safety_feat_interrupt(priv))
5788 return IRQ_HANDLED;
5789
5790 /* To handle Common interrupts */
5791 stmmac_common_interrupt(priv);
5792
5793 /* To handle DMA interrupts */
5794 stmmac_dma_interrupt(priv);
5795
5796 return IRQ_HANDLED;
5797 }
5798
5799 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5800 {
5801 struct net_device *dev = (struct net_device *)dev_id;
5802 struct stmmac_priv *priv = netdev_priv(dev);
5803
5804 if (unlikely(!dev)) {
5805 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5806 return IRQ_NONE;
5807 }
5808
5809 /* Check if adapter is up */
5810 if (test_bit(STMMAC_DOWN, &priv->state))
5811 return IRQ_HANDLED;
5812
5813 /* To handle Common interrupts */
5814 stmmac_common_interrupt(priv);
5815
5816 return IRQ_HANDLED;
5817 }
5818
5819 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5820 {
5821 struct net_device *dev = (struct net_device *)dev_id;
5822 struct stmmac_priv *priv = netdev_priv(dev);
5823
5824 if (unlikely(!dev)) {
5825 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5826 return IRQ_NONE;
5827 }
5828
5829 /* Check if adapter is up */
5830 if (test_bit(STMMAC_DOWN, &priv->state))
5831 return IRQ_HANDLED;
5832
5833 /* Check if a fatal error happened */
5834 stmmac_safety_feat_interrupt(priv);
5835
5836 return IRQ_HANDLED;
5837 }
5838
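/**
 * stmmac_msi_intr_tx - per-channel TX MSI interrupt handler
 * @irq: interrupt number
 * @data: TX queue this vector is bound to
 * Description: schedules TX NAPI for the channel and, on hard TX errors,
 * bumps the DMA threshold or triggers a TX error recovery.
 */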
5839 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5840 {
5841 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5842 struct stmmac_dma_conf *dma_conf;
5843 int chan = tx_q->queue_index;
5844 struct stmmac_priv *priv;
5845 int status;
5846
5847 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5848 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5849
5850 if (unlikely(!data)) {
5851 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5852 return IRQ_NONE;
5853 }
5854
5855 /* Check if adapter is up */
5856 if (test_bit(STMMAC_DOWN, &priv->state))
5857 return IRQ_HANDLED;
5858
5859 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5860
5861 if (unlikely(status & tx_hard_error_bump_tc)) {
5862 /* Try to bump up the dma threshold on this failure */
5863 stmmac_bump_dma_threshold(priv, chan);
5864 } else if (unlikely(status == tx_hard_error)) {
5865 stmmac_tx_err(priv, chan);
5866 }
5867
5868 return IRQ_HANDLED;
5869 }
5870
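/**
 * stmmac_msi_intr_rx - per-channel RX MSI interrupt handler
 * @irq: interrupt number
 * @data: RX queue this vector is bound to
 * Description: schedules RX NAPI for the corresponding DMA channel.
 */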
5871 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5872 {
5873 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5874 struct stmmac_dma_conf *dma_conf;
5875 int chan = rx_q->queue_index;
5876 struct stmmac_priv *priv;
5877
5878 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5879 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5880
5881 if (unlikely(!data)) {
5882 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5883 return IRQ_NONE;
5884 }
5885
5886 /* Check if adapter is up */
5887 if (test_bit(STMMAC_DOWN, &priv->state))
5888 return IRQ_HANDLED;
5889
5890 stmmac_napi_check(priv, chan, DMA_DIR_RX);
5891
5892 return IRQ_HANDLED;
5893 }
5894
5895 #ifdef CONFIG_NET_POLL_CONTROLLER
5896 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5897 * to allow network I/O with interrupts disabled.
5898 */
5899 static void stmmac_poll_controller(struct net_device *dev)
5900 {
5901 struct stmmac_priv *priv = netdev_priv(dev);
5902 int i;
5903
5904 /* If adapter is down, do nothing */
5905 if (test_bit(STMMAC_DOWN, &priv->state))
5906 return;
5907
5908 if (priv->plat->multi_msi_en) {
5909 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5910 stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5911
5912 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5913 stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5914 } else {
5915 disable_irq(dev->irq);
5916 stmmac_interrupt(dev->irq, dev);
5917 enable_irq(dev->irq);
5918 }
5919 }
5920 #endif
5921
5922 /**
5923 * stmmac_ioctl - Entry point for the Ioctl
5924 * @dev: Device pointer.
5925 * @rq: An IOCTL-specific structure that can contain a pointer to
5926 * a proprietary structure used to pass information to the driver.
5927 * @cmd: IOCTL command
5928 * Description:
5929 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5930 */
5931 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5932 {
5933 struct stmmac_priv *priv = netdev_priv(dev);
5934 int ret = -EOPNOTSUPP;
5935
5936 if (!netif_running(dev))
5937 return -EINVAL;
5938
5939 switch (cmd) {
5940 case SIOCGMIIPHY:
5941 case SIOCGMIIREG:
5942 case SIOCSMIIREG:
5943 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5944 break;
5945 case SIOCSHWTSTAMP:
5946 ret = stmmac_hwtstamp_set(dev, rq);
5947 break;
5948 case SIOCGHWTSTAMP:
5949 ret = stmmac_hwtstamp_get(dev, rq);
5950 break;
5951 default:
5952 break;
5953 }
5954
5955 return ret;
5956 }
5957
5958 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5959 void *cb_priv)
5960 {
5961 struct stmmac_priv *priv = cb_priv;
5962 int ret = -EOPNOTSUPP;
5963
5964 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5965 return ret;
5966
5967 __stmmac_disable_all_queues(priv);
5968
5969 switch (type) {
5970 case TC_SETUP_CLSU32:
5971 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5972 break;
5973 case TC_SETUP_CLSFLOWER:
5974 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5975 break;
5976 default:
5977 break;
5978 }
5979
5980 stmmac_enable_all_queues(priv);
5981 return ret;
5982 }
5983
5984 static LIST_HEAD(stmmac_block_cb_list);
5985
5986 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5987 void *type_data)
5988 {
5989 struct stmmac_priv *priv = netdev_priv(ndev);
5990
5991 switch (type) {
5992 case TC_SETUP_BLOCK:
5993 return flow_block_cb_setup_simple(type_data,
5994 &stmmac_block_cb_list,
5995 stmmac_setup_tc_block_cb,
5996 priv, priv, true);
5997 case TC_SETUP_QDISC_CBS:
5998 return stmmac_tc_setup_cbs(priv, priv, type_data);
5999 case TC_SETUP_QDISC_TAPRIO:
6000 return stmmac_tc_setup_taprio(priv, priv, type_data);
6001 case TC_SETUP_QDISC_ETF:
6002 return stmmac_tc_setup_etf(priv, priv, type_data);
6003 default:
6004 return -EOPNOTSUPP;
6005 }
6006 }
6007
6008 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6009 struct net_device *sb_dev)
6010 {
6011 int gso = skb_shinfo(skb)->gso_type;
6012
6013 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6014 /*
6015 * There is no way to determine the number of TSO/USO
6016 * capable Queues. Let's always use Queue 0
6017 * because if TSO/USO is supported then at least this
6018 * one will be capable.
6019 */
6020 return 0;
6021 }
6022
6023 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6024 }
6025
6026 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6027 {
6028 struct stmmac_priv *priv = netdev_priv(ndev);
6029 int ret = 0;
6030
6031 ret = pm_runtime_resume_and_get(priv->device);
6032 if (ret < 0)
6033 return ret;
6034
6035 ret = eth_mac_addr(ndev, addr);
6036 if (ret)
6037 goto set_mac_error;
6038
6039 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6040
6041 set_mac_error:
6042 pm_runtime_put(priv->device);
6043
6044 return ret;
6045 }
6046
6047 #ifdef CONFIG_DEBUG_FS
6048 static struct dentry *stmmac_fs_dir;
6049
6050 static void sysfs_display_ring(void *head, int size, int extend_desc,
6051 struct seq_file *seq, dma_addr_t dma_phy_addr)
6052 {
6053 int i;
6054 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6055 struct dma_desc *p = (struct dma_desc *)head;
6056 dma_addr_t dma_addr;
6057
6058 for (i = 0; i < size; i++) {
6059 if (extend_desc) {
6060 dma_addr = dma_phy_addr + i * sizeof(*ep);
6061 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6062 i, &dma_addr,
6063 le32_to_cpu(ep->basic.des0),
6064 le32_to_cpu(ep->basic.des1),
6065 le32_to_cpu(ep->basic.des2),
6066 le32_to_cpu(ep->basic.des3));
6067 ep++;
6068 } else {
6069 dma_addr = dma_phy_addr + i * sizeof(*p);
6070 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6071 i, &dma_addr,
6072 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6073 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6074 p++;
6075 }
6076 seq_printf(seq, "\n");
6077 }
6078 }
6079
6080 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6081 {
6082 struct net_device *dev = seq->private;
6083 struct stmmac_priv *priv = netdev_priv(dev);
6084 u32 rx_count = priv->plat->rx_queues_to_use;
6085 u32 tx_count = priv->plat->tx_queues_to_use;
6086 u32 queue;
6087
6088 if ((dev->flags & IFF_UP) == 0)
6089 return 0;
6090
6091 for (queue = 0; queue < rx_count; queue++) {
6092 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6093
6094 seq_printf(seq, "RX Queue %d:\n", queue);
6095
6096 if (priv->extend_desc) {
6097 seq_printf(seq, "Extended descriptor ring:\n");
6098 sysfs_display_ring((void *)rx_q->dma_erx,
6099 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6100 } else {
6101 seq_printf(seq, "Descriptor ring:\n");
6102 sysfs_display_ring((void *)rx_q->dma_rx,
6103 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6104 }
6105 }
6106
6107 for (queue = 0; queue < tx_count; queue++) {
6108 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6109
6110 seq_printf(seq, "TX Queue %d:\n", queue);
6111
6112 if (priv->extend_desc) {
6113 seq_printf(seq, "Extended descriptor ring:\n");
6114 sysfs_display_ring((void *)tx_q->dma_etx,
6115 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6116 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6117 seq_printf(seq, "Descriptor ring:\n");
6118 sysfs_display_ring((void *)tx_q->dma_tx,
6119 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6120 }
6121 }
6122
6123 return 0;
6124 }
6125 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6126
6127 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6128 {
6129 struct net_device *dev = seq->private;
6130 struct stmmac_priv *priv = netdev_priv(dev);
6131
6132 if (!priv->hw_cap_support) {
6133 seq_printf(seq, "DMA HW features not supported\n");
6134 return 0;
6135 }
6136
6137 seq_printf(seq, "==============================\n");
6138 seq_printf(seq, "\tDMA HW features\n");
6139 seq_printf(seq, "==============================\n");
6140
6141 seq_printf(seq, "\t10/100 Mbps: %s\n",
6142 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6143 seq_printf(seq, "\t1000 Mbps: %s\n",
6144 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6145 seq_printf(seq, "\tHalf duplex: %s\n",
6146 (priv->dma_cap.half_duplex) ? "Y" : "N");
6147 seq_printf(seq, "\tHash Filter: %s\n",
6148 (priv->dma_cap.hash_filter) ? "Y" : "N");
6149 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6150 (priv->dma_cap.multi_addr) ? "Y" : "N");
6151 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6152 (priv->dma_cap.pcs) ? "Y" : "N");
6153 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6154 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6155 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6156 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6157 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6158 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6159 seq_printf(seq, "\tRMON module: %s\n",
6160 (priv->dma_cap.rmon) ? "Y" : "N");
6161 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6162 (priv->dma_cap.time_stamp) ? "Y" : "N");
6163 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6164 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6165 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6166 (priv->dma_cap.eee) ? "Y" : "N");
6167 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6168 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6169 (priv->dma_cap.tx_coe) ? "Y" : "N");
6170 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6171 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6172 (priv->dma_cap.rx_coe) ? "Y" : "N");
6173 } else {
6174 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6175 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6176 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6177 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6178 }
6179 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6180 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6181 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6182 priv->dma_cap.number_rx_channel);
6183 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6184 priv->dma_cap.number_tx_channel);
6185 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6186 priv->dma_cap.number_rx_queues);
6187 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6188 priv->dma_cap.number_tx_queues);
6189 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6190 (priv->dma_cap.enh_desc) ? "Y" : "N");
6191 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6192 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6193 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6194 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6195 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6196 priv->dma_cap.pps_out_num);
6197 seq_printf(seq, "\tSafety Features: %s\n",
6198 priv->dma_cap.asp ? "Y" : "N");
6199 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6200 priv->dma_cap.frpsel ? "Y" : "N");
6201 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6202 priv->dma_cap.addr64);
6203 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6204 priv->dma_cap.rssen ? "Y" : "N");
6205 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6206 priv->dma_cap.vlhash ? "Y" : "N");
6207 seq_printf(seq, "\tSplit Header: %s\n",
6208 priv->dma_cap.sphen ? "Y" : "N");
6209 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6210 priv->dma_cap.vlins ? "Y" : "N");
6211 seq_printf(seq, "\tDouble VLAN: %s\n",
6212 priv->dma_cap.dvlan ? "Y" : "N");
6213 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6214 priv->dma_cap.l3l4fnum);
6215 seq_printf(seq, "\tARP Offloading: %s\n",
6216 priv->dma_cap.arpoffsel ? "Y" : "N");
6217 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6218 priv->dma_cap.estsel ? "Y" : "N");
6219 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6220 priv->dma_cap.fpesel ? "Y" : "N");
6221 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6222 priv->dma_cap.tbssel ? "Y" : "N");
6223 return 0;
6224 }
6225 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6226
6227 /* Use network device events to rename debugfs file entries.
6228 */
6229 static int stmmac_device_event(struct notifier_block *unused,
6230 unsigned long event, void *ptr)
6231 {
6232 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6233 struct stmmac_priv *priv = netdev_priv(dev);
6234
6235 if (dev->netdev_ops != &stmmac_netdev_ops)
6236 goto done;
6237
6238 switch (event) {
6239 case NETDEV_CHANGENAME:
6240 if (priv->dbgfs_dir)
6241 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6242 priv->dbgfs_dir,
6243 stmmac_fs_dir,
6244 dev->name);
6245 break;
6246 }
6247 done:
6248 return NOTIFY_DONE;
6249 }
6250
6251 static struct notifier_block stmmac_notifier = {
6252 .notifier_call = stmmac_device_event,
6253 };
6254
6255 static void stmmac_init_fs(struct net_device *dev)
6256 {
6257 struct stmmac_priv *priv = netdev_priv(dev);
6258
6259 rtnl_lock();
6260
6261 /* Create per netdev entries */
6262 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6263
6264 /* Entry to report DMA RX/TX rings */
6265 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6266 &stmmac_rings_status_fops);
6267
6268 /* Entry to report the DMA HW features */
6269 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6270 &stmmac_dma_cap_fops);
6271
6272 rtnl_unlock();
6273 }
6274
6275 static void stmmac_exit_fs(struct net_device *dev)
6276 {
6277 struct stmmac_priv *priv = netdev_priv(dev);
6278
6279 debugfs_remove_recursive(priv->dbgfs_dir);
6280 }
6281 #endif /* CONFIG_DEBUG_FS */
6282
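/**
 * stmmac_vid_crc32_le - compute the CRC-32 of a little-endian VLAN ID
 * @vid_le: VLAN ID in little-endian form
 * Description: bit-by-bit CRC-32 (polynomial 0xedb88320) over the
 * significant VID bits, used to build the VLAN hash filter.
 */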
6283 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6284 {
6285 unsigned char *data = (unsigned char *)&vid_le;
6286 unsigned char data_byte = 0;
6287 u32 crc = ~0x0;
6288 u32 temp = 0;
6289 int i, bits;
6290
6291 bits = get_bitmask_order(VLAN_VID_MASK);
6292 for (i = 0; i < bits; i++) {
6293 if ((i % 8) == 0)
6294 data_byte = data[i / 8];
6295
6296 temp = ((crc & 1) ^ data_byte) & 1;
6297 crc >>= 1;
6298 data_byte >>= 1;
6299
6300 if (temp)
6301 crc ^= 0xedb88320;
6302 }
6303
6304 return crc;
6305 }
6306
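/**
 * stmmac_vlan_update - reprogram the VLAN hash/perfect filter
 * @priv: driver private structure
 * @is_double: true for double (S-TAG) VLAN
 * Description: rebuilds the hash table from the active VLANs, or falls
 * back to a single perfect-match entry when the core has no VLAN hash
 * support.
 */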
6307 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6308 {
6309 u32 crc, hash = 0;
6310 __le16 pmatch = 0;
6311 int count = 0;
6312 u16 vid = 0;
6313
6314 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6315 __le16 vid_le = cpu_to_le16(vid);
6316 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6317 hash |= (1 << crc);
6318 count++;
6319 }
6320
6321 if (!priv->dma_cap.vlhash) {
6322 if (count > 2) /* VID = 0 always passes filter */
6323 return -EOPNOTSUPP;
6324
6325 pmatch = cpu_to_le16(vid);
6326 hash = 0;
6327 }
6328
6329 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6330 }
6331
6332 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6333 {
6334 struct stmmac_priv *priv = netdev_priv(ndev);
6335 bool is_double = false;
6336 int ret;
6337
6338 if (be16_to_cpu(proto) == ETH_P_8021AD)
6339 is_double = true;
6340
6341 set_bit(vid, priv->active_vlans);
6342 ret = stmmac_vlan_update(priv, is_double);
6343 if (ret) {
6344 clear_bit(vid, priv->active_vlans);
6345 return ret;
6346 }
6347
6348 if (priv->hw->num_vlan) {
6349 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6350 if (ret)
6351 return ret;
6352 }
6353
6354 return 0;
6355 }
6356
6357 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6358 {
6359 struct stmmac_priv *priv = netdev_priv(ndev);
6360 bool is_double = false;
6361 int ret;
6362
6363 ret = pm_runtime_resume_and_get(priv->device);
6364 if (ret < 0)
6365 return ret;
6366
6367 if (be16_to_cpu(proto) == ETH_P_8021AD)
6368 is_double = true;
6369
6370 clear_bit(vid, priv->active_vlans);
6371
6372 if (priv->hw->num_vlan) {
6373 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6374 if (ret)
6375 goto del_vlan_error;
6376 }
6377
6378 ret = stmmac_vlan_update(priv, is_double);
6379
6380 del_vlan_error:
6381 pm_runtime_put(priv->device);
6382
6383 return ret;
6384 }
6385
6386 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6387 {
6388 struct stmmac_priv *priv = netdev_priv(dev);
6389
6390 switch (bpf->command) {
6391 case XDP_SETUP_PROG:
6392 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6393 case XDP_SETUP_XSK_POOL:
6394 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6395 bpf->xsk.queue_id);
6396 default:
6397 return -EOPNOTSUPP;
6398 }
6399 }
6400
6401 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6402 struct xdp_frame **frames, u32 flags)
6403 {
6404 struct stmmac_priv *priv = netdev_priv(dev);
6405 int cpu = smp_processor_id();
6406 struct netdev_queue *nq;
6407 int i, nxmit = 0;
6408 int queue;
6409
6410 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6411 return -ENETDOWN;
6412
6413 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6414 return -EINVAL;
6415
6416 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6417 nq = netdev_get_tx_queue(priv->dev, queue);
6418
6419 __netif_tx_lock(nq, cpu);
6420 /* Avoids TX time-out as we are sharing with slow path */
6421 txq_trans_cond_update(nq);
6422
6423 for (i = 0; i < num_frames; i++) {
6424 int res;
6425
6426 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6427 if (res == STMMAC_XDP_CONSUMED)
6428 break;
6429
6430 nxmit++;
6431 }
6432
6433 if (flags & XDP_XMIT_FLUSH) {
6434 stmmac_flush_tx_descriptors(priv, queue);
6435 stmmac_tx_timer_arm(priv, queue);
6436 }
6437
6438 __netif_tx_unlock(nq);
6439
6440 return nxmit;
6441 }
6442
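/**
 * stmmac_disable_rx_queue - stop and release one RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: masks the RX DMA interrupt, stops the RX DMA channel and
 * frees the descriptor resources of the queue.
 */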
6443 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6444 {
6445 struct stmmac_channel *ch = &priv->channel[queue];
6446 unsigned long flags;
6447
6448 spin_lock_irqsave(&ch->lock, flags);
6449 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6450 spin_unlock_irqrestore(&ch->lock, flags);
6451
6452 stmmac_stop_rx_dma(priv, queue);
6453 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6454 }
6455
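/**
 * stmmac_enable_rx_queue - (re)allocate and restart one RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: allocates and initializes the RX descriptor ring, programs
 * the DMA channel, buffer size and tail pointer, then restarts RX DMA and
 * unmasks its interrupt.
 */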
6456 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6457 {
6458 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6459 struct stmmac_channel *ch = &priv->channel[queue];
6460 unsigned long flags;
6461 u32 buf_size;
6462 int ret;
6463
6464 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6465 if (ret) {
6466 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6467 return;
6468 }
6469
6470 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6471 if (ret) {
6472 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6473 netdev_err(priv->dev, "Failed to init RX desc.\n");
6474 return;
6475 }
6476
6477 stmmac_reset_rx_queue(priv, queue);
6478 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6479
6480 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6481 rx_q->dma_rx_phy, rx_q->queue_index);
6482
6483 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6484 sizeof(struct dma_desc));
6485 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6486 rx_q->rx_tail_addr, rx_q->queue_index);
6487
6488 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6489 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6490 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6491 buf_size,
6492 rx_q->queue_index);
6493 } else {
6494 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6495 priv->dma_conf.dma_buf_sz,
6496 rx_q->queue_index);
6497 }
6498
6499 stmmac_start_rx_dma(priv, queue);
6500
6501 spin_lock_irqsave(&ch->lock, flags);
6502 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6503 spin_unlock_irqrestore(&ch->lock, flags);
6504 }
6505
6506 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6507 {
6508 struct stmmac_channel *ch = &priv->channel[queue];
6509 unsigned long flags;
6510
6511 spin_lock_irqsave(&ch->lock, flags);
6512 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6513 spin_unlock_irqrestore(&ch->lock, flags);
6514
6515 stmmac_stop_tx_dma(priv, queue);
6516 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6517 }
6518
6519 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6520 {
6521 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6522 struct stmmac_channel *ch = &priv->channel[queue];
6523 unsigned long flags;
6524 int ret;
6525
6526 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6527 if (ret) {
6528 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6529 return;
6530 }
6531
6532 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6533 if (ret) {
6534 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6535 netdev_err(priv->dev, "Failed to init TX desc.\n");
6536 return;
6537 }
6538
6539 stmmac_reset_tx_queue(priv, queue);
6540 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6541
6542 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6543 tx_q->dma_tx_phy, tx_q->queue_index);
6544
6545 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6546 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6547
6548 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6549 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6550 tx_q->tx_tail_addr, tx_q->queue_index);
6551
6552 stmmac_start_tx_dma(priv, queue);
6553
6554 spin_lock_irqsave(&ch->lock, flags);
6555 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6556 spin_unlock_irqrestore(&ch->lock, flags);
6557 }
6558
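/**
 * stmmac_xdp_release - tear down the data path before an XDP reconfiguration
 * @dev: device pointer
 * Description: disables TX, NAPI and the IRQ lines, stops all DMA channels,
 * frees the descriptor resources and turns the MAC off.
 */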
6559 void stmmac_xdp_release(struct net_device *dev)
6560 {
6561 struct stmmac_priv *priv = netdev_priv(dev);
6562 u32 chan;
6563
6564 /* Ensure tx function is not running */
6565 netif_tx_disable(dev);
6566
6567 /* Disable NAPI process */
6568 stmmac_disable_all_queues(priv);
6569
6570 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6571 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6572
6573 /* Free the IRQ lines */
6574 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6575
6576 /* Stop TX/RX DMA channels */
6577 stmmac_stop_all_dma(priv);
6578
6579 /* Release and free the Rx/Tx resources */
6580 free_dma_desc_resources(priv, &priv->dma_conf);
6581
6582 /* Disable the MAC Rx/Tx */
6583 stmmac_mac_set(priv, priv->ioaddr, false);
6584
6585 /* set trans_start so we don't get spurious
6586 * watchdogs during reset
6587 */
6588 netif_trans_update(dev);
6589 netif_carrier_off(dev);
6590 }
6591
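/**
 * stmmac_xdp_open - bring the data path back up after an XDP reconfiguration
 * @dev: device pointer
 * Description: reallocates the descriptor rings, reprograms every RX/TX DMA
 * channel (honouring XSK pool buffer sizes), re-requests the IRQs and
 * restarts the MAC and the NAPI instances.
 */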
6592 int stmmac_xdp_open(struct net_device *dev)
6593 {
6594 struct stmmac_priv *priv = netdev_priv(dev);
6595 u32 rx_cnt = priv->plat->rx_queues_to_use;
6596 u32 tx_cnt = priv->plat->tx_queues_to_use;
6597 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6598 struct stmmac_rx_queue *rx_q;
6599 struct stmmac_tx_queue *tx_q;
6600 u32 buf_size;
6601 bool sph_en;
6602 u32 chan;
6603 int ret;
6604
6605 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6606 if (ret < 0) {
6607 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6608 __func__);
6609 goto dma_desc_error;
6610 }
6611
6612 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6613 if (ret < 0) {
6614 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6615 __func__);
6616 goto init_error;
6617 }
6618
6619 /* DMA CSR Channel configuration */
6620 for (chan = 0; chan < dma_csr_ch; chan++) {
6621 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6622 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6623 }
6624
6625 /* Adjust Split header */
6626 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6627
6628 /* DMA RX Channel Configuration */
6629 for (chan = 0; chan < rx_cnt; chan++) {
6630 rx_q = &priv->dma_conf.rx_queue[chan];
6631
6632 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6633 rx_q->dma_rx_phy, chan);
6634
6635 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6636 (rx_q->buf_alloc_num *
6637 sizeof(struct dma_desc));
6638 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6639 rx_q->rx_tail_addr, chan);
6640
6641 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6642 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6643 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6644 buf_size,
6645 rx_q->queue_index);
6646 } else {
6647 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6648 priv->dma_conf.dma_buf_sz,
6649 rx_q->queue_index);
6650 }
6651
6652 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6653 }
6654
6655 /* DMA TX Channel Configuration */
6656 for (chan = 0; chan < tx_cnt; chan++) {
6657 tx_q = &priv->dma_conf.tx_queue[chan];
6658
6659 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6660 tx_q->dma_tx_phy, chan);
6661
6662 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6663 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6664 tx_q->tx_tail_addr, chan);
6665
6666 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6667 tx_q->txtimer.function = stmmac_tx_timer;
6668 }
6669
6670 /* Enable the MAC Rx/Tx */
6671 stmmac_mac_set(priv, priv->ioaddr, true);
6672
6673 /* Start Rx & Tx DMA Channels */
6674 stmmac_start_all_dma(priv);
6675
6676 ret = stmmac_request_irq(dev);
6677 if (ret)
6678 goto irq_error;
6679
6680 /* Enable NAPI process */
6681 stmmac_enable_all_queues(priv);
6682 netif_carrier_on(dev);
6683 netif_tx_start_all_queues(dev);
6684 stmmac_enable_all_dma_irq(priv);
6685
6686 return 0;
6687
6688 irq_error:
6689 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6690 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6691
6692 stmmac_hw_teardown(dev);
6693 init_error:
6694 free_dma_desc_resources(priv, &priv->dma_conf);
6695 dma_desc_error:
6696 return ret;
6697 }
6698
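/**
 * stmmac_xsk_wakeup - AF_XDP need-wakeup hook
 * @dev: device pointer
 * @queue: queue index to wake
 * @flags: XDP_WAKEUP_* flags
 * Description: validates the queue and the XSK pool and schedules the
 * combined RX/TX NAPI, since the EQoS core has no per-channel SW
 * interrupt to kick it from.
 */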
6699 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6700 {
6701 struct stmmac_priv *priv = netdev_priv(dev);
6702 struct stmmac_rx_queue *rx_q;
6703 struct stmmac_tx_queue *tx_q;
6704 struct stmmac_channel *ch;
6705
6706 if (test_bit(STMMAC_DOWN, &priv->state) ||
6707 !netif_carrier_ok(priv->dev))
6708 return -ENETDOWN;
6709
6710 if (!stmmac_xdp_is_enabled(priv))
6711 return -EINVAL;
6712
6713 if (queue >= priv->plat->rx_queues_to_use ||
6714 queue >= priv->plat->tx_queues_to_use)
6715 return -EINVAL;
6716
6717 rx_q = &priv->dma_conf.rx_queue[queue];
6718 tx_q = &priv->dma_conf.tx_queue[queue];
6719 ch = &priv->channel[queue];
6720
6721 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6722 return -EINVAL;
6723
6724 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6725 /* EQoS does not have per-DMA channel SW interrupt,
6726 * so we schedule RX Napi straight-away.
6727 */
6728 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6729 __napi_schedule(&ch->rxtx_napi);
6730 }
6731
6732 return 0;
6733 }
6734
6735 static const struct net_device_ops stmmac_netdev_ops = {
6736 .ndo_open = stmmac_open,
6737 .ndo_start_xmit = stmmac_xmit,
6738 .ndo_stop = stmmac_release,
6739 .ndo_change_mtu = stmmac_change_mtu,
6740 .ndo_fix_features = stmmac_fix_features,
6741 .ndo_set_features = stmmac_set_features,
6742 .ndo_set_rx_mode = stmmac_set_rx_mode,
6743 .ndo_tx_timeout = stmmac_tx_timeout,
6744 .ndo_eth_ioctl = stmmac_ioctl,
6745 .ndo_setup_tc = stmmac_setup_tc,
6746 .ndo_select_queue = stmmac_select_queue,
6747 #ifdef CONFIG_NET_POLL_CONTROLLER
6748 .ndo_poll_controller = stmmac_poll_controller,
6749 #endif
6750 .ndo_set_mac_address = stmmac_set_mac_address,
6751 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6752 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6753 .ndo_bpf = stmmac_bpf,
6754 .ndo_xdp_xmit = stmmac_xdp_xmit,
6755 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6756 };
6757
6758 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6759 {
6760 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6761 return;
6762 if (test_bit(STMMAC_DOWN, &priv->state))
6763 return;
6764
6765 netdev_err(priv->dev, "Reset adapter.\n");
6766
6767 rtnl_lock();
6768 netif_trans_update(priv->dev);
6769 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6770 usleep_range(1000, 2000);
6771
6772 set_bit(STMMAC_DOWN, &priv->state);
6773 dev_close(priv->dev);
6774 dev_open(priv->dev, NULL);
6775 clear_bit(STMMAC_DOWN, &priv->state);
6776 clear_bit(STMMAC_RESETING, &priv->state);
6777 rtnl_unlock();
6778 }
6779
6780 static void stmmac_service_task(struct work_struct *work)
6781 {
6782 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6783 service_task);
6784
6785 stmmac_reset_subtask(priv);
6786 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6787 }
6788
6789 /**
6790 * stmmac_hw_init - Init the MAC device
6791 * @priv: driver private structure
6792 * Description: this function is to configure the MAC device according to
6793 * some platform parameters or the HW capability register. It prepares the
6794 * driver to use either ring or chain modes and to setup either enhanced or
6795 * normal descriptors.
6796 */
6797 static int stmmac_hw_init(struct stmmac_priv *priv)
6798 {
6799 int ret;
6800
6801 /* dwmac-sun8i only works in chain mode */
6802 if (priv->plat->has_sun8i)
6803 chain_mode = 1;
6804 priv->chain_mode = chain_mode;
6805
6806 /* Initialize HW Interface */
6807 ret = stmmac_hwif_init(priv);
6808 if (ret)
6809 return ret;
6810
6811 /* Get the HW capability (new GMAC newer than 3.50a) */
6812 priv->hw_cap_support = stmmac_get_hw_features(priv);
6813 if (priv->hw_cap_support) {
6814 dev_info(priv->device, "DMA HW capability register supported\n");
6815
6816 /* We can override some gmac/dma configuration fields
6817 * (e.g. enh_desc, tx_coe) that are passed through the
6818 * platform with the values from the HW capability
6819 * register (if supported).
6820 */
6821 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6822 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6823 !priv->plat->use_phy_wol;
6824 priv->hw->pmt = priv->plat->pmt;
6825 if (priv->dma_cap.hash_tb_sz) {
6826 priv->hw->multicast_filter_bins =
6827 (BIT(priv->dma_cap.hash_tb_sz) << 5);
6828 priv->hw->mcast_bits_log2 =
6829 ilog2(priv->hw->multicast_filter_bins);
6830 }
6831
6832 /* TXCOE doesn't work in thresh DMA mode */
6833 if (priv->plat->force_thresh_dma_mode)
6834 priv->plat->tx_coe = 0;
6835 else
6836 priv->plat->tx_coe = priv->dma_cap.tx_coe;
6837
6838 /* In case of GMAC4 rx_coe is from HW cap register. */
6839 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6840
6841 if (priv->dma_cap.rx_coe_type2)
6842 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6843 else if (priv->dma_cap.rx_coe_type1)
6844 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6845
6846 } else {
6847 dev_info(priv->device, "No HW DMA feature register supported\n");
6848 }
6849
6850 if (priv->plat->rx_coe) {
6851 priv->hw->rx_csum = priv->plat->rx_coe;
6852 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6853 if (priv->synopsys_id < DWMAC_CORE_4_00)
6854 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6855 }
6856 if (priv->plat->tx_coe)
6857 dev_info(priv->device, "TX Checksum insertion supported\n");
6858
6859 if (priv->plat->pmt) {
6860 dev_info(priv->device, "Wake-Up On Lan supported\n");
6861 device_set_wakeup_capable(priv->device, 1);
6862 }
6863
6864 if (priv->dma_cap.tsoen)
6865 dev_info(priv->device, "TSO supported\n");
6866
6867 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6868 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6869
6870 /* Run HW quirks, if any */
6871 if (priv->hwif_quirks) {
6872 ret = priv->hwif_quirks(priv);
6873 if (ret)
6874 return ret;
6875 }
6876
6877 /* Rx Watchdog is available in cores newer than 3.40.
6878 * In some cases, for example on buggy HW, this feature
6879 * has to be disabled; this can be done by passing the
6880 * riwt_off field from the platform.
6881 */
6882 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6883 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6884 priv->use_riwt = 1;
6885 dev_info(priv->device,
6886 "Enable RX Mitigation via HW Watchdog Timer\n");
6887 }
6888
6889 return 0;
6890 }
6891
6892 static void stmmac_napi_add(struct net_device *dev)
6893 {
6894 struct stmmac_priv *priv = netdev_priv(dev);
6895 u32 queue, maxq;
6896
6897 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6898
6899 for (queue = 0; queue < maxq; queue++) {
6900 struct stmmac_channel *ch = &priv->channel[queue];
6901
6902 ch->priv_data = priv;
6903 ch->index = queue;
6904 spin_lock_init(&ch->lock);
6905
6906 if (queue < priv->plat->rx_queues_to_use) {
6907 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
6908 }
6909 if (queue < priv->plat->tx_queues_to_use) {
6910 netif_napi_add_tx(dev, &ch->tx_napi,
6911 stmmac_napi_poll_tx);
6912 }
6913 if (queue < priv->plat->rx_queues_to_use &&
6914 queue < priv->plat->tx_queues_to_use) {
6915 netif_napi_add(dev, &ch->rxtx_napi,
6916 stmmac_napi_poll_rxtx);
6917 }
6918 }
6919 }
6920
6921 static void stmmac_napi_del(struct net_device *dev)
6922 {
6923 struct stmmac_priv *priv = netdev_priv(dev);
6924 u32 queue, maxq;
6925
6926 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6927
6928 for (queue = 0; queue < maxq; queue++) {
6929 struct stmmac_channel *ch = &priv->channel[queue];
6930
6931 if (queue < priv->plat->rx_queues_to_use)
6932 netif_napi_del(&ch->rx_napi);
6933 if (queue < priv->plat->tx_queues_to_use)
6934 netif_napi_del(&ch->tx_napi);
6935 if (queue < priv->plat->rx_queues_to_use &&
6936 queue < priv->plat->tx_queues_to_use) {
6937 netif_napi_del(&ch->rxtx_napi);
6938 }
6939 }
6940 }
6941
6942 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6943 {
6944 struct stmmac_priv *priv = netdev_priv(dev);
6945 int ret = 0;
6946
6947 if (netif_running(dev))
6948 stmmac_release(dev);
6949
6950 stmmac_napi_del(dev);
6951
6952 priv->plat->rx_queues_to_use = rx_cnt;
6953 priv->plat->tx_queues_to_use = tx_cnt;
6954
6955 stmmac_napi_add(dev);
6956
6957 if (netif_running(dev))
6958 ret = stmmac_open(dev);
6959
6960 return ret;
6961 }
6962
6963 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6964 {
6965 struct stmmac_priv *priv = netdev_priv(dev);
6966 int ret = 0;
6967
6968 if (netif_running(dev))
6969 stmmac_release(dev);
6970
6971 priv->dma_conf.dma_rx_size = rx_size;
6972 priv->dma_conf.dma_tx_size = tx_size;
6973
6974 if (netif_running(dev))
6975 ret = stmmac_open(dev);
6976
6977 return ret;
6978 }
6979
6980 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6981 static void stmmac_fpe_lp_task(struct work_struct *work)
6982 {
6983 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6984 fpe_task);
6985 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6986 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6987 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6988 bool *hs_enable = &fpe_cfg->hs_enable;
6989 bool *enable = &fpe_cfg->enable;
6990 int retries = 20;
6991
6992 while (retries-- > 0) {
6993 /* Bail out immediately if FPE handshake is OFF */
6994 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6995 break;
6996
6997 if (*lo_state == FPE_STATE_ENTERING_ON &&
6998 *lp_state == FPE_STATE_ENTERING_ON) {
6999 stmmac_fpe_configure(priv, priv->ioaddr,
7000 priv->plat->tx_queues_to_use,
7001 priv->plat->rx_queues_to_use,
7002 *enable);
7003
7004 netdev_info(priv->dev, "configured FPE\n");
7005
7006 *lo_state = FPE_STATE_ON;
7007 *lp_state = FPE_STATE_ON;
7008 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7009 break;
7010 }
7011
7012 if ((*lo_state == FPE_STATE_CAPABLE ||
7013 *lo_state == FPE_STATE_ENTERING_ON) &&
7014 *lp_state != FPE_STATE_ON) {
7015 netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7016 *lo_state, *lp_state);
7017 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7018 MPACKET_VERIFY);
7019 }
7020 /* Sleep then retry */
7021 msleep(500);
7022 }
7023
7024 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7025 }
7026
7027 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7028 {
7029 if (priv->plat->fpe_cfg->hs_enable != enable) {
7030 if (enable) {
7031 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7032 MPACKET_VERIFY);
7033 } else {
7034 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7035 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7036 }
7037
7038 priv->plat->fpe_cfg->hs_enable = enable;
7039 }
7040 }
7041
7042 /**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function; it allocates the
 * net_device via alloc_etherdev and sets up the private structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

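	/* Bitmap of queues with an AF_XDP zero-copy buffer pool attached */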
	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers provide only a reset callback
		 * instead of an assert + deassert callback pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
	 * 32/40/64-bit widths, but some SoCs differ: e.g. the i.MX8MP
	 * supports 34 bits, which is reported as 40 bits in
	 * MAC_HW_Feature1[ADDR64]. So override dma_cap.addr64 according to
	 * the real HW design.
	 */
	if (priv->plat->addr64)
		priv->dma_cap.addr64 = priv->plat->addr64;

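	/* Request a DMA mask matching the addressing width determined above;
	 * if the wider mask is rejected, fall back to 32-bit DMA.
	 */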
	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
						DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform,
	 * the CSR Clock Range selection cannot be changed at run-time
	 * and is fixed. Otherwise the driver tries to set the MDC clock
	 * dynamically according to the actual csr clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

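	/* Keep the device powered while probing: the MDIO/PHY setup below
	 * touches the hardware. The matching pm_runtime_put() at the end of
	 * probe hands clock control over to runtime PM.
	 */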
	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err_probe(priv->device, ret,
				      "%s: MDIO bus (id: %d) registration failed\n",
				      __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after the VLAN filter is
	 * deleted, which is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, program the PMT register
 * (for WoL) and release the driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

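	/* When wake-up is handled by the MAC's PMT block, keep the link up so
	 * wake packets can still be received; otherwise fully suspend the
	 * link and, if the PHY is the wake-up source, lower its speed to
	 * save power.
	 */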
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: invoked on resume to bring the DMA and core back into a
 * usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received. Anyway,
	 * it's better to clear this bit manually because it can generate
	 * problems while resuming from other devices (e.g. a serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

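	/* Bring phylink back up. The link speed was only lowered during
	 * suspend when the PHY itself is the wake-up source, so it is only
	 * raised again in that case.
	 */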
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
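/* Parse the built-in "stmmaceth=" command line, a comma-separated list of
 * "option:value" pairs that override the module parameters above, e.g.
 * (illustrative values): stmmaceth=debug:16,phyaddr:1,watchdog:5000
 */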
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");