1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
57 */
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
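/* Worked example (illustrative, assuming SMP_CACHE_BYTES == 64):
 * STMMAC_ALIGN(1500) = ALIGN(ALIGN(1500, 64), 16) = ALIGN(1536, 16) = 1536,
 * i.e. lengths are first rounded up to a cache line and then kept 16-byte
 * aligned for the DMA engine.
 */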
63
64 /* Module parameters */
65 #define TX_TIMEO 5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
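/* Example (illustrative): with dma_tx_size == 512 descriptors,
 * STMMAC_TX_THRESH() evaluates to 128, i.e. the threshold is a quarter of
 * the ring.
 */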
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK 256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allows the user to force the use of chain mode instead of ring mode
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 int ret = 0;
153
154 if (enabled) {
155 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 if (ret)
157 return ret;
158 ret = clk_prepare_enable(priv->plat->pclk);
159 if (ret) {
160 clk_disable_unprepare(priv->plat->stmmac_clk);
161 return ret;
162 }
163 if (priv->plat->clks_config) {
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 if (ret) {
166 clk_disable_unprepare(priv->plat->stmmac_clk);
167 clk_disable_unprepare(priv->plat->pclk);
168 return ret;
169 }
170 }
171 } else {
172 clk_disable_unprepare(priv->plat->stmmac_clk);
173 clk_disable_unprepare(priv->plat->pclk);
174 if (priv->plat->clks_config)
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 }
177
178 return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 buf_sz = DEFAULT_BUFSIZE;
193 if (unlikely(flow_ctrl > 1))
194 flow_ctrl = FLOW_AUTO;
195 else if (likely(flow_ctrl < 0))
196 flow_ctrl = FLOW_OFF;
197 if (unlikely((pause < 0) || (pause > 0xffff)))
198 pause = PAUSE_TIME;
199 if (eee_timer < 0)
200 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
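/* For instance (illustrative), loading the module with buf_sz=256 falls
 * below DEFAULT_BUFSIZE and is reset to 1536 by the check above, while
 * flow_ctrl=5 is clamped back to FLOW_AUTO.
 */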
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 u32 queue;
209
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
212
213 if (stmmac_xdp_is_enabled(priv) &&
214 test_bit(queue, priv->af_xdp_zc_qps)) {
215 napi_disable(&ch->rxtx_napi);
216 continue;
217 }
218
219 if (queue < rx_queues_cnt)
220 napi_disable(&ch->rx_napi);
221 if (queue < tx_queues_cnt)
222 napi_disable(&ch->tx_napi);
223 }
224 }
225
226 /**
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
229 */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 struct stmmac_rx_queue *rx_q;
234 u32 queue;
235
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
240 synchronize_rcu();
241 break;
242 }
243 }
244
245 __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
251 */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 u32 queue;
258
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
261
262 if (stmmac_xdp_is_enabled(priv) &&
263 test_bit(queue, priv->af_xdp_zc_qps)) {
264 napi_enable(&ch->rxtx_napi);
265 continue;
266 }
267
268 if (queue < rx_queues_cnt)
269 napi_enable(&ch->rx_napi);
270 if (queue < tx_queues_cnt)
271 napi_enable(&ch->tx_napi);
272 }
273 }
274
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 netif_carrier_off(priv->dev);
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 stmmac_service_event_schedule(priv);
287 }
288
289 /**
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
293 * clock input.
294 * Note:
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Otherwise, the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
300 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 u32 clk_rate;
304
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307 /* The platform-provided default clk_csr is assumed valid for all
308 * cases except the ones mentioned below.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we cannot estimate the proper divider because the frequency
311 * of clk_csr_i is not known. So we do not change the default
312 * divider.
313 */
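/* Example (illustrative): a 75 MHz csr clock falls in the 60-100 MHz
 * range below, so clk_csr is set to STMMAC_CSR_60_100M.
 */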
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 if (clk_rate < CSR_F_35M)
316 priv->clk_csr = STMMAC_CSR_20_35M;
317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 priv->clk_csr = STMMAC_CSR_35_60M;
319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 priv->clk_csr = STMMAC_CSR_60_100M;
321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 priv->clk_csr = STMMAC_CSR_100_150M;
323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 priv->clk_csr = STMMAC_CSR_150_250M;
325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 priv->clk_csr = STMMAC_CSR_250_300M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
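/* Example (illustrative): with dma_tx_size == 512, cur_tx == 10 and
 * dirty_tx == 5, stmmac_tx_avail() returns 512 - 10 + 5 - 1 = 506 free
 * descriptors (one slot is always kept unused to distinguish a full ring
 * from an empty one).
 */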
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
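/* Example (illustrative): with dma_rx_size == 512, cur_rx == 3 and
 * dirty_rx == 500, stmmac_rx_dirty() returns 512 - 500 + 3 = 15 descriptors
 * waiting to be refilled.
 */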
392
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 int tx_lpi_timer;
396
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv->eee_sw_timer_en = en ? 0 : 1;
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404 * stmmac_enable_eee_mode - check and enter LPI mode
405 * @priv: driver private structure
406 * Description: this function verifies and enters LPI mode in case of
407 * EEE.
408 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 u32 tx_cnt = priv->plat->tx_queues_to_use;
412 u32 queue;
413
414 /* check if all TX queues have the work finished */
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418 if (tx_q->dirty_tx != tx_q->cur_tx)
419 return -EBUSY; /* still unfinished work */
420 }
421
422 /* Check and enter in LPI mode */
423 if (!priv->tx_path_in_lpi_mode)
424 stmmac_set_eee_mode(priv, priv->hw,
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 return 0;
427 }
428
429 /**
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function exits and disables EEE when the
433 * LPI state is true. It is called by the xmit path.
434 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 if (!priv->eee_sw_timer_en) {
438 stmmac_lpi_entry_timer_config(priv, 0);
439 return;
440 }
441
442 stmmac_reset_eee_mode(priv, priv->hw);
443 del_timer_sync(&priv->eee_ctrl_timer);
444 priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
450 * Description:
451 * if there is no data transfer and if we are not in LPI state,
452 * then the MAC transmitter can be moved to the LPI state.
453 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458 if (stmmac_enable_eee_mode(priv))
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
465 * Description:
466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enables the LPI state and starts the
468 * related timer.
469 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 int eee_tw_timer = priv->eee_tw_timer;
473
474 /* Using the PCS we cannot deal with the phy registers at this stage
475 * so we do not support extra features like EEE.
476 */
477 if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 priv->hw->pcs == STMMAC_PCS_RTBI)
479 return false;
480
481 /* Check if MAC core supports the EEE feature. */
482 if (!priv->dma_cap.eee)
483 return false;
484
485 mutex_lock(&priv->lock);
486
487 /* Check if it needs to be deactivated */
488 if (!priv->eee_active) {
489 if (priv->eee_enabled) {
490 netdev_dbg(priv->dev, "disable EEE\n");
491 stmmac_lpi_entry_timer_config(priv, 0);
492 del_timer_sync(&priv->eee_ctrl_timer);
493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 if (priv->hw->xpcs)
495 xpcs_config_eee(priv->hw->xpcs,
496 priv->plat->mult_fact_100ns,
497 false);
498 }
499 mutex_unlock(&priv->lock);
500 return false;
501 }
502
503 if (priv->eee_active && !priv->eee_enabled) {
504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 eee_tw_timer);
507 if (priv->hw->xpcs)
508 xpcs_config_eee(priv->hw->xpcs,
509 priv->plat->mult_fact_100ns,
510 true);
511 }
512
513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 del_timer_sync(&priv->eee_ctrl_timer);
515 priv->tx_path_in_lpi_mode = false;
516 stmmac_lpi_entry_timer_config(priv, 1);
517 } else {
518 stmmac_lpi_entry_timer_config(priv, 0);
519 mod_timer(&priv->eee_ctrl_timer,
520 STMMAC_LPI_T(priv->tx_lpi_timer));
521 }
522
523 mutex_unlock(&priv->lock);
524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 return true;
526 }
527
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529 * @priv: driver private structure
530 * @p : descriptor pointer
531 * @skb : the socket buffer
532 * Description :
533 * This function will read the timestamp from the descriptor and pass it to
534 * the stack, and also perform some sanity checks.
535 */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 struct dma_desc *p, struct sk_buff *skb)
538 {
539 struct skb_shared_hwtstamps shhwtstamp;
540 bool found = false;
541 u64 ns = 0;
542
543 if (!priv->hwts_tx_en)
544 return;
545
546 /* exit if skb doesn't support hw tstamp */
547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 return;
549
550 /* check tx tstamp status */
551 if (stmmac_get_tx_timestamp_status(priv, p)) {
552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 found = true;
554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 found = true;
556 }
557
558 if (found) {
559 ns -= priv->plat->cdc_error_adj;
560
561 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp.hwtstamp = ns_to_ktime(ns);
563
564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 /* pass tstamp to stack */
566 skb_tstamp_tx(skb, &shhwtstamp);
567 }
568 }
569
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571 * @priv: driver private structure
572 * @p : descriptor pointer
573 * @np : next descriptor pointer
574 * @skb : the socket buffer
575 * Description :
576 * This function will read the received packet's timestamp from the descriptor
577 * and pass it to the stack. It also performs some sanity checks.
578 */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 struct dma_desc *np, struct sk_buff *skb)
581 {
582 struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 struct dma_desc *desc = p;
584 u64 ns = 0;
585
586 if (!priv->hwts_rx_en)
587 return;
588 /* For GMAC4, the valid timestamp is from CTX next desc. */
589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 desc = np;
591
592 /* Check if timestamp is available */
593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595
596 ns -= priv->plat->cdc_error_adj;
597
598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 shhwtstamp = skb_hwtstamps(skb);
600 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 } else {
603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 }
605 }
606
607 /**
608 * stmmac_hwtstamp_set - control hardware timestamping.
609 * @dev: device pointer.
610 * @ifr: An IOCTL specific structure, that can contain a pointer to
611 * a proprietary structure used to pass information to the driver.
612 * Description:
613 * This function configures the MAC to enable/disable both outgoing(TX)
614 * and incoming(RX) packets time stamping based on user input.
615 * Return Value:
616 * 0 on success and an appropriate -ve integer on failure.
617 */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 struct stmmac_priv *priv = netdev_priv(dev);
621 struct hwtstamp_config config;
622 u32 ptp_v2 = 0;
623 u32 tstamp_all = 0;
624 u32 ptp_over_ipv4_udp = 0;
625 u32 ptp_over_ipv6_udp = 0;
626 u32 ptp_over_ethernet = 0;
627 u32 snap_type_sel = 0;
628 u32 ts_master_en = 0;
629 u32 ts_event_en = 0;
630
631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 netdev_alert(priv->dev, "No support for HW time stamping\n");
633 priv->hwts_tx_en = 0;
634 priv->hwts_rx_en = 0;
635
636 return -EOPNOTSUPP;
637 }
638
639 if (copy_from_user(&config, ifr->ifr_data,
640 sizeof(config)))
641 return -EFAULT;
642
643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 __func__, config.flags, config.tx_type, config.rx_filter);
645
646 if (config.tx_type != HWTSTAMP_TX_OFF &&
647 config.tx_type != HWTSTAMP_TX_ON)
648 return -ERANGE;
649
650 if (priv->adv_ts) {
651 switch (config.rx_filter) {
652 case HWTSTAMP_FILTER_NONE:
653 /* time stamp no incoming packet at all */
654 config.rx_filter = HWTSTAMP_FILTER_NONE;
655 break;
656
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 /* PTP v1, UDP, any kind of event packet */
659 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 /* 'xmac' hardware can support Sync, Pdelay_Req and
661 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 * This leaves Delay_Req timestamps out.
663 * Enable all events *and* general purpose message
664 * timestamping
665 */
666 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 break;
670
671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 /* PTP v1, UDP, Sync packet */
673 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 /* take time stamp for SYNC messages only */
675 ts_event_en = PTP_TCR_TSEVNTENA;
676
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 break;
680
681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 /* PTP v1, UDP, Delay_req packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 /* take time stamp for Delay_Req messages only */
685 ts_master_en = PTP_TCR_TSMSTRENA;
686 ts_event_en = PTP_TCR_TSEVNTENA;
687
688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 break;
691
692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 /* PTP v2, UDP, any kind of event packet */
694 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 ptp_v2 = PTP_TCR_TSVER2ENA;
696 /* take time stamp for all event messages */
697 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698
699 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 break;
702
703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 /* PTP v2, UDP, Sync packet */
705 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 ptp_v2 = PTP_TCR_TSVER2ENA;
707 /* take time stamp for SYNC messages only */
708 ts_event_en = PTP_TCR_TSEVNTENA;
709
710 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 /* PTP v2, UDP, Delay_req packet */
716 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 break;
725
726 case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 /* PTP v2/802.AS1 any layer, any kind of event packet */
728 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 ptp_v2 = PTP_TCR_TSVER2ENA;
730 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 if (priv->synopsys_id < DWMAC_CORE_4_10)
732 ts_event_en = PTP_TCR_TSEVNTENA;
733 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 ptp_over_ethernet = PTP_TCR_TSIPENA;
736 break;
737
738 case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 /* PTP v2/802.AS1, any layer, Sync packet */
740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 ptp_v2 = PTP_TCR_TSVER2ENA;
742 /* take time stamp for SYNC messages only */
743 ts_event_en = PTP_TCR_TSEVNTENA;
744
745 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 ptp_over_ethernet = PTP_TCR_TSIPENA;
748 break;
749
750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 /* PTP v2/802.AS1, any layer, Delay_req packet */
752 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 ptp_v2 = PTP_TCR_TSVER2ENA;
754 /* take time stamp for Delay_Req messages only */
755 ts_master_en = PTP_TCR_TSMSTRENA;
756 ts_event_en = PTP_TCR_TSEVNTENA;
757
758 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 ptp_over_ethernet = PTP_TCR_TSIPENA;
761 break;
762
763 case HWTSTAMP_FILTER_NTP_ALL:
764 case HWTSTAMP_FILTER_ALL:
765 /* time stamp any incoming packet */
766 config.rx_filter = HWTSTAMP_FILTER_ALL;
767 tstamp_all = PTP_TCR_TSENALL;
768 break;
769
770 default:
771 return -ERANGE;
772 }
773 } else {
774 switch (config.rx_filter) {
775 case HWTSTAMP_FILTER_NONE:
776 config.rx_filter = HWTSTAMP_FILTER_NONE;
777 break;
778 default:
779 /* PTP v1, UDP, any kind of event packet */
780 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 break;
782 }
783 }
784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786
787 priv->systime_flags = STMMAC_HWTS_ACTIVE;
788
789 if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 priv->systime_flags |= tstamp_all | ptp_v2 |
791 ptp_over_ethernet | ptp_over_ipv6_udp |
792 ptp_over_ipv4_udp | ts_event_en |
793 ts_master_en | snap_type_sel;
794 }
795
796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797
798 memcpy(&priv->tstamp_config, &config, sizeof(config));
799
800 return copy_to_user(ifr->ifr_data, &config,
801 sizeof(config)) ? -EFAULT : 0;
802 }
803
804 /**
805 * stmmac_hwtstamp_get - read hardware timestamping.
806 * @dev: device pointer.
807 * @ifr: An IOCTL specific structure, that can contain a pointer to
808 * a proprietary structure used to pass information to the driver.
809 * Description:
810 * This function obtains the current hardware timestamping settings
811 * as requested.
812 */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct hwtstamp_config *config = &priv->tstamp_config;
817
818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 return -EOPNOTSUPP;
820
821 return copy_to_user(ifr->ifr_data, config,
822 sizeof(*config)) ? -EFAULT : 0;
823 }
824
825 /**
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
827 * @priv: driver private structure
828 * @systime_flags: timestamping flags
829 * Description:
830 * Initialize hardware counter for packet timestamping.
831 * This is valid as long as the interface is open and not suspended.
832 * It will be rerun after resuming from suspend, in which case the timestamping
833 * flags updated by stmmac_hwtstamp_set() also need to be restored.
834 */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 struct timespec64 now;
839 u32 sec_inc = 0;
840 u64 temp = 0;
841
842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 return -EOPNOTSUPP;
844
845 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 priv->systime_flags = systime_flags;
847
848 /* program Sub Second Increment reg */
849 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 priv->plat->clk_ptp_rate,
851 xmac, &sec_inc);
852 temp = div_u64(1000000000ULL, sec_inc);
853
854 /* Store sub second increment for later use */
855 priv->sub_second_inc = sec_inc;
856
857 /* Calculate the default addend value:
858 * the formula is:
859 * addend = (2^32 * freq_div_ratio) / clk_ptp_rate;
860 * where freq_div_ratio = 1e9ns / sec_inc
861 */
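/* Worked example (purely illustrative values): with a clk_ptp_rate of
 * 50 MHz and a sec_inc of 40 ns returned by
 * stmmac_config_sub_second_increment(), temp = 1e9 / 40 = 25,000,000 and
 * default_addend = (25,000,000 << 32) / 50,000,000 = 0x80000000.
 */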
862 temp = (u64)(temp << 32);
863 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865
866 /* initialize system time */
867 ktime_get_real_ts64(&now);
868
869 /* lower 32 bits of tv_sec are safe until y2106 */
870 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871
872 return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875
876 /**
877 * stmmac_init_ptp - init PTP
878 * @priv: driver private structure
879 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880 * This is done by looking at the HW cap. register.
881 * This function also registers the ptp driver.
882 */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 int ret;
887
888 if (priv->plat->ptp_clk_freq_config)
889 priv->plat->ptp_clk_freq_config(priv);
890
891 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 if (ret)
893 return ret;
894
895 priv->adv_ts = 0;
896 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 if (xmac && priv->dma_cap.atime_stamp)
898 priv->adv_ts = 1;
899 /* Dwmac 3.x core with extend_desc can support adv_ts */
900 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 priv->adv_ts = 1;
902
903 if (priv->dma_cap.time_stamp)
904 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905
906 if (priv->adv_ts)
907 netdev_info(priv->dev,
908 "IEEE 1588-2008 Advanced Timestamp supported\n");
909
910 priv->hwts_tx_en = 0;
911 priv->hwts_rx_en = 0;
912
913 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 stmmac_hwtstamp_correct_latency(priv, priv);
915
916 return 0;
917 }
918
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 stmmac_ptp_unregister(priv);
923 }
924
925 /**
926 * stmmac_mac_flow_ctrl - Configure flow control in all queues
927 * @priv: driver private structure
928 * @duplex: duplex passed to the next function
929 * Description: It is used for configuring the flow control in all queues
930 */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 u32 tx_cnt = priv->plat->tx_queues_to_use;
934
935 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 priv->pause, tx_cnt);
937 }
938
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 phy_interface_t interface)
941 {
942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943
944 if (priv->hw->xpcs)
945 return &priv->hw->xpcs->pcs;
946
947 if (priv->hw->lynx_pcs)
948 return priv->hw->lynx_pcs;
949
950 return NULL;
951 }
952
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 const struct phylink_link_state *state)
955 {
956 /* Nothing to do, xpcs_config() handles everything */
957 }
958
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 bool *hs_enable = &fpe_cfg->hs_enable;
965
966 if (is_up && *hs_enable) {
967 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 MPACKET_VERIFY);
969 } else {
970 *lo_state = FPE_STATE_OFF;
971 *lp_state = FPE_STATE_OFF;
972 }
973 }
974
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 unsigned int mode, phy_interface_t interface)
977 {
978 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979
980 stmmac_mac_set(priv, priv->ioaddr, false);
981 priv->eee_active = false;
982 priv->tx_lpi_enabled = false;
983 priv->eee_enabled = stmmac_eee_init(priv);
984 stmmac_set_eee_pls(priv, priv->hw, false);
985
986 if (priv->dma_cap.fpesel)
987 stmmac_fpe_link_state_handle(priv, false);
988 }
989
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 struct phy_device *phy,
992 unsigned int mode, phy_interface_t interface,
993 int speed, int duplex,
994 bool tx_pause, bool rx_pause)
995 {
996 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 u32 old_ctrl, ctrl;
998
999 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 priv->plat->serdes_powerup)
1001 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002
1003 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005
1006 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 switch (speed) {
1008 case SPEED_10000:
1009 ctrl |= priv->hw->link.xgmii.speed10000;
1010 break;
1011 case SPEED_5000:
1012 ctrl |= priv->hw->link.xgmii.speed5000;
1013 break;
1014 case SPEED_2500:
1015 ctrl |= priv->hw->link.xgmii.speed2500;
1016 break;
1017 default:
1018 return;
1019 }
1020 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 switch (speed) {
1022 case SPEED_100000:
1023 ctrl |= priv->hw->link.xlgmii.speed100000;
1024 break;
1025 case SPEED_50000:
1026 ctrl |= priv->hw->link.xlgmii.speed50000;
1027 break;
1028 case SPEED_40000:
1029 ctrl |= priv->hw->link.xlgmii.speed40000;
1030 break;
1031 case SPEED_25000:
1032 ctrl |= priv->hw->link.xlgmii.speed25000;
1033 break;
1034 case SPEED_10000:
1035 ctrl |= priv->hw->link.xgmii.speed10000;
1036 break;
1037 case SPEED_2500:
1038 ctrl |= priv->hw->link.speed2500;
1039 break;
1040 case SPEED_1000:
1041 ctrl |= priv->hw->link.speed1000;
1042 break;
1043 default:
1044 return;
1045 }
1046 } else {
1047 switch (speed) {
1048 case SPEED_2500:
1049 ctrl |= priv->hw->link.speed2500;
1050 break;
1051 case SPEED_1000:
1052 ctrl |= priv->hw->link.speed1000;
1053 break;
1054 case SPEED_100:
1055 ctrl |= priv->hw->link.speed100;
1056 break;
1057 case SPEED_10:
1058 ctrl |= priv->hw->link.speed10;
1059 break;
1060 default:
1061 return;
1062 }
1063 }
1064
1065 priv->speed = speed;
1066
1067 if (priv->plat->fix_mac_speed)
1068 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069
1070 if (!duplex)
1071 ctrl &= ~priv->hw->link.duplex;
1072 else
1073 ctrl |= priv->hw->link.duplex;
1074
1075 /* Flow Control operation */
1076 if (rx_pause && tx_pause)
1077 priv->flow_ctrl = FLOW_AUTO;
1078 else if (rx_pause && !tx_pause)
1079 priv->flow_ctrl = FLOW_RX;
1080 else if (!rx_pause && tx_pause)
1081 priv->flow_ctrl = FLOW_TX;
1082 else
1083 priv->flow_ctrl = FLOW_OFF;
1084
1085 stmmac_mac_flow_ctrl(priv, duplex);
1086
1087 if (ctrl != old_ctrl)
1088 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089
1090 stmmac_mac_set(priv, priv->ioaddr, true);
1091 if (phy && priv->dma_cap.eee) {
1092 priv->eee_active =
1093 phy_init_eee(phy, !(priv->plat->flags &
1094 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 priv->eee_enabled = stmmac_eee_init(priv);
1096 priv->tx_lpi_enabled = priv->eee_enabled;
1097 stmmac_set_eee_pls(priv, priv->hw, true);
1098 }
1099
1100 if (priv->dma_cap.fpesel)
1101 stmmac_fpe_link_state_handle(priv, true);
1102
1103 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 .mac_select_pcs = stmmac_mac_select_pcs,
1109 .mac_config = stmmac_mac_config,
1110 .mac_link_down = stmmac_mac_link_down,
1111 .mac_link_up = stmmac_mac_link_up,
1112 };
1113
1114 /**
1115 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116 * @priv: driver private structure
1117 * Description: this is to verify if the HW supports the PCS.
1118 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1119 * configured for the TBI, RTBI, or SGMII PHY interface.
1120 */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 int interface = priv->plat->mac_interface;
1124
1125 if (priv->dma_cap.pcs) {
1126 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 priv->hw->pcs = STMMAC_PCS_RGMII;
1132 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 priv->hw->pcs = STMMAC_PCS_SGMII;
1135 }
1136 }
1137 }
1138
1139 /**
1140 * stmmac_init_phy - PHY initialization
1141 * @dev: net device structure
1142 * Description: it initializes the driver's PHY state, and attaches the PHY
1143 * to the mac driver.
1144 * Return value:
1145 * 0 on success
1146 */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 struct stmmac_priv *priv = netdev_priv(dev);
1150 struct fwnode_handle *phy_fwnode;
1151 struct fwnode_handle *fwnode;
1152 int ret;
1153
1154 if (!phylink_expects_phy(priv->phylink))
1155 return 0;
1156
1157 fwnode = priv->plat->port_node;
1158 if (!fwnode)
1159 fwnode = dev_fwnode(priv->device);
1160
1161 if (fwnode)
1162 phy_fwnode = fwnode_get_phy_node(fwnode);
1163 else
1164 phy_fwnode = NULL;
1165
1166 /* Some DT bindings do not set up the PHY handle. Let's try to
1167 * parse it manually.
1168 */
1169 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 int addr = priv->plat->phy_addr;
1171 struct phy_device *phydev;
1172
1173 if (addr < 0) {
1174 netdev_err(priv->dev, "no phy found\n");
1175 return -ENODEV;
1176 }
1177
1178 phydev = mdiobus_get_phy(priv->mii, addr);
1179 if (!phydev) {
1180 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 return -ENODEV;
1182 }
1183
1184 ret = phylink_connect_phy(priv->phylink, phydev);
1185 } else {
1186 fwnode_handle_put(phy_fwnode);
1187 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 }
1189
1190 if (!priv->plat->pmt) {
1191 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192
1193 phylink_ethtool_get_wol(priv->phylink, &wol);
1194 device_set_wakeup_capable(priv->device, !!wol.supported);
1195 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 }
1197
1198 return ret;
1199 }
1200
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 /* Half-Duplex can only work with single tx queue */
1204 if (priv->plat->tx_queues_to_use > 1)
1205 priv->phylink_config.mac_capabilities &=
1206 ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 else
1208 priv->phylink_config.mac_capabilities |=
1209 (MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 struct stmmac_mdio_bus_data *mdio_bus_data;
1215 int mode = priv->plat->phy_interface;
1216 struct fwnode_handle *fwnode;
1217 struct phylink *phylink;
1218 int max_speed;
1219
1220 priv->phylink_config.dev = &priv->dev->dev;
1221 priv->phylink_config.type = PHYLINK_NETDEV;
1222 priv->phylink_config.mac_managed_pm = true;
1223
1224 mdio_bus_data = priv->plat->mdio_bus_data;
1225 if (mdio_bus_data)
1226 priv->phylink_config.ovr_an_inband =
1227 mdio_bus_data->xpcs_an_inband;
1228
1229 /* Set the platform/firmware specified interface mode. Note, phylink
1230 * deals with the PHY interface mode, not the MAC interface mode.
1231 */
1232 __set_bit(mode, priv->phylink_config.supported_interfaces);
1233
1234 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 if (priv->hw->xpcs)
1236 xpcs_get_interfaces(priv->hw->xpcs,
1237 priv->phylink_config.supported_interfaces);
1238
1239 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 MAC_10FD | MAC_100FD |
1241 MAC_1000FD;
1242
1243 stmmac_set_half_duplex(priv);
1244
1245 /* Get the MAC specific capabilities */
1246 stmmac_mac_phylink_get_caps(priv);
1247
1248 max_speed = priv->plat->max_speed;
1249 if (max_speed)
1250 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251
1252 fwnode = priv->plat->port_node;
1253 if (!fwnode)
1254 fwnode = dev_fwnode(priv->device);
1255
1256 phylink = phylink_create(&priv->phylink_config, fwnode,
1257 mode, &stmmac_phylink_mac_ops);
1258 if (IS_ERR(phylink))
1259 return PTR_ERR(phylink);
1260
1261 priv->phylink = phylink;
1262 return 0;
1263 }
1264
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 struct stmmac_dma_conf *dma_conf)
1267 {
1268 u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 unsigned int desc_size;
1270 void *head_rx;
1271 u32 queue;
1272
1273 /* Display RX rings */
1274 for (queue = 0; queue < rx_cnt; queue++) {
1275 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276
1277 pr_info("\tRX Queue %u rings\n", queue);
1278
1279 if (priv->extend_desc) {
1280 head_rx = (void *)rx_q->dma_erx;
1281 desc_size = sizeof(struct dma_extended_desc);
1282 } else {
1283 head_rx = (void *)rx_q->dma_rx;
1284 desc_size = sizeof(struct dma_desc);
1285 }
1286
1287 /* Display RX ring */
1288 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 rx_q->dma_rx_phy, desc_size);
1290 }
1291 }
1292
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 struct stmmac_dma_conf *dma_conf)
1295 {
1296 u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 unsigned int desc_size;
1298 void *head_tx;
1299 u32 queue;
1300
1301 /* Display TX rings */
1302 for (queue = 0; queue < tx_cnt; queue++) {
1303 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304
1305 pr_info("\tTX Queue %d rings\n", queue);
1306
1307 if (priv->extend_desc) {
1308 head_tx = (void *)tx_q->dma_etx;
1309 desc_size = sizeof(struct dma_extended_desc);
1310 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 head_tx = (void *)tx_q->dma_entx;
1312 desc_size = sizeof(struct dma_edesc);
1313 } else {
1314 head_tx = (void *)tx_q->dma_tx;
1315 desc_size = sizeof(struct dma_desc);
1316 }
1317
1318 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 tx_q->dma_tx_phy, desc_size);
1320 }
1321 }
1322
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 struct stmmac_dma_conf *dma_conf)
1325 {
1326 /* Display RX ring */
1327 stmmac_display_rx_rings(priv, dma_conf);
1328
1329 /* Display TX ring */
1330 stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 int ret = bufsize;
1336
1337 if (mtu >= BUF_SIZE_8KiB)
1338 ret = BUF_SIZE_16KiB;
1339 else if (mtu >= BUF_SIZE_4KiB)
1340 ret = BUF_SIZE_8KiB;
1341 else if (mtu >= BUF_SIZE_2KiB)
1342 ret = BUF_SIZE_4KiB;
1343 else if (mtu > DEFAULT_BUFSIZE)
1344 ret = BUF_SIZE_2KiB;
1345 else
1346 ret = DEFAULT_BUFSIZE;
1347
1348 return ret;
1349 }
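/* Example (illustrative, assuming the usual BUF_SIZE_* definitions of
 * 2/4/8/16 KiB): an MTU of 1500 keeps the default 1536-byte buffers,
 * while an MTU of 3000 selects BUF_SIZE_4KiB and an MTU of 9000 selects
 * BUF_SIZE_16KiB.
 */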
1350
1351 /**
1352 * stmmac_clear_rx_descriptors - clear RX descriptors
1353 * @priv: driver private structure
1354 * @dma_conf: structure to take the dma data
1355 * @queue: RX queue index
1356 * Description: this function is called to clear the RX descriptors
1357 * whether basic or extended descriptors are used.
1358 */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 struct stmmac_dma_conf *dma_conf,
1361 u32 queue)
1362 {
1363 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 int i;
1365
1366 /* Clear the RX descriptors */
1367 for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 if (priv->extend_desc)
1369 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 priv->use_riwt, priv->mode,
1371 (i == dma_conf->dma_rx_size - 1),
1372 dma_conf->dma_buf_sz);
1373 else
1374 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 priv->use_riwt, priv->mode,
1376 (i == dma_conf->dma_rx_size - 1),
1377 dma_conf->dma_buf_sz);
1378 }
1379
1380 /**
1381 * stmmac_clear_tx_descriptors - clear tx descriptors
1382 * @priv: driver private structure
1383 * @dma_conf: structure to take the dma data
1384 * @queue: TX queue index.
1385 * Description: this function is called to clear the TX descriptors
1386 * whether basic or extended descriptors are used.
1387 */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 struct stmmac_dma_conf *dma_conf,
1390 u32 queue)
1391 {
1392 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 int i;
1394
1395 /* Clear the TX descriptors */
1396 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 int last = (i == (dma_conf->dma_tx_size - 1));
1398 struct dma_desc *p;
1399
1400 if (priv->extend_desc)
1401 p = &tx_q->dma_etx[i].basic;
1402 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 p = &tx_q->dma_entx[i].basic;
1404 else
1405 p = &tx_q->dma_tx[i];
1406
1407 stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 }
1409 }
1410
1411 /**
1412 * stmmac_clear_descriptors - clear descriptors
1413 * @priv: driver private structure
1414 * @dma_conf: structure to take the dma data
1415 * Description: this function is called to clear the TX and RX descriptors
1416 * whether basic or extended descriptors are used.
1417 */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 struct stmmac_dma_conf *dma_conf)
1420 {
1421 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 u32 queue;
1424
1425 /* Clear the RX descriptors */
1426 for (queue = 0; queue < rx_queue_cnt; queue++)
1427 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428
1429 /* Clear the TX descriptors */
1430 for (queue = 0; queue < tx_queue_cnt; queue++)
1431 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433
1434 /**
1435 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436 * @priv: driver private structure
1437 * @dma_conf: structure to take the dma data
1438 * @p: descriptor pointer
1439 * @i: descriptor index
1440 * @flags: gfp flag
1441 * @queue: RX queue index
1442 * Description: this function is called to allocate a receive buffer, perform
1443 * the DMA mapping and init the descriptor.
1444 */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 struct stmmac_dma_conf *dma_conf,
1447 struct dma_desc *p,
1448 int i, gfp_t flags, u32 queue)
1449 {
1450 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453
1454 if (priv->dma_cap.host_dma_width <= 32)
1455 gfp |= GFP_DMA32;
1456
1457 if (!buf->page) {
1458 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 if (!buf->page)
1460 return -ENOMEM;
1461 buf->page_offset = stmmac_rx_offset(priv);
1462 }
1463
1464 if (priv->sph && !buf->sec_page) {
1465 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 if (!buf->sec_page)
1467 return -ENOMEM;
1468
1469 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 } else {
1472 buf->sec_page = NULL;
1473 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 }
1475
1476 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477
1478 stmmac_set_desc_addr(priv, p, buf->addr);
1479 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 stmmac_init_desc3(priv, p);
1481
1482 return 0;
1483 }
1484
1485 /**
1486 * stmmac_free_rx_buffer - free RX dma buffers
1487 * @priv: private structure
1488 * @rx_q: RX queue
1489 * @i: buffer index.
1490 */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 struct stmmac_rx_queue *rx_q,
1493 int i)
1494 {
1495 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496
1497 if (buf->page)
1498 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 buf->page = NULL;
1500
1501 if (buf->sec_page)
1502 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 buf->sec_page = NULL;
1504 }
1505
1506 /**
1507 * stmmac_free_tx_buffer - free TX dma buffers
1508 * @priv: private structure
1509 * @dma_conf: structure to take the dma data
1510 * @queue: TX queue index
1511 * @i: buffer index.
1512 */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 struct stmmac_dma_conf *dma_conf,
1515 u32 queue, int i)
1516 {
1517 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518
1519 if (tx_q->tx_skbuff_dma[i].buf &&
1520 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 dma_unmap_page(priv->device,
1523 tx_q->tx_skbuff_dma[i].buf,
1524 tx_q->tx_skbuff_dma[i].len,
1525 DMA_TO_DEVICE);
1526 else
1527 dma_unmap_single(priv->device,
1528 tx_q->tx_skbuff_dma[i].buf,
1529 tx_q->tx_skbuff_dma[i].len,
1530 DMA_TO_DEVICE);
1531 }
1532
1533 if (tx_q->xdpf[i] &&
1534 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 xdp_return_frame(tx_q->xdpf[i]);
1537 tx_q->xdpf[i] = NULL;
1538 }
1539
1540 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 tx_q->xsk_frames_done++;
1542
1543 if (tx_q->tx_skbuff[i] &&
1544 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 tx_q->tx_skbuff[i] = NULL;
1547 }
1548
1549 tx_q->tx_skbuff_dma[i].buf = 0;
1550 tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552
1553 /**
1554 * dma_free_rx_skbufs - free RX dma buffers
1555 * @priv: private structure
1556 * @dma_conf: structure to take the dma data
1557 * @queue: RX queue index
1558 */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 struct stmmac_dma_conf *dma_conf,
1561 u32 queue)
1562 {
1563 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 int i;
1565
1566 for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 struct stmmac_dma_conf *dma_conf,
1572 u32 queue, gfp_t flags)
1573 {
1574 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 int i;
1576
1577 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 struct dma_desc *p;
1579 int ret;
1580
1581 if (priv->extend_desc)
1582 p = &((rx_q->dma_erx + i)->basic);
1583 else
1584 p = rx_q->dma_rx + i;
1585
1586 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 queue);
1588 if (ret)
1589 return ret;
1590
1591 rx_q->buf_alloc_num++;
1592 }
1593
1594 return 0;
1595 }
1596
1597 /**
1598 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599 * @priv: private structure
1600 * @dma_conf: structure to take the dma data
1601 * @queue: RX queue index
1602 */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 struct stmmac_dma_conf *dma_conf,
1605 u32 queue)
1606 {
1607 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 int i;
1609
1610 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612
1613 if (!buf->xdp)
1614 continue;
1615
1616 xsk_buff_free(buf->xdp);
1617 buf->xdp = NULL;
1618 }
1619 }
1620
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 struct stmmac_dma_conf *dma_conf,
1623 u32 queue)
1624 {
1625 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 int i;
1627
1628 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1630 * use this macro to make sure there are no size violations.
1631 */
1632 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633
1634 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 struct stmmac_rx_buffer *buf;
1636 dma_addr_t dma_addr;
1637 struct dma_desc *p;
1638
1639 if (priv->extend_desc)
1640 p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 else
1642 p = rx_q->dma_rx + i;
1643
1644 buf = &rx_q->buf_pool[i];
1645
1646 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 if (!buf->xdp)
1648 return -ENOMEM;
1649
1650 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 stmmac_set_desc_addr(priv, p, dma_addr);
1652 rx_q->buf_alloc_num++;
1653 }
1654
1655 return 0;
1656 }
1657
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 return NULL;
1662
1663 return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665
1666 /**
1667 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668 * @priv: driver private structure
1669 * @dma_conf: structure to take the dma data
1670 * @queue: RX queue index
1671 * @flags: gfp flag.
1672 * Description: this function initializes the DMA RX descriptors
1673 * and allocates the socket buffers. It supports the chained and ring
1674 * modes.
1675 */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 struct stmmac_dma_conf *dma_conf,
1678 u32 queue, gfp_t flags)
1679 {
1680 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 int ret;
1682
1683 netif_dbg(priv, probe, priv->dev,
1684 "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 (u32)rx_q->dma_rx_phy);
1686
1687 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688
1689 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690
1691 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692
1693 if (rx_q->xsk_pool) {
1694 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 MEM_TYPE_XSK_BUFF_POOL,
1696 NULL));
1697 netdev_info(priv->dev,
1698 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 rx_q->queue_index);
1700 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 } else {
1702 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 MEM_TYPE_PAGE_POOL,
1704 rx_q->page_pool));
1705 netdev_info(priv->dev,
1706 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 rx_q->queue_index);
1708 }
1709
1710 if (rx_q->xsk_pool) {
1711 /* RX XDP ZC buffer pool may not be populated, e.g.
1712 * xdpsock TX-only.
1713 */
1714 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 } else {
1716 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 if (ret < 0)
1718 return -ENOMEM;
1719 }
1720
1721 /* Setup the chained descriptor addresses */
1722 if (priv->mode == STMMAC_CHAIN_MODE) {
1723 if (priv->extend_desc)
1724 stmmac_mode_init(priv, rx_q->dma_erx,
1725 rx_q->dma_rx_phy,
1726 dma_conf->dma_rx_size, 1);
1727 else
1728 stmmac_mode_init(priv, rx_q->dma_rx,
1729 rx_q->dma_rx_phy,
1730 dma_conf->dma_rx_size, 0);
1731 }
1732
1733 return 0;
1734 }
1735
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 struct stmmac_dma_conf *dma_conf,
1738 gfp_t flags)
1739 {
1740 struct stmmac_priv *priv = netdev_priv(dev);
1741 u32 rx_count = priv->plat->rx_queues_to_use;
1742 int queue;
1743 int ret;
1744
1745 /* RX INITIALIZATION */
1746 netif_dbg(priv, probe, priv->dev,
1747 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748
1749 for (queue = 0; queue < rx_count; queue++) {
1750 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 if (ret)
1752 goto err_init_rx_buffers;
1753 }
1754
1755 return 0;
1756
1757 err_init_rx_buffers:
1758 while (queue >= 0) {
1759 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760
1761 if (rx_q->xsk_pool)
1762 dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 else
1764 dma_free_rx_skbufs(priv, dma_conf, queue);
1765
1766 rx_q->buf_alloc_num = 0;
1767 rx_q->xsk_pool = NULL;
1768
1769 queue--;
1770 }
1771
1772 return ret;
1773 }
1774
1775 /**
1776 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777 * @priv: driver private structure
1778 * @dma_conf: structure to take the dma data
1779 * @queue: TX queue index
1780 * Description: this function initializes the DMA TX descriptors
1781 * and allocates the socket buffers. It supports the chained and ring
1782 * modes.
1783 */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 struct stmmac_dma_conf *dma_conf,
1786 u32 queue)
1787 {
1788 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 int i;
1790
1791 netif_dbg(priv, probe, priv->dev,
1792 "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 (u32)tx_q->dma_tx_phy);
1794
1795 /* Setup the chained descriptor addresses */
1796 if (priv->mode == STMMAC_CHAIN_MODE) {
1797 if (priv->extend_desc)
1798 stmmac_mode_init(priv, tx_q->dma_etx,
1799 tx_q->dma_tx_phy,
1800 dma_conf->dma_tx_size, 1);
1801 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 stmmac_mode_init(priv, tx_q->dma_tx,
1803 tx_q->dma_tx_phy,
1804 dma_conf->dma_tx_size, 0);
1805 }
1806
1807 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808
1809 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 struct dma_desc *p;
1811
1812 if (priv->extend_desc)
1813 p = &((tx_q->dma_etx + i)->basic);
1814 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 p = &((tx_q->dma_entx + i)->basic);
1816 else
1817 p = tx_q->dma_tx + i;
1818
1819 stmmac_clear_desc(priv, p);
1820
1821 tx_q->tx_skbuff_dma[i].buf = 0;
1822 tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 tx_q->tx_skbuff_dma[i].len = 0;
1824 tx_q->tx_skbuff_dma[i].last_segment = false;
1825 tx_q->tx_skbuff[i] = NULL;
1826 }
1827
1828 return 0;
1829 }
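
/* Illustrative sketch (not part of the upstream driver): the TX ring is a
 * single array of whichever descriptor layout is in use, and the "basic"
 * view of entry i is taken exactly as in the selection above. The helper
 * name is hypothetical.
 */
#if 0
static struct dma_desc *example_tx_desc(struct stmmac_priv *priv,
					struct stmmac_tx_queue *tx_q,
					unsigned int i)
{
	if (priv->extend_desc)
		return &tx_q->dma_etx[i].basic;		/* extended descriptors */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		return &tx_q->dma_entx[i].basic;	/* enhanced (TBS) descriptors */
	return &tx_q->dma_tx[i];			/* basic descriptors */
}
#endif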
1830
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 struct stmmac_dma_conf *dma_conf)
1833 {
1834 struct stmmac_priv *priv = netdev_priv(dev);
1835 u32 tx_queue_cnt;
1836 u32 queue;
1837
1838 tx_queue_cnt = priv->plat->tx_queues_to_use;
1839
1840 for (queue = 0; queue < tx_queue_cnt; queue++)
1841 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1842
1843 return 0;
1844 }
1845
1846 /**
1847 * init_dma_desc_rings - init the RX/TX descriptor rings
1848 * @dev: net device structure
1849 * @dma_conf: structure to take the dma data
1850 * @flags: gfp flag.
1851 * Description: this function initializes the DMA RX/TX descriptors
1852 * and allocates the socket buffers. It supports the chained and ring
1853 * modes.
1854 */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 struct stmmac_dma_conf *dma_conf,
1857 gfp_t flags)
1858 {
1859 struct stmmac_priv *priv = netdev_priv(dev);
1860 int ret;
1861
1862 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 if (ret)
1864 return ret;
1865
1866 ret = init_dma_tx_desc_rings(dev, dma_conf);
1867
1868 stmmac_clear_descriptors(priv, dma_conf);
1869
1870 if (netif_msg_hw(priv))
1871 stmmac_display_rings(priv, dma_conf);
1872
1873 return ret;
1874 }
1875
1876 /**
1877 * dma_free_tx_skbufs - free TX dma buffers
1878 * @priv: private structure
1879 * @dma_conf: structure to take the dma data
1880 * @queue: TX queue index
1881 */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 struct stmmac_dma_conf *dma_conf,
1884 u32 queue)
1885 {
1886 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 int i;
1888
1889 tx_q->xsk_frames_done = 0;
1890
1891 for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893
1894 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 tx_q->xsk_frames_done = 0;
1897 tx_q->xsk_pool = NULL;
1898 }
1899 }
1900
1901 /**
1902 * stmmac_free_tx_skbufs - free TX skb buffers
1903 * @priv: private structure
1904 */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 u32 queue;
1909
1910 for (queue = 0; queue < tx_queue_cnt; queue++)
1911 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913
1914 /**
1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916 * @priv: private structure
1917 * @dma_conf: structure to take the dma data
1918 * @queue: RX queue index
1919 */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 struct stmmac_dma_conf *dma_conf,
1922 u32 queue)
1923 {
1924 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925
1926 /* Release the DMA RX socket buffers */
1927 if (rx_q->xsk_pool)
1928 dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 else
1930 dma_free_rx_skbufs(priv, dma_conf, queue);
1931
1932 rx_q->buf_alloc_num = 0;
1933 rx_q->xsk_pool = NULL;
1934
1935 /* Free DMA regions of consistent memory previously allocated */
1936 if (!priv->extend_desc)
1937 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 sizeof(struct dma_desc),
1939 rx_q->dma_rx, rx_q->dma_rx_phy);
1940 else
1941 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 sizeof(struct dma_extended_desc),
1943 rx_q->dma_erx, rx_q->dma_rx_phy);
1944
1945 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947
1948 kfree(rx_q->buf_pool);
1949 if (rx_q->page_pool)
1950 page_pool_destroy(rx_q->page_pool);
1951 }
1952
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 struct stmmac_dma_conf *dma_conf)
1955 {
1956 u32 rx_count = priv->plat->rx_queues_to_use;
1957 u32 queue;
1958
1959 /* Free RX queue resources */
1960 for (queue = 0; queue < rx_count; queue++)
1961 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963
1964 /**
1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966 * @priv: private structure
1967 * @dma_conf: structure to take the dma data
1968 * @queue: TX queue index
1969 */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 struct stmmac_dma_conf *dma_conf,
1972 u32 queue)
1973 {
1974 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 size_t size;
1976 void *addr;
1977
1978 /* Release the DMA TX socket buffers */
1979 dma_free_tx_skbufs(priv, dma_conf, queue);
1980
1981 if (priv->extend_desc) {
1982 size = sizeof(struct dma_extended_desc);
1983 addr = tx_q->dma_etx;
1984 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 size = sizeof(struct dma_edesc);
1986 addr = tx_q->dma_entx;
1987 } else {
1988 size = sizeof(struct dma_desc);
1989 addr = tx_q->dma_tx;
1990 }
1991
1992 size *= dma_conf->dma_tx_size;
1993
1994 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995
1996 kfree(tx_q->tx_skbuff_dma);
1997 kfree(tx_q->tx_skbuff);
1998 }
1999
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 struct stmmac_dma_conf *dma_conf)
2002 {
2003 u32 tx_count = priv->plat->tx_queues_to_use;
2004 u32 queue;
2005
2006 /* Free TX queue resources */
2007 for (queue = 0; queue < tx_count; queue++)
2008 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010
2011 /**
2012 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013 * @priv: private structure
2014 * @dma_conf: structure to take the dma data
2015 * @queue: RX queue index
2016 * Description: according to which descriptor can be used (extended or basic)
2017 * this function allocates the resources for the RX path: the page pool used
2018 * for the RX buffers, the buffer bookkeeping array and the descriptor ring
2019 * in coherent memory, and it registers the XDP RxQ info.
2020 */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 struct stmmac_dma_conf *dma_conf,
2023 u32 queue)
2024 {
2025 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 struct stmmac_channel *ch = &priv->channel[queue];
2027 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 struct page_pool_params pp_params = { 0 };
2029 unsigned int num_pages;
2030 unsigned int napi_id;
2031 int ret;
2032
2033 rx_q->queue_index = queue;
2034 rx_q->priv_data = priv;
2035
2036 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 pp_params.pool_size = dma_conf->dma_rx_size;
2038 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 pp_params.order = ilog2(num_pages);
2040 pp_params.nid = dev_to_node(priv->device);
2041 pp_params.dev = priv->device;
2042 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 pp_params.offset = stmmac_rx_offset(priv);
2044 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045
2046 rx_q->page_pool = page_pool_create(&pp_params);
2047 if (IS_ERR(rx_q->page_pool)) {
2048 ret = PTR_ERR(rx_q->page_pool);
2049 rx_q->page_pool = NULL;
2050 return ret;
2051 }
2052
2053 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 sizeof(*rx_q->buf_pool),
2055 GFP_KERNEL);
2056 if (!rx_q->buf_pool)
2057 return -ENOMEM;
2058
2059 if (priv->extend_desc) {
2060 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 dma_conf->dma_rx_size *
2062 sizeof(struct dma_extended_desc),
2063 &rx_q->dma_rx_phy,
2064 GFP_KERNEL);
2065 if (!rx_q->dma_erx)
2066 return -ENOMEM;
2067
2068 } else {
2069 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 dma_conf->dma_rx_size *
2071 sizeof(struct dma_desc),
2072 &rx_q->dma_rx_phy,
2073 GFP_KERNEL);
2074 if (!rx_q->dma_rx)
2075 return -ENOMEM;
2076 }
2077
2078 if (stmmac_xdp_is_enabled(priv) &&
2079 test_bit(queue, priv->af_xdp_zc_qps))
2080 napi_id = ch->rxtx_napi.napi_id;
2081 else
2082 napi_id = ch->rx_napi.napi_id;
2083
2084 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 rx_q->queue_index,
2086 napi_id);
2087 if (ret) {
2088 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 return -EINVAL;
2090 }
2091
2092 return 0;
2093 }
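
/* Worked example (assumed values, not part of the upstream driver) of the
 * page_pool sizing above: with a 4 KiB PAGE_SIZE and the default 1536-byte
 * DMA buffer, DIV_ROUND_UP(1536, 4096) = 1 page and order = ilog2(1) = 0,
 * so the pool hands out single pages; an 8 KiB buffer would need two pages,
 * i.e. order-1 compound pages. The helper name is hypothetical.
 */
#if 0
static unsigned int example_rx_pool_order(unsigned int dma_buf_sz)
{
	unsigned int num_pages = DIV_ROUND_UP(dma_buf_sz, PAGE_SIZE);

	return ilog2(num_pages);
}
#endif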
2094
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 struct stmmac_dma_conf *dma_conf)
2097 {
2098 u32 rx_count = priv->plat->rx_queues_to_use;
2099 u32 queue;
2100 int ret;
2101
2102 /* RX queues buffers and DMA */
2103 for (queue = 0; queue < rx_count; queue++) {
2104 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 if (ret)
2106 goto err_dma;
2107 }
2108
2109 return 0;
2110
2111 err_dma:
2112 free_dma_rx_desc_resources(priv, dma_conf);
2113
2114 return ret;
2115 }
2116
2117 /**
2118 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119 * @priv: private structure
2120 * @dma_conf: structure to take the dma data
2121 * @queue: TX queue index
2122 * Description: according to which descriptor can be used (extended or basic)
2123 * this function allocates the resources for the TX path: the descriptor
2124 * ring in coherent memory and the bookkeeping arrays used to track the
2125 * TX buffers.
2126 */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 struct stmmac_dma_conf *dma_conf,
2129 u32 queue)
2130 {
2131 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 size_t size;
2133 void *addr;
2134
2135 tx_q->queue_index = queue;
2136 tx_q->priv_data = priv;
2137
2138 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 sizeof(*tx_q->tx_skbuff_dma),
2140 GFP_KERNEL);
2141 if (!tx_q->tx_skbuff_dma)
2142 return -ENOMEM;
2143
2144 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 sizeof(struct sk_buff *),
2146 GFP_KERNEL);
2147 if (!tx_q->tx_skbuff)
2148 return -ENOMEM;
2149
2150 if (priv->extend_desc)
2151 size = sizeof(struct dma_extended_desc);
2152 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 size = sizeof(struct dma_edesc);
2154 else
2155 size = sizeof(struct dma_desc);
2156
2157 size *= dma_conf->dma_tx_size;
2158
2159 addr = dma_alloc_coherent(priv->device, size,
2160 &tx_q->dma_tx_phy, GFP_KERNEL);
2161 if (!addr)
2162 return -ENOMEM;
2163
2164 if (priv->extend_desc)
2165 tx_q->dma_etx = addr;
2166 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 tx_q->dma_entx = addr;
2168 else
2169 tx_q->dma_tx = addr;
2170
2171 return 0;
2172 }
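
/* Worked example (assumed sizes, not part of the upstream driver) of the
 * coherent allocation above: with a 512-entry ring of basic descriptors
 * (four 32-bit words each), the single dma_alloc_coherent() call requests
 * 512 * 16 = 8192 bytes per TX queue. The helper name is hypothetical.
 */
#if 0
static size_t example_tx_ring_bytes(size_t dma_tx_size)
{
	return dma_tx_size * sizeof(struct dma_desc);
}
#endif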
2173
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 struct stmmac_dma_conf *dma_conf)
2176 {
2177 u32 tx_count = priv->plat->tx_queues_to_use;
2178 u32 queue;
2179 int ret;
2180
2181 /* TX queues buffers and DMA */
2182 for (queue = 0; queue < tx_count; queue++) {
2183 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 if (ret)
2185 goto err_dma;
2186 }
2187
2188 return 0;
2189
2190 err_dma:
2191 free_dma_tx_desc_resources(priv, dma_conf);
2192 return ret;
2193 }
2194
2195 /**
2196 * alloc_dma_desc_resources - alloc TX/RX resources.
2197 * @priv: private structure
2198 * @dma_conf: structure to take the dma data
2199 * Description: according to which descriptor can be used (extended or basic)
2200 * this function allocates the resources for the TX and RX paths. In case of
2201 * reception, for example, it pre-allocates the RX buffers in order to
2202 * allow the zero-copy mechanism.
2203 */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 struct stmmac_dma_conf *dma_conf)
2206 {
2207 /* RX Allocation */
2208 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209
2210 if (ret)
2211 return ret;
2212
2213 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214
2215 return ret;
2216 }
2217
2218 /**
2219 * free_dma_desc_resources - free dma desc resources
2220 * @priv: private structure
2221 * @dma_conf: structure to take the dma data
2222 */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 struct stmmac_dma_conf *dma_conf)
2225 {
2226 /* Release the DMA TX socket buffers */
2227 free_dma_tx_desc_resources(priv, dma_conf);
2228
2229 /* Release the DMA RX socket buffers later
2230 * to ensure all pending XDP_TX buffers are returned.
2231 */
2232 free_dma_rx_desc_resources(priv, dma_conf);
2233 }
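
/* Minimal lifecycle sketch (hypothetical caller, not part of the upstream
 * driver): allocation and ring initialization are paired, and
 * free_dma_desc_resources() undoes both on error or teardown. Error
 * handling is trimmed to the essentials.
 */
#if 0
static int example_bring_up_rings(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf)
{
	int ret;

	ret = alloc_dma_desc_resources(priv, dma_conf);
	if (ret)
		return ret;

	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
	if (ret)
		free_dma_desc_resources(priv, dma_conf);

	return ret;
}
#endif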
2234
2235 /**
2236 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237 * @priv: driver private structure
2238 * Description: It is used for enabling the rx queues in the MAC
2239 */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 int queue;
2244 u8 mode;
2245
2246 for (queue = 0; queue < rx_queues_count; queue++) {
2247 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 }
2250 }
2251
2252 /**
2253 * stmmac_start_rx_dma - start RX DMA channel
2254 * @priv: driver private structure
2255 * @chan: RX channel index
2256 * Description:
2257 * This starts a RX DMA channel
2258 */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264
2265 /**
2266 * stmmac_start_tx_dma - start TX DMA channel
2267 * @priv: driver private structure
2268 * @chan: TX channel index
2269 * Description:
2270 * This starts a TX DMA channel
2271 */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277
2278 /**
2279 * stmmac_stop_rx_dma - stop RX DMA channel
2280 * @priv: driver private structure
2281 * @chan: RX channel index
2282 * Description:
2283 * This stops a RX DMA channel
2284 */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290
2291 /**
2292 * stmmac_stop_tx_dma - stop TX DMA channel
2293 * @priv: driver private structure
2294 * @chan: TX channel index
2295 * Description:
2296 * This stops a TX DMA channel
2297 */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 u32 chan;
2310
2311 for (chan = 0; chan < dma_csr_ch; chan++) {
2312 struct stmmac_channel *ch = &priv->channel[chan];
2313 unsigned long flags;
2314
2315 spin_lock_irqsave(&ch->lock, flags);
2316 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 spin_unlock_irqrestore(&ch->lock, flags);
2318 }
2319 }
2320
2321 /**
2322 * stmmac_start_all_dma - start all RX and TX DMA channels
2323 * @priv: driver private structure
2324 * Description:
2325 * This starts all the RX and TX DMA channels
2326 */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 u32 chan = 0;
2332
2333 for (chan = 0; chan < rx_channels_count; chan++)
2334 stmmac_start_rx_dma(priv, chan);
2335
2336 for (chan = 0; chan < tx_channels_count; chan++)
2337 stmmac_start_tx_dma(priv, chan);
2338 }
2339
2340 /**
2341 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342 * @priv: driver private structure
2343 * Description:
2344 * This stops the RX and TX DMA channels
2345 */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 u32 chan = 0;
2351
2352 for (chan = 0; chan < rx_channels_count; chan++)
2353 stmmac_stop_rx_dma(priv, chan);
2354
2355 for (chan = 0; chan < tx_channels_count; chan++)
2356 stmmac_stop_tx_dma(priv, chan);
2357 }
2358
2359 /**
2360 * stmmac_dma_operation_mode - HW DMA operation mode
2361 * @priv: driver private structure
2362 * Description: it is used for configuring the DMA operation mode register in
2363 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364 */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 int rxfifosz = priv->plat->rx_fifo_size;
2370 int txfifosz = priv->plat->tx_fifo_size;
2371 u32 txmode = 0;
2372 u32 rxmode = 0;
2373 u32 chan = 0;
2374 u8 qmode = 0;
2375
2376 if (rxfifosz == 0)
2377 rxfifosz = priv->dma_cap.rx_fifo_size;
2378 if (txfifosz == 0)
2379 txfifosz = priv->dma_cap.tx_fifo_size;
2380
2381 /* Adjust for real per queue fifo size */
2382 rxfifosz /= rx_channels_count;
2383 txfifosz /= tx_channels_count;
2384
2385 if (priv->plat->force_thresh_dma_mode) {
2386 txmode = tc;
2387 rxmode = tc;
2388 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 /*
2390 * In the case of GMAC, SF mode can be enabled
2391 * to perform the TX COE in HW. This depends on:
2392 * 1) TX COE actually being supported
2393 * 2) there being no buggy Jumbo frame support
2394 * that requires not inserting the csum in the TDES.
2395 */
2396 txmode = SF_DMA_MODE;
2397 rxmode = SF_DMA_MODE;
2398 priv->xstats.threshold = SF_DMA_MODE;
2399 } else {
2400 txmode = tc;
2401 rxmode = SF_DMA_MODE;
2402 }
2403
2404 /* configure all channels */
2405 for (chan = 0; chan < rx_channels_count; chan++) {
2406 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 u32 buf_size;
2408
2409 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410
2411 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 rxfifosz, qmode);
2413
2414 if (rx_q->xsk_pool) {
2415 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 buf_size,
2418 chan);
2419 } else {
2420 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 priv->dma_conf.dma_buf_sz,
2422 chan);
2423 }
2424 }
2425
2426 for (chan = 0; chan < tx_channels_count; chan++) {
2427 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428
2429 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 txfifosz, qmode);
2431 }
2432 }
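
/* Worked example (assumed numbers, not part of the upstream driver) of the
 * per-queue FIFO split above: a 16 KiB TX FIFO shared by four TX channels
 * leaves 4096 bytes per channel, which is the value handed to
 * stmmac_dma_tx_mode() for each of them. The helper name is hypothetical.
 */
#if 0
static int example_per_queue_fifo(int fifosz, u32 channels)
{
	return fifosz / channels;	/* e.g. 16384 / 4 == 4096 */
}
#endif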
2433
2434 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2435 {
2436 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2437 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2438 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2439 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2440 unsigned int entry = tx_q->cur_tx;
2441 struct dma_desc *tx_desc = NULL;
2442 struct xdp_desc xdp_desc;
2443 bool work_done = true;
2444 u32 tx_set_ic_bit = 0;
2445
2446 /* Avoids TX time-out as we are sharing with slow path */
2447 txq_trans_cond_update(nq);
2448
2449 budget = min(budget, stmmac_tx_avail(priv, queue));
2450
2451 while (budget-- > 0) {
2452 dma_addr_t dma_addr;
2453 bool set_ic;
2454
2455 /* We are sharing with the slow path and stop XSK TX desc submission when
2456 * the available TX ring space drops below the threshold.
2457 */
2458 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2459 !netif_carrier_ok(priv->dev)) {
2460 work_done = false;
2461 break;
2462 }
2463
2464 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2465 break;
2466
2467 if (likely(priv->extend_desc))
2468 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2469 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2470 tx_desc = &tx_q->dma_entx[entry].basic;
2471 else
2472 tx_desc = tx_q->dma_tx + entry;
2473
2474 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2475 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2476
2477 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2478
2479 /* To return the XDP buffer to the XSK pool, we simply call
2480 * xsk_tx_completed(), so we don't need to fill up
2481 * 'buf' and 'xdpf'.
2482 */
2483 tx_q->tx_skbuff_dma[entry].buf = 0;
2484 tx_q->xdpf[entry] = NULL;
2485
2486 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2487 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2488 tx_q->tx_skbuff_dma[entry].last_segment = true;
2489 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2490
2491 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2492
2493 tx_q->tx_count_frames++;
2494
2495 if (!priv->tx_coal_frames[queue])
2496 set_ic = false;
2497 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2498 set_ic = true;
2499 else
2500 set_ic = false;
2501
2502 if (set_ic) {
2503 tx_q->tx_count_frames = 0;
2504 stmmac_set_tx_ic(priv, tx_desc);
2505 tx_set_ic_bit++;
2506 }
2507
2508 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2509 true, priv->mode, true, true,
2510 xdp_desc.len);
2511
2512 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2513
2514 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2515 entry = tx_q->cur_tx;
2516 }
2517 u64_stats_update_begin(&txq_stats->napi_syncp);
2518 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2519 u64_stats_update_end(&txq_stats->napi_syncp);
2520
2521 if (tx_desc) {
2522 stmmac_flush_tx_descriptors(priv, queue);
2523 xsk_tx_release(pool);
2524 }
2525
2526 /* Return true if both of the following conditions are met:
2527 * a) TX budget is still available
2528 * b) work_done == true, i.e. the XSK TX desc peek ran empty (no more
2529 * pending XSK TX frames to transmit)
2530 */
2531 return !!budget && work_done;
2532 }
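
/* Minimal sketch (not part of the upstream driver) of the IC-bit coalescing
 * rule used in the ZC transmit loop above: the Interrupt-on-Completion bit
 * is requested only on every Nth descriptor, where N is the per-queue
 * tx_coal_frames setting. The helper name is hypothetical.
 */
#if 0
static bool example_want_completion_irq(u32 tx_count_frames, u32 tx_coal_frames)
{
	if (!tx_coal_frames)
		return false;

	return tx_count_frames % tx_coal_frames == 0;
}
#endif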
2533
2534 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2535 {
2536 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2537 tc += 64;
2538
2539 if (priv->plat->force_thresh_dma_mode)
2540 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2541 else
2542 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2543 chan);
2544
2545 priv->xstats.threshold = tc;
2546 }
2547 }
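
/* Illustrative walk-through (not part of the upstream driver), assuming the
 * DMA is in threshold mode and the module parameter tc starts at its default
 * of 64: successive tx_hard_error_bump_tc events program 128, 192, 256 and
 * finally 320, after which tc exceeds 256 and the threshold stops growing.
 * The helper name is hypothetical.
 */
#if 0
static int example_next_threshold(int cur_tc)
{
	return (cur_tc <= 256) ? cur_tc + 64 : cur_tc;
}
#endif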
2548
2549 /**
2550 * stmmac_tx_clean - to manage the transmission completion
2551 * @priv: driver private structure
2552 * @budget: napi budget limiting this functions packet handling
2553 * @queue: TX queue index
2554 * Description: it reclaims the transmit resources after transmission completes.
2555 */
2556 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2557 {
2558 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2559 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2560 unsigned int bytes_compl = 0, pkts_compl = 0;
2561 unsigned int entry, xmits = 0, count = 0;
2562 u32 tx_packets = 0, tx_errors = 0;
2563
2564 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2565
2566 tx_q->xsk_frames_done = 0;
2567
2568 entry = tx_q->dirty_tx;
2569
2570 /* Try to clean all completed TX frames in one shot */
2571 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2572 struct xdp_frame *xdpf;
2573 struct sk_buff *skb;
2574 struct dma_desc *p;
2575 int status;
2576
2577 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2578 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2579 xdpf = tx_q->xdpf[entry];
2580 skb = NULL;
2581 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2582 xdpf = NULL;
2583 skb = tx_q->tx_skbuff[entry];
2584 } else {
2585 xdpf = NULL;
2586 skb = NULL;
2587 }
2588
2589 if (priv->extend_desc)
2590 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2591 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2592 p = &tx_q->dma_entx[entry].basic;
2593 else
2594 p = tx_q->dma_tx + entry;
2595
2596 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2597 /* Check if the descriptor is owned by the DMA */
2598 if (unlikely(status & tx_dma_own))
2599 break;
2600
2601 count++;
2602
2603 /* Make sure descriptor fields are read after reading
2604 * the own bit.
2605 */
2606 dma_rmb();
2607
2608 /* Just consider the last segment and ...*/
2609 if (likely(!(status & tx_not_ls))) {
2610 /* ... verify the status error condition */
2611 if (unlikely(status & tx_err)) {
2612 tx_errors++;
2613 if (unlikely(status & tx_err_bump_tc))
2614 stmmac_bump_dma_threshold(priv, queue);
2615 } else {
2616 tx_packets++;
2617 }
2618 if (skb)
2619 stmmac_get_tx_hwtstamp(priv, p, skb);
2620 }
2621
2622 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2623 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2624 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2625 dma_unmap_page(priv->device,
2626 tx_q->tx_skbuff_dma[entry].buf,
2627 tx_q->tx_skbuff_dma[entry].len,
2628 DMA_TO_DEVICE);
2629 else
2630 dma_unmap_single(priv->device,
2631 tx_q->tx_skbuff_dma[entry].buf,
2632 tx_q->tx_skbuff_dma[entry].len,
2633 DMA_TO_DEVICE);
2634 tx_q->tx_skbuff_dma[entry].buf = 0;
2635 tx_q->tx_skbuff_dma[entry].len = 0;
2636 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2637 }
2638
2639 stmmac_clean_desc3(priv, tx_q, p);
2640
2641 tx_q->tx_skbuff_dma[entry].last_segment = false;
2642 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2643
2644 if (xdpf &&
2645 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2646 xdp_return_frame_rx_napi(xdpf);
2647 tx_q->xdpf[entry] = NULL;
2648 }
2649
2650 if (xdpf &&
2651 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2652 xdp_return_frame(xdpf);
2653 tx_q->xdpf[entry] = NULL;
2654 }
2655
2656 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2657 tx_q->xsk_frames_done++;
2658
2659 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2660 if (likely(skb)) {
2661 pkts_compl++;
2662 bytes_compl += skb->len;
2663 dev_consume_skb_any(skb);
2664 tx_q->tx_skbuff[entry] = NULL;
2665 }
2666 }
2667
2668 stmmac_release_tx_desc(priv, p, priv->mode);
2669
2670 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2671 }
2672 tx_q->dirty_tx = entry;
2673
2674 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2675 pkts_compl, bytes_compl);
2676
2677 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2678 queue))) &&
2679 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2680
2681 netif_dbg(priv, tx_done, priv->dev,
2682 "%s: restart transmit\n", __func__);
2683 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2684 }
2685
2686 if (tx_q->xsk_pool) {
2687 bool work_done;
2688
2689 if (tx_q->xsk_frames_done)
2690 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2691
2692 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2693 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2694
2695 /* For XSK TX, we try to send as many as possible.
2696 * If XSK work done (XSK TX desc empty and budget still
2697 * available), return "budget - 1" to reenable TX IRQ.
2698 * Else, return "budget" to make NAPI continue polling.
2699 */
2700 work_done = stmmac_xdp_xmit_zc(priv, queue,
2701 STMMAC_XSK_TX_BUDGET_MAX);
2702 if (work_done)
2703 xmits = budget - 1;
2704 else
2705 xmits = budget;
2706 }
2707
2708 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2709 priv->eee_sw_timer_en) {
2710 if (stmmac_enable_eee_mode(priv))
2711 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2712 }
2713
2714 /* We still have pending packets, let's call for a new scheduling */
2715 if (tx_q->dirty_tx != tx_q->cur_tx)
2716 stmmac_tx_timer_arm(priv, queue);
2717
2718 u64_stats_update_begin(&txq_stats->napi_syncp);
2719 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2720 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2721 u64_stats_inc(&txq_stats->napi.tx_clean);
2722 u64_stats_update_end(&txq_stats->napi_syncp);
2723
2724 priv->xstats.tx_errors += tx_errors;
2725
2726 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2727
2728 /* Combine decisions from TX clean and XSK TX */
2729 return max(count, xmits);
2730 }
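
/* Sketch (not part of the upstream driver) of how the return value of
 * stmmac_tx_clean() is meant to be interpreted: the NAPI poll routine
 * compares it against its budget, so a value smaller than the budget lets
 * NAPI complete and the TX interrupt be re-enabled, while returning the
 * full budget keeps polling. The helper name is hypothetical.
 */
#if 0
static bool example_tx_napi_done(int cleaned, int budget)
{
	return cleaned < budget;
}
#endif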
2731
2732 /**
2733 * stmmac_tx_err - to manage the tx error
2734 * @priv: driver private structure
2735 * @chan: channel index
2736 * Description: it cleans the descriptors and restarts the transmission
2737 * in case of transmission errors.
2738 */
2739 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2740 {
2741 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2742
2743 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2744
2745 stmmac_stop_tx_dma(priv, chan);
2746 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2747 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2748 stmmac_reset_tx_queue(priv, chan);
2749 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2750 tx_q->dma_tx_phy, chan);
2751 stmmac_start_tx_dma(priv, chan);
2752
2753 priv->xstats.tx_errors++;
2754 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2755 }
2756
2757 /**
2758 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2759 * @priv: driver private structure
2760 * @txmode: TX operating mode
2761 * @rxmode: RX operating mode
2762 * @chan: channel index
2763 * Description: it is used for configuring the DMA operation mode at
2764 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2765 * mode.
2766 */
2767 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2768 u32 rxmode, u32 chan)
2769 {
2770 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2771 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2772 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2773 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2774 int rxfifosz = priv->plat->rx_fifo_size;
2775 int txfifosz = priv->plat->tx_fifo_size;
2776
2777 if (rxfifosz == 0)
2778 rxfifosz = priv->dma_cap.rx_fifo_size;
2779 if (txfifosz == 0)
2780 txfifosz = priv->dma_cap.tx_fifo_size;
2781
2782 /* Adjust for real per queue fifo size */
2783 rxfifosz /= rx_channels_count;
2784 txfifosz /= tx_channels_count;
2785
2786 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2787 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2788 }
2789
2790 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2791 {
2792 int ret;
2793
2794 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2795 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2796 if (ret && (ret != -EINVAL)) {
2797 stmmac_global_err(priv);
2798 return true;
2799 }
2800
2801 return false;
2802 }
2803
2804 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2805 {
2806 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2807 &priv->xstats, chan, dir);
2808 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2809 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2810 struct stmmac_channel *ch = &priv->channel[chan];
2811 struct napi_struct *rx_napi;
2812 struct napi_struct *tx_napi;
2813 unsigned long flags;
2814
2815 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2816 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2817
2818 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2819 if (napi_schedule_prep(rx_napi)) {
2820 spin_lock_irqsave(&ch->lock, flags);
2821 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2822 spin_unlock_irqrestore(&ch->lock, flags);
2823 __napi_schedule(rx_napi);
2824 }
2825 }
2826
2827 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2828 if (napi_schedule_prep(tx_napi)) {
2829 spin_lock_irqsave(&ch->lock, flags);
2830 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2831 spin_unlock_irqrestore(&ch->lock, flags);
2832 __napi_schedule(tx_napi);
2833 }
2834 }
2835
2836 return status;
2837 }
2838
2839 /**
2840 * stmmac_dma_interrupt - DMA ISR
2841 * @priv: driver private structure
2842 * Description: this is the DMA ISR. It is called by the main ISR.
2843 * It calls the dwmac dma routine and schedules the poll method in case some
2844 * work can be done.
2845 */
2846 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2847 {
2848 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2849 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2850 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2851 tx_channel_count : rx_channel_count;
2852 u32 chan;
2853 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2854
2855 /* Make sure we never check beyond our status buffer. */
2856 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2857 channels_to_check = ARRAY_SIZE(status);
2858
2859 for (chan = 0; chan < channels_to_check; chan++)
2860 status[chan] = stmmac_napi_check(priv, chan,
2861 DMA_DIR_RXTX);
2862
2863 for (chan = 0; chan < tx_channel_count; chan++) {
2864 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2865 /* Try to bump up the dma threshold on this failure */
2866 stmmac_bump_dma_threshold(priv, chan);
2867 } else if (unlikely(status[chan] == tx_hard_error)) {
2868 stmmac_tx_err(priv, chan);
2869 }
2870 }
2871 }
2872
2873 /**
2874 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2875 * @priv: driver private structure
2876 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2877 */
2878 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2879 {
2880 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2881 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2882
2883 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2884
2885 if (priv->dma_cap.rmon) {
2886 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2887 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2888 } else
2889 netdev_info(priv->dev, "No MAC Management Counters available\n");
2890 }
2891
2892 /**
2893 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2894 * @priv: driver private structure
2895 * Description:
2896 * newer GMAC chip generations have a register to indicate the
2897 * presence of optional features/functions.
2898 * This can also be used to override the values passed through the
2899 * platform code, which is necessary for old MAC10/100 and GMAC chips.
2900 */
2901 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2902 {
2903 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2904 }
2905
2906 /**
2907 * stmmac_check_ether_addr - check if the MAC addr is valid
2908 * @priv: driver private structure
2909 * Description:
2910 * it verifies that the MAC address is valid; if it is not, a random
2911 * MAC address is generated
2912 */
2913 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2914 {
2915 u8 addr[ETH_ALEN];
2916
2917 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2918 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2919 if (is_valid_ether_addr(addr))
2920 eth_hw_addr_set(priv->dev, addr);
2921 else
2922 eth_hw_addr_random(priv->dev);
2923 dev_info(priv->device, "device MAC address %pM\n",
2924 priv->dev->dev_addr);
2925 }
2926 }
2927
2928 /**
2929 * stmmac_init_dma_engine - DMA init.
2930 * @priv: driver private structure
2931 * Description:
2932 * It initializes the DMA by invoking the specific MAC/GMAC callback.
2933 * Some DMA parameters can be passed from the platform;
2934 * in case they are not passed, a default is kept for the MAC or GMAC.
2935 */
2936 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2937 {
2938 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2939 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2940 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2941 struct stmmac_rx_queue *rx_q;
2942 struct stmmac_tx_queue *tx_q;
2943 u32 chan = 0;
2944 int atds = 0;
2945 int ret = 0;
2946
2947 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2948 dev_err(priv->device, "Invalid DMA configuration\n");
2949 return -EINVAL;
2950 }
2951
2952 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2953 atds = 1;
2954
2955 ret = stmmac_reset(priv, priv->ioaddr);
2956 if (ret) {
2957 dev_err(priv->device, "Failed to reset the dma\n");
2958 return ret;
2959 }
2960
2961 /* DMA Configuration */
2962 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2963
2964 if (priv->plat->axi)
2965 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2966
2967 /* DMA CSR Channel configuration */
2968 for (chan = 0; chan < dma_csr_ch; chan++) {
2969 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2970 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2971 }
2972
2973 /* DMA RX Channel Configuration */
2974 for (chan = 0; chan < rx_channels_count; chan++) {
2975 rx_q = &priv->dma_conf.rx_queue[chan];
2976
2977 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2978 rx_q->dma_rx_phy, chan);
2979
2980 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2981 (rx_q->buf_alloc_num *
2982 sizeof(struct dma_desc));
2983 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2984 rx_q->rx_tail_addr, chan);
2985 }
2986
2987 /* DMA TX Channel Configuration */
2988 for (chan = 0; chan < tx_channels_count; chan++) {
2989 tx_q = &priv->dma_conf.tx_queue[chan];
2990
2991 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2992 tx_q->dma_tx_phy, chan);
2993
2994 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2995 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2996 tx_q->tx_tail_addr, chan);
2997 }
2998
2999 return ret;
3000 }
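
/* Sketch (not part of the upstream driver) of the RX tail pointer
 * arithmetic above: the tail is programmed just past the last descriptor
 * whose buffer was actually populated, i.e. buf_alloc_num basic descriptors
 * beyond the ring base address. The helper name is hypothetical.
 */
#if 0
static dma_addr_t example_rx_tail(dma_addr_t dma_rx_phy,
				  unsigned int buf_alloc_num)
{
	return dma_rx_phy + buf_alloc_num * sizeof(struct dma_desc);
}
#endif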
3001
3002 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3003 {
3004 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3005 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3006
3007 if (!tx_coal_timer)
3008 return;
3009
3010 hrtimer_start(&tx_q->txtimer,
3011 STMMAC_COAL_TIMER(tx_coal_timer),
3012 HRTIMER_MODE_REL);
3013 }
3014
3015 /**
3016 * stmmac_tx_timer - mitigation sw timer for tx.
3017 * @t: data pointer
3018 * Description:
3019 * This is the timer handler to directly invoke the stmmac_tx_clean.
3020 */
3021 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3022 {
3023 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3024 struct stmmac_priv *priv = tx_q->priv_data;
3025 struct stmmac_channel *ch;
3026 struct napi_struct *napi;
3027
3028 ch = &priv->channel[tx_q->queue_index];
3029 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3030
3031 if (likely(napi_schedule_prep(napi))) {
3032 unsigned long flags;
3033
3034 spin_lock_irqsave(&ch->lock, flags);
3035 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3036 spin_unlock_irqrestore(&ch->lock, flags);
3037 __napi_schedule(napi);
3038 }
3039
3040 return HRTIMER_NORESTART;
3041 }
3042
3043 /**
3044 * stmmac_init_coalesce - init mitigation options.
3045 * @priv: driver private structure
3046 * Description:
3047 * This inits the coalesce parameters: i.e. timer rate,
3048 * timer handler and default threshold used for enabling the
3049 * interrupt on completion bit.
3050 */
3051 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3052 {
3053 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3054 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3055 u32 chan;
3056
3057 for (chan = 0; chan < tx_channel_count; chan++) {
3058 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3059
3060 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3061 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3062
3063 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3064 tx_q->txtimer.function = stmmac_tx_timer;
3065 }
3066
3067 for (chan = 0; chan < rx_channel_count; chan++)
3068 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3069 }
3070
3071 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3072 {
3073 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3074 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3075 u32 chan;
3076
3077 /* set TX ring length */
3078 for (chan = 0; chan < tx_channels_count; chan++)
3079 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3080 (priv->dma_conf.dma_tx_size - 1), chan);
3081
3082 /* set RX ring length */
3083 for (chan = 0; chan < rx_channels_count; chan++)
3084 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3085 (priv->dma_conf.dma_rx_size - 1), chan);
3086 }
3087
3088 /**
3089 * stmmac_set_tx_queue_weight - Set TX queue weight
3090 * @priv: driver private structure
3091 * Description: It is used for setting the TX queue weights
3092 */
3093 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3094 {
3095 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3096 u32 weight;
3097 u32 queue;
3098
3099 for (queue = 0; queue < tx_queues_count; queue++) {
3100 weight = priv->plat->tx_queues_cfg[queue].weight;
3101 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3102 }
3103 }
3104
3105 /**
3106 * stmmac_configure_cbs - Configure CBS in TX queue
3107 * @priv: driver private structure
3108 * Description: It is used for configuring CBS in AVB TX queues
3109 */
3110 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3111 {
3112 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3113 u32 mode_to_use;
3114 u32 queue;
3115
3116 /* queue 0 is reserved for legacy traffic */
3117 for (queue = 1; queue < tx_queues_count; queue++) {
3118 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3119 if (mode_to_use == MTL_QUEUE_DCB)
3120 continue;
3121
3122 stmmac_config_cbs(priv, priv->hw,
3123 priv->plat->tx_queues_cfg[queue].send_slope,
3124 priv->plat->tx_queues_cfg[queue].idle_slope,
3125 priv->plat->tx_queues_cfg[queue].high_credit,
3126 priv->plat->tx_queues_cfg[queue].low_credit,
3127 queue);
3128 }
3129 }
3130
3131 /**
3132 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3133 * @priv: driver private structure
3134 * Description: It is used for mapping RX queues to RX dma channels
3135 */
3136 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3137 {
3138 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3139 u32 queue;
3140 u32 chan;
3141
3142 for (queue = 0; queue < rx_queues_count; queue++) {
3143 chan = priv->plat->rx_queues_cfg[queue].chan;
3144 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3145 }
3146 }
3147
3148 /**
3149 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3150 * @priv: driver private structure
3151 * Description: It is used for configuring the RX Queue Priority
3152 */
3153 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3154 {
3155 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3156 u32 queue;
3157 u32 prio;
3158
3159 for (queue = 0; queue < rx_queues_count; queue++) {
3160 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3161 continue;
3162
3163 prio = priv->plat->rx_queues_cfg[queue].prio;
3164 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3165 }
3166 }
3167
3168 /**
3169 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3170 * @priv: driver private structure
3171 * Description: It is used for configuring the TX Queue Priority
3172 */
3173 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3174 {
3175 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3176 u32 queue;
3177 u32 prio;
3178
3179 for (queue = 0; queue < tx_queues_count; queue++) {
3180 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3181 continue;
3182
3183 prio = priv->plat->tx_queues_cfg[queue].prio;
3184 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3185 }
3186 }
3187
3188 /**
3189 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3190 * @priv: driver private structure
3191 * Description: It is used for configuring the RX queue routing
3192 */
3193 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3194 {
3195 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3196 u32 queue;
3197 u8 packet;
3198
3199 for (queue = 0; queue < rx_queues_count; queue++) {
3200 /* no specific packet type routing specified for the queue */
3201 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3202 continue;
3203
3204 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3205 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3206 }
3207 }
3208
3209 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3210 {
3211 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3212 priv->rss.enable = false;
3213 return;
3214 }
3215
3216 if (priv->dev->features & NETIF_F_RXHASH)
3217 priv->rss.enable = true;
3218 else
3219 priv->rss.enable = false;
3220
3221 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3222 priv->plat->rx_queues_to_use);
3223 }
3224
3225 /**
3226 * stmmac_mtl_configuration - Configure MTL
3227 * @priv: driver private structure
3228 * Description: It is used for configuring the MTL
3229 */
3230 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3231 {
3232 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3233 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3234
3235 if (tx_queues_count > 1)
3236 stmmac_set_tx_queue_weight(priv);
3237
3238 /* Configure MTL RX algorithms */
3239 if (rx_queues_count > 1)
3240 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3241 priv->plat->rx_sched_algorithm);
3242
3243 /* Configure MTL TX algorithms */
3244 if (tx_queues_count > 1)
3245 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3246 priv->plat->tx_sched_algorithm);
3247
3248 /* Configure CBS in AVB TX queues */
3249 if (tx_queues_count > 1)
3250 stmmac_configure_cbs(priv);
3251
3252 /* Map RX MTL to DMA channels */
3253 stmmac_rx_queue_dma_chan_map(priv);
3254
3255 /* Enable MAC RX Queues */
3256 stmmac_mac_enable_rx_queues(priv);
3257
3258 /* Set RX priorities */
3259 if (rx_queues_count > 1)
3260 stmmac_mac_config_rx_queues_prio(priv);
3261
3262 /* Set TX priorities */
3263 if (tx_queues_count > 1)
3264 stmmac_mac_config_tx_queues_prio(priv);
3265
3266 /* Set RX routing */
3267 if (rx_queues_count > 1)
3268 stmmac_mac_config_rx_queues_routing(priv);
3269
3270 /* Receive Side Scaling */
3271 if (rx_queues_count > 1)
3272 stmmac_mac_config_rss(priv);
3273 }
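
/* Hypothetical platform-data fragment (not part of the upstream driver)
 * showing the kind of configuration that drives the MTL helpers above:
 * queue 0 carries legacy/DCB traffic on channel 0, queue 1 is assumed to be
 * an AVB queue mapped to channel 1 with a user priority. Field names match
 * the rx_queues_cfg accesses in this file; the values are made up.
 */
#if 0
static void example_two_rx_queue_plat(struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 2;
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;
	plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	plat->rx_queues_cfg[1].chan = 1;
	plat->rx_queues_cfg[1].use_prio = true;
	plat->rx_queues_cfg[1].prio = 5;
}
#endif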
3274
3275 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3276 {
3277 if (priv->dma_cap.asp) {
3278 netdev_info(priv->dev, "Enabling Safety Features\n");
3279 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3280 priv->plat->safety_feat_cfg);
3281 } else {
3282 netdev_info(priv->dev, "No Safety Features support found\n");
3283 }
3284 }
3285
3286 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3287 {
3288 char *name;
3289
3290 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3291 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3292
3293 name = priv->wq_name;
3294 sprintf(name, "%s-fpe", priv->dev->name);
3295
3296 priv->fpe_wq = create_singlethread_workqueue(name);
3297 if (!priv->fpe_wq) {
3298 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3299
3300 return -ENOMEM;
3301 }
3302 netdev_info(priv->dev, "FPE workqueue start");
3303
3304 return 0;
3305 }
3306
3307 /**
3308 * stmmac_hw_setup - setup mac in a usable state.
3309 * @dev : pointer to the device structure.
3310 * @ptp_register: register PTP if set
3311 * Description:
3312 * this is the main function to set up the HW in a usable state: the
3313 * DMA engine is reset, the core registers are configured (e.g. AXI,
3314 * checksum features, timers) and the DMA is ready to start receiving
3315 * and transmitting.
3316 * Return value:
3317 * 0 on success and an appropriate negative error code as defined in
3318 * errno.h on failure.
3319 */
3320 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3321 {
3322 struct stmmac_priv *priv = netdev_priv(dev);
3323 u32 rx_cnt = priv->plat->rx_queues_to_use;
3324 u32 tx_cnt = priv->plat->tx_queues_to_use;
3325 bool sph_en;
3326 u32 chan;
3327 int ret;
3328
3329 /* DMA initialization and SW reset */
3330 ret = stmmac_init_dma_engine(priv);
3331 if (ret < 0) {
3332 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3333 __func__);
3334 return ret;
3335 }
3336
3337 /* Copy the MAC addr into the HW */
3338 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3339
3340 /* PS and related bits will be programmed according to the speed */
3341 if (priv->hw->pcs) {
3342 int speed = priv->plat->mac_port_sel_speed;
3343
3344 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3345 (speed == SPEED_1000)) {
3346 priv->hw->ps = speed;
3347 } else {
3348 dev_warn(priv->device, "invalid port speed\n");
3349 priv->hw->ps = 0;
3350 }
3351 }
3352
3353 /* Initialize the MAC Core */
3354 stmmac_core_init(priv, priv->hw, dev);
3355
3356 /* Initialize MTL*/
3357 stmmac_mtl_configuration(priv);
3358
3359 /* Initialize Safety Features */
3360 stmmac_safety_feat_configuration(priv);
3361
3362 ret = stmmac_rx_ipc(priv, priv->hw);
3363 if (!ret) {
3364 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3365 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3366 priv->hw->rx_csum = 0;
3367 }
3368
3369 /* Enable the MAC Rx/Tx */
3370 stmmac_mac_set(priv, priv->ioaddr, true);
3371
3372 /* Set the HW DMA mode and the COE */
3373 stmmac_dma_operation_mode(priv);
3374
3375 stmmac_mmc_setup(priv);
3376
3377 if (ptp_register) {
3378 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3379 if (ret < 0)
3380 netdev_warn(priv->dev,
3381 "failed to enable PTP reference clock: %pe\n",
3382 ERR_PTR(ret));
3383 }
3384
3385 ret = stmmac_init_ptp(priv);
3386 if (ret == -EOPNOTSUPP)
3387 netdev_info(priv->dev, "PTP not supported by HW\n");
3388 else if (ret)
3389 netdev_warn(priv->dev, "PTP init failed\n");
3390 else if (ptp_register)
3391 stmmac_ptp_register(priv);
3392
3393 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3394
3395 /* Convert the timer from msec to usec */
3396 if (!priv->tx_lpi_timer)
3397 priv->tx_lpi_timer = eee_timer * 1000;
3398
3399 if (priv->use_riwt) {
3400 u32 queue;
3401
3402 for (queue = 0; queue < rx_cnt; queue++) {
3403 if (!priv->rx_riwt[queue])
3404 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3405
3406 stmmac_rx_watchdog(priv, priv->ioaddr,
3407 priv->rx_riwt[queue], queue);
3408 }
3409 }
3410
3411 if (priv->hw->pcs)
3412 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3413
3414 /* set TX and RX rings length */
3415 stmmac_set_rings_length(priv);
3416
3417 /* Enable TSO */
3418 if (priv->tso) {
3419 for (chan = 0; chan < tx_cnt; chan++) {
3420 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3421
3422 /* TSO and TBS cannot co-exist */
3423 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3424 continue;
3425
3426 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3427 }
3428 }
3429
3430 /* Enable Split Header */
3431 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3432 for (chan = 0; chan < rx_cnt; chan++)
3433 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3434
3435
3436 /* VLAN Tag Insertion */
3437 if (priv->dma_cap.vlins)
3438 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3439
3440 /* TBS */
3441 for (chan = 0; chan < tx_cnt; chan++) {
3442 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3443 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3444
3445 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3446 }
3447
3448 /* Configure real RX and TX queues */
3449 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3450 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3451
3452 /* Start the ball rolling... */
3453 stmmac_start_all_dma(priv);
3454
3455 if (priv->dma_cap.fpesel) {
3456 stmmac_fpe_start_wq(priv);
3457
3458 if (priv->plat->fpe_cfg->enable)
3459 stmmac_fpe_handshake(priv, true);
3460 }
3461
3462 return 0;
3463 }
3464
3465 static void stmmac_hw_teardown(struct net_device *dev)
3466 {
3467 struct stmmac_priv *priv = netdev_priv(dev);
3468
3469 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3470 }
3471
3472 static void stmmac_free_irq(struct net_device *dev,
3473 enum request_irq_err irq_err, int irq_idx)
3474 {
3475 struct stmmac_priv *priv = netdev_priv(dev);
3476 int j;
3477
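/* Free in reverse order of request: each case falls through so that
 * everything requested before the failing step is released.
 */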
3478 switch (irq_err) {
3479 case REQ_IRQ_ERR_ALL:
3480 irq_idx = priv->plat->tx_queues_to_use;
3481 fallthrough;
3482 case REQ_IRQ_ERR_TX:
3483 for (j = irq_idx - 1; j >= 0; j--) {
3484 if (priv->tx_irq[j] > 0) {
3485 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3486 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3487 }
3488 }
3489 irq_idx = priv->plat->rx_queues_to_use;
3490 fallthrough;
3491 case REQ_IRQ_ERR_RX:
3492 for (j = irq_idx - 1; j >= 0; j--) {
3493 if (priv->rx_irq[j] > 0) {
3494 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3495 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3496 }
3497 }
3498
3499 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3500 free_irq(priv->sfty_ue_irq, dev);
3501 fallthrough;
3502 case REQ_IRQ_ERR_SFTY_UE:
3503 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3504 free_irq(priv->sfty_ce_irq, dev);
3505 fallthrough;
3506 case REQ_IRQ_ERR_SFTY_CE:
3507 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3508 free_irq(priv->lpi_irq, dev);
3509 fallthrough;
3510 case REQ_IRQ_ERR_LPI:
3511 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3512 free_irq(priv->wol_irq, dev);
3513 fallthrough;
3514 case REQ_IRQ_ERR_WOL:
3515 free_irq(dev->irq, dev);
3516 fallthrough;
3517 case REQ_IRQ_ERR_MAC:
3518 case REQ_IRQ_ERR_NO:
3519 /* If requesting the MAC IRQ failed, there are no more IRQs to free */
3520 break;
3521 }
3522 }
3523
3524 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3525 {
3526 struct stmmac_priv *priv = netdev_priv(dev);
3527 enum request_irq_err irq_err;
3528 cpumask_t cpu_mask;
3529 int irq_idx = 0;
3530 char *int_name;
3531 int ret;
3532 int i;
3533
3534 /* For common interrupt */
3535 int_name = priv->int_name_mac;
3536 sprintf(int_name, "%s:%s", dev->name, "mac");
3537 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3538 0, int_name, dev);
3539 if (unlikely(ret < 0)) {
3540 netdev_err(priv->dev,
3541 "%s: alloc mac MSI %d (error: %d)\n",
3542 __func__, dev->irq, ret);
3543 irq_err = REQ_IRQ_ERR_MAC;
3544 goto irq_error;
3545 }
3546
3547 /* Request the Wake IRQ in case a separate line
3548 * is used for WoL
3549 */
3550 priv->wol_irq_disabled = true;
3551 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3552 int_name = priv->int_name_wol;
3553 sprintf(int_name, "%s:%s", dev->name, "wol");
3554 ret = request_irq(priv->wol_irq,
3555 stmmac_mac_interrupt,
3556 0, int_name, dev);
3557 if (unlikely(ret < 0)) {
3558 netdev_err(priv->dev,
3559 "%s: alloc wol MSI %d (error: %d)\n",
3560 __func__, priv->wol_irq, ret);
3561 irq_err = REQ_IRQ_ERR_WOL;
3562 goto irq_error;
3563 }
3564 }
3565
3566 /* Request the LPI IRQ in case a separate line
3567 * is used for LPI
3568 */
3569 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3570 int_name = priv->int_name_lpi;
3571 sprintf(int_name, "%s:%s", dev->name, "lpi");
3572 ret = request_irq(priv->lpi_irq,
3573 stmmac_mac_interrupt,
3574 0, int_name, dev);
3575 if (unlikely(ret < 0)) {
3576 netdev_err(priv->dev,
3577 "%s: alloc lpi MSI %d (error: %d)\n",
3578 __func__, priv->lpi_irq, ret);
3579 irq_err = REQ_IRQ_ERR_LPI;
3580 goto irq_error;
3581 }
3582 }
3583
3584 /* Request the Safety Feature Correctable Error line in
3585 * case a separate line is used
3586 */
3587 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3588 int_name = priv->int_name_sfty_ce;
3589 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3590 ret = request_irq(priv->sfty_ce_irq,
3591 stmmac_safety_interrupt,
3592 0, int_name, dev);
3593 if (unlikely(ret < 0)) {
3594 netdev_err(priv->dev,
3595 "%s: alloc sfty ce MSI %d (error: %d)\n",
3596 __func__, priv->sfty_ce_irq, ret);
3597 irq_err = REQ_IRQ_ERR_SFTY_CE;
3598 goto irq_error;
3599 }
3600 }
3601
3602 /* Request the Safety Feature Uncorrectable Error line in
3603 * case a separate line is used
3604 */
3605 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3606 int_name = priv->int_name_sfty_ue;
3607 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3608 ret = request_irq(priv->sfty_ue_irq,
3609 stmmac_safety_interrupt,
3610 0, int_name, dev);
3611 if (unlikely(ret < 0)) {
3612 netdev_err(priv->dev,
3613 "%s: alloc sfty ue MSI %d (error: %d)\n",
3614 __func__, priv->sfty_ue_irq, ret);
3615 irq_err = REQ_IRQ_ERR_SFTY_UE;
3616 goto irq_error;
3617 }
3618 }
3619
3620 /* Request Rx MSI irq */
3621 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3622 if (i >= MTL_MAX_RX_QUEUES)
3623 break;
3624 if (priv->rx_irq[i] == 0)
3625 continue;
3626
3627 int_name = priv->int_name_rx_irq[i];
3628 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3629 ret = request_irq(priv->rx_irq[i],
3630 stmmac_msi_intr_rx,
3631 0, int_name, &priv->dma_conf.rx_queue[i]);
3632 if (unlikely(ret < 0)) {
3633 netdev_err(priv->dev,
3634 "%s: alloc rx-%d MSI %d (error: %d)\n",
3635 __func__, i, priv->rx_irq[i], ret);
3636 irq_err = REQ_IRQ_ERR_RX;
3637 irq_idx = i;
3638 goto irq_error;
3639 }
3640 cpumask_clear(&cpu_mask);
3641 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3642 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3643 }
3644
3645 /* Request Tx MSI irq */
3646 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3647 if (i >= MTL_MAX_TX_QUEUES)
3648 break;
3649 if (priv->tx_irq[i] == 0)
3650 continue;
3651
3652 int_name = priv->int_name_tx_irq[i];
3653 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3654 ret = request_irq(priv->tx_irq[i],
3655 stmmac_msi_intr_tx,
3656 0, int_name, &priv->dma_conf.tx_queue[i]);
3657 if (unlikely(ret < 0)) {
3658 netdev_err(priv->dev,
3659 "%s: alloc tx-%d MSI %d (error: %d)\n",
3660 __func__, i, priv->tx_irq[i], ret);
3661 irq_err = REQ_IRQ_ERR_TX;
3662 irq_idx = i;
3663 goto irq_error;
3664 }
3665 cpumask_clear(&cpu_mask);
3666 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3667 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3668 }
3669
3670 return 0;
3671
3672 irq_error:
3673 stmmac_free_irq(dev, irq_err, irq_idx);
3674 return ret;
3675 }
3676
3677 static int stmmac_request_irq_single(struct net_device *dev)
3678 {
3679 struct stmmac_priv *priv = netdev_priv(dev);
3680 enum request_irq_err irq_err;
3681 int ret;
3682
3683 ret = request_irq(dev->irq, stmmac_interrupt,
3684 IRQF_SHARED, dev->name, dev);
3685 if (unlikely(ret < 0)) {
3686 netdev_err(priv->dev,
3687 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3688 __func__, dev->irq, ret);
3689 irq_err = REQ_IRQ_ERR_MAC;
3690 goto irq_error;
3691 }
3692
3693 /* Request the Wake IRQ in case a separate line
3694 * is used for WoL
3695 */
3696 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3697 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3698 IRQF_SHARED, dev->name, dev);
3699 if (unlikely(ret < 0)) {
3700 netdev_err(priv->dev,
3701 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3702 __func__, priv->wol_irq, ret);
3703 irq_err = REQ_IRQ_ERR_WOL;
3704 goto irq_error;
3705 }
3706 }
3707
3708 /* Request the LPI IRQ in case a separate line is used for LPI */
3709 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3710 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3711 IRQF_SHARED, dev->name, dev);
3712 if (unlikely(ret < 0)) {
3713 netdev_err(priv->dev,
3714 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3715 __func__, priv->lpi_irq, ret);
3716 irq_err = REQ_IRQ_ERR_LPI;
3717 goto irq_error;
3718 }
3719 }
3720
3721 return 0;
3722
3723 irq_error:
3724 stmmac_free_irq(dev, irq_err, 0);
3725 return ret;
3726 }
3727
3728 static int stmmac_request_irq(struct net_device *dev)
3729 {
3730 struct stmmac_priv *priv = netdev_priv(dev);
3731 int ret;
3732
3733 /* Request the IRQ lines */
3734 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3735 ret = stmmac_request_irq_multi_msi(dev);
3736 else
3737 ret = stmmac_request_irq_single(dev);
3738
3739 return ret;
3740 }
3741
3742 /**
3743 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3744 * @priv: driver private structure
3745 * @mtu: MTU to setup the dma queue and buf with
3746 * Description: Allocate and generate a dma_conf based on the provided MTU.
3747 * Allocate the Tx/Rx DMA queues and initialize them.
3748 * Return value:
3749 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3750 */
3751 static struct stmmac_dma_conf *
3752 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3753 {
3754 struct stmmac_dma_conf *dma_conf;
3755 int chan, bfsize, ret;
3756
3757 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3758 if (!dma_conf) {
3759 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3760 __func__);
3761 return ERR_PTR(-ENOMEM);
3762 }
3763
3764 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3765 if (bfsize < 0)
3766 bfsize = 0;
3767
3768 if (bfsize < BUF_SIZE_16KiB)
3769 bfsize = stmmac_set_bfsize(mtu, 0);
3770
3771 dma_conf->dma_buf_sz = bfsize;
3772 /* Choose the Tx/Rx ring sizes from the ones already defined in the
3773 * priv struct, if any.
3774 */
3775 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3776 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3777
3778 if (!dma_conf->dma_tx_size)
3779 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3780 if (!dma_conf->dma_rx_size)
3781 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3782
3783 /* Earlier check for TBS */
3784 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3785 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3786 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3787
3788 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3789 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3790 }
3791
3792 ret = alloc_dma_desc_resources(priv, dma_conf);
3793 if (ret < 0) {
3794 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3795 __func__);
3796 goto alloc_error;
3797 }
3798
3799 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3800 if (ret < 0) {
3801 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3802 __func__);
3803 goto init_error;
3804 }
3805
3806 return dma_conf;
3807
3808 init_error:
3809 free_dma_desc_resources(priv, dma_conf);
3810 alloc_error:
3811 kfree(dma_conf);
3812 return ERR_PTR(ret);
3813 }
3814
3815 /**
3816 * __stmmac_open - open entry point of the driver
3817 * @dev : pointer to the device structure.
3818 * @dma_conf : structure to take the dma data
3819 * Description:
3820 * This function is the open entry point of the driver.
3821 * Return value:
3822 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3823 * file on failure.
3824 */
3825 static int __stmmac_open(struct net_device *dev,
3826 struct stmmac_dma_conf *dma_conf)
3827 {
3828 struct stmmac_priv *priv = netdev_priv(dev);
3829 int mode = priv->plat->phy_interface;
3830 u32 chan;
3831 int ret;
3832
3833 ret = pm_runtime_resume_and_get(priv->device);
3834 if (ret < 0)
3835 return ret;
3836
3837 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3838 priv->hw->pcs != STMMAC_PCS_RTBI &&
3839 (!priv->hw->xpcs ||
3840 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3841 !priv->hw->lynx_pcs) {
3842 ret = stmmac_init_phy(dev);
3843 if (ret) {
3844 netdev_err(priv->dev,
3845 "%s: Cannot attach to PHY (error: %d)\n",
3846 __func__, ret);
3847 goto init_phy_error;
3848 }
3849 }
3850
3851 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3852
3853 buf_sz = dma_conf->dma_buf_sz;
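/* Preserve the per-queue TBS enable state from the previous configuration
 * before the new dma_conf is copied over.
 */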
3854 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3855 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3856 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3857 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3858
3859 stmmac_reset_queues_param(priv);
3860
3861 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3862 priv->plat->serdes_powerup) {
3863 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3864 if (ret < 0) {
3865 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3866 __func__);
3867 goto init_error;
3868 }
3869 }
3870
3871 ret = stmmac_hw_setup(dev, true);
3872 if (ret < 0) {
3873 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3874 goto init_error;
3875 }
3876
3877 stmmac_init_coalesce(priv);
3878
3879 phylink_start(priv->phylink);
3880 /* We may have called phylink_speed_down before */
3881 phylink_speed_up(priv->phylink);
3882
3883 ret = stmmac_request_irq(dev);
3884 if (ret)
3885 goto irq_error;
3886
3887 stmmac_enable_all_queues(priv);
3888 netif_tx_start_all_queues(priv->dev);
3889 stmmac_enable_all_dma_irq(priv);
3890
3891 return 0;
3892
3893 irq_error:
3894 phylink_stop(priv->phylink);
3895
3896 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3897 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3898
3899 stmmac_hw_teardown(dev);
3900 init_error:
3901 phylink_disconnect_phy(priv->phylink);
3902 init_phy_error:
3903 pm_runtime_put(priv->device);
3904 return ret;
3905 }
3906
3907 static int stmmac_open(struct net_device *dev)
3908 {
3909 struct stmmac_priv *priv = netdev_priv(dev);
3910 struct stmmac_dma_conf *dma_conf;
3911 int ret;
3912
3913 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3914 if (IS_ERR(dma_conf))
3915 return PTR_ERR(dma_conf);
3916
3917 ret = __stmmac_open(dev, dma_conf);
3918 if (ret)
3919 free_dma_desc_resources(priv, dma_conf);
3920
3921 kfree(dma_conf);
3922 return ret;
3923 }
3924
3925 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3926 {
3927 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3928
3929 if (priv->fpe_wq) {
3930 destroy_workqueue(priv->fpe_wq);
3931 priv->fpe_wq = NULL;
3932 }
3933
3934 netdev_info(priv->dev, "FPE workqueue stop");
3935 }
3936
3937 /**
3938 * stmmac_release - close entry point of the driver
3939 * @dev : device pointer.
3940 * Description:
3941 * This is the stop entry point of the driver.
3942 */
3943 static int stmmac_release(struct net_device *dev)
3944 {
3945 struct stmmac_priv *priv = netdev_priv(dev);
3946 u32 chan;
3947
3948 if (device_may_wakeup(priv->device))
3949 phylink_speed_down(priv->phylink, false);
3950 /* Stop and disconnect the PHY */
3951 phylink_stop(priv->phylink);
3952 phylink_disconnect_phy(priv->phylink);
3953
3954 stmmac_disable_all_queues(priv);
3955
3956 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3957 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3958
3959 netif_tx_disable(dev);
3960
3961 /* Free the IRQ lines */
3962 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3963
3964 if (priv->eee_enabled) {
3965 priv->tx_path_in_lpi_mode = false;
3966 del_timer_sync(&priv->eee_ctrl_timer);
3967 }
3968
3969 /* Stop TX/RX DMA and clear the descriptors */
3970 stmmac_stop_all_dma(priv);
3971
3972 /* Release and free the Rx/Tx resources */
3973 free_dma_desc_resources(priv, &priv->dma_conf);
3974
3975 /* Disable the MAC Rx/Tx */
3976 stmmac_mac_set(priv, priv->ioaddr, false);
3977
3978 /* Power down the SerDes if present */
3979 if (priv->plat->serdes_powerdown)
3980 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3981
3982 netif_carrier_off(dev);
3983
3984 stmmac_release_ptp(priv);
3985
3986 pm_runtime_put(priv->device);
3987
3988 if (priv->dma_cap.fpesel)
3989 stmmac_fpe_stop_wq(priv);
3990
3991 return 0;
3992 }
3993
3994 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3995 struct stmmac_tx_queue *tx_q)
3996 {
3997 u16 tag = 0x0, inner_tag = 0x0;
3998 u32 inner_type = 0x0;
3999 struct dma_desc *p;
4000
4001 if (!priv->dma_cap.vlins)
4002 return false;
4003 if (!skb_vlan_tag_present(skb))
4004 return false;
4005 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4006 inner_tag = skb_vlan_tag_get(skb);
4007 inner_type = STMMAC_VLAN_INSERT;
4008 }
4009
4010 tag = skb_vlan_tag_get(skb);
4011
4012 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4013 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4014 else
4015 p = &tx_q->dma_tx[tx_q->cur_tx];
4016
4017 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4018 return false;
4019
4020 stmmac_set_tx_owner(priv, p);
4021 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4022 return true;
4023 }
4024
4025 /**
4026 * stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
4027 * @priv: driver private structure
4028 * @des: buffer start address
4029 * @total_len: total length to fill in descriptors
4030 * @last_segment: condition for the last descriptor
4031 * @queue: TX queue index
4032 * Description:
4033 * This function fills the descriptors and requests new ones according to
4034 * the remaining buffer length to fill
4035 */
4036 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4037 int total_len, bool last_segment, u32 queue)
4038 {
4039 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4040 struct dma_desc *desc;
4041 u32 buff_size;
4042 int tmp_len;
4043
4044 tmp_len = total_len;
4045
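/* Split the payload into chunks of at most TSO_MAX_BUFF_SIZE bytes,
 * using one descriptor per chunk.
 */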
4046 while (tmp_len > 0) {
4047 dma_addr_t curr_addr;
4048
4049 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4050 priv->dma_conf.dma_tx_size);
4051 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4052
4053 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4054 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4055 else
4056 desc = &tx_q->dma_tx[tx_q->cur_tx];
4057
4058 curr_addr = des + (total_len - tmp_len);
4059 if (priv->dma_cap.addr64 <= 32)
4060 desc->des0 = cpu_to_le32(curr_addr);
4061 else
4062 stmmac_set_desc_addr(priv, desc, curr_addr);
4063
4064 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4065 TSO_MAX_BUFF_SIZE : tmp_len;
4066
4067 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4068 0, 1,
4069 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4070 0, 0);
4071
4072 tmp_len -= TSO_MAX_BUFF_SIZE;
4073 }
4074 }
4075
4076 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4077 {
4078 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4079 int desc_size;
4080
4081 if (likely(priv->extend_desc))
4082 desc_size = sizeof(struct dma_extended_desc);
4083 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4084 desc_size = sizeof(struct dma_edesc);
4085 else
4086 desc_size = sizeof(struct dma_desc);
4087
4088 /* The own bit must be the latest setting done when preparing the
4089 * descriptor, and a barrier is then needed to make sure that
4090 * everything is coherent before handing control to the DMA engine.
4091 */
4092 wmb();
4093
4094 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4095 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4096 }
4097
4098 /**
4099 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4100 * @skb : the socket buffer
4101 * @dev : device pointer
4102 * Description: this is the transmit function that is called on TSO frames
4103 * (support available on GMAC4 and newer chips).
4104 * The diagram below shows the ring programming in case of TSO frames:
4105 *
4106 * First Descriptor
4107 * --------
4108 * | DES0 |---> buffer1 = L2/L3/L4 header
4109 * | DES1 |---> TCP Payload (can continue on next descr...)
4110 * | DES2 |---> buffer 1 and 2 len
4111 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4112 * --------
4113 * |
4114 * ...
4115 * |
4116 * --------
4117 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4118 * | DES1 | --|
4119 * | DES2 | --> buffer 1 and 2 len
4120 * | DES3 |
4121 * --------
4122 *
4123 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is not reprogrammed for each frame.
4124 */
4125 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4126 {
4127 struct dma_desc *desc, *first, *mss_desc = NULL;
4128 struct stmmac_priv *priv = netdev_priv(dev);
4129 int nfrags = skb_shinfo(skb)->nr_frags;
4130 u32 queue = skb_get_queue_mapping(skb);
4131 unsigned int first_entry, tx_packets;
4132 struct stmmac_txq_stats *txq_stats;
4133 int tmp_pay_len = 0, first_tx;
4134 struct stmmac_tx_queue *tx_q;
4135 bool has_vlan, set_ic;
4136 u8 proto_hdr_len, hdr;
4137 u32 pay_len, mss;
4138 dma_addr_t des;
4139 int i;
4140
4141 tx_q = &priv->dma_conf.tx_queue[queue];
4142 txq_stats = &priv->xstats.txq_stats[queue];
4143 first_tx = tx_q->cur_tx;
4144
4145 /* Compute header lengths */
4146 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4147 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4148 hdr = sizeof(struct udphdr);
4149 } else {
4150 proto_hdr_len = skb_tcp_all_headers(skb);
4151 hdr = tcp_hdrlen(skb);
4152 }
4153
4154 /* Desc availability based on threshold should be safe enough */
4155 if (unlikely(stmmac_tx_avail(priv, queue) <
4156 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4157 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4158 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4159 queue));
4160 /* This is a hard error, log it. */
4161 netdev_err(priv->dev,
4162 "%s: Tx Ring full when queue awake\n",
4163 __func__);
4164 }
4165 return NETDEV_TX_BUSY;
4166 }
4167
4168 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4169
4170 mss = skb_shinfo(skb)->gso_size;
4171
4172 /* set new MSS value if needed */
4173 if (mss != tx_q->mss) {
4174 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4175 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4176 else
4177 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4178
4179 stmmac_set_mss(priv, mss_desc, mss);
4180 tx_q->mss = mss;
4181 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4182 priv->dma_conf.dma_tx_size);
4183 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4184 }
4185
4186 if (netif_msg_tx_queued(priv)) {
4187 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4188 __func__, hdr, proto_hdr_len, pay_len, mss);
4189 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4190 skb->data_len);
4191 }
4192
4193 /* Check if VLAN can be inserted by HW */
4194 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4195
4196 first_entry = tx_q->cur_tx;
4197 WARN_ON(tx_q->tx_skbuff[first_entry]);
4198
4199 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4200 desc = &tx_q->dma_entx[first_entry].basic;
4201 else
4202 desc = &tx_q->dma_tx[first_entry];
4203 first = desc;
4204
4205 if (has_vlan)
4206 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4207
4208 /* first descriptor: fill Headers on Buf1 */
4209 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4210 DMA_TO_DEVICE);
4211 if (dma_mapping_error(priv->device, des))
4212 goto dma_map_err;
4213
4214 tx_q->tx_skbuff_dma[first_entry].buf = des;
4215 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4216 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4217 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4218
4219 if (priv->dma_cap.addr64 <= 32) {
4220 first->des0 = cpu_to_le32(des);
4221
4222 /* Fill start of payload in buff2 of first descriptor */
4223 if (pay_len)
4224 first->des1 = cpu_to_le32(des + proto_hdr_len);
4225
4226 /* If needed take extra descriptors to fill the remaining payload */
4227 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4228 } else {
4229 stmmac_set_desc_addr(priv, first, des);
4230 tmp_pay_len = pay_len;
4231 des += proto_hdr_len;
4232 pay_len = 0;
4233 }
4234
4235 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4236
4237 /* Prepare fragments */
4238 for (i = 0; i < nfrags; i++) {
4239 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4240
4241 des = skb_frag_dma_map(priv->device, frag, 0,
4242 skb_frag_size(frag),
4243 DMA_TO_DEVICE);
4244 if (dma_mapping_error(priv->device, des))
4245 goto dma_map_err;
4246
4247 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4248 (i == nfrags - 1), queue);
4249
4250 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4251 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4252 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4253 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4254 }
4255
4256 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4257
4258 /* Only the last descriptor gets to point to the skb. */
4259 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4260 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4261
4262 /* Manage tx mitigation */
4263 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4264 tx_q->tx_count_frames += tx_packets;
4265
4266 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4267 set_ic = true;
4268 else if (!priv->tx_coal_frames[queue])
4269 set_ic = false;
4270 else if (tx_packets > priv->tx_coal_frames[queue])
4271 set_ic = true;
4272 else if ((tx_q->tx_count_frames %
4273 priv->tx_coal_frames[queue]) < tx_packets)
4274 set_ic = true;
4275 else
4276 set_ic = false;
4277
4278 if (set_ic) {
4279 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4280 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4281 else
4282 desc = &tx_q->dma_tx[tx_q->cur_tx];
4283
4284 tx_q->tx_count_frames = 0;
4285 stmmac_set_tx_ic(priv, desc);
4286 }
4287
4288 /* We've used all descriptors we need for this skb, however,
4289 * advance cur_tx so that it references a fresh descriptor.
4290 * ndo_start_xmit will fill this descriptor the next time it's
4291 * called and stmmac_tx_clean may clean up to this descriptor.
4292 */
4293 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4294
4295 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4296 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4297 __func__);
4298 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4299 }
4300
4301 u64_stats_update_begin(&txq_stats->q_syncp);
4302 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4303 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4304 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4305 if (set_ic)
4306 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4307 u64_stats_update_end(&txq_stats->q_syncp);
4308
4309 if (priv->sarc_type)
4310 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4311
4312 skb_tx_timestamp(skb);
4313
4314 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4315 priv->hwts_tx_en)) {
4316 /* declare that device is doing timestamping */
4317 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4318 stmmac_enable_tx_timestamp(priv, first);
4319 }
4320
4321 /* Complete the first descriptor before granting the DMA */
4322 stmmac_prepare_tso_tx_desc(priv, first, 1,
4323 proto_hdr_len,
4324 pay_len,
4325 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4326 hdr / 4, (skb->len - proto_hdr_len));
4327
4328 /* If context desc is used to change MSS */
4329 if (mss_desc) {
4330 /* Make sure that first descriptor has been completely
4331 * written, including its own bit. This is because MSS is
4332 * actually before first descriptor, so we need to make
4333 * sure that MSS's own bit is the last thing written.
4334 */
4335 dma_wmb();
4336 stmmac_set_tx_owner(priv, mss_desc);
4337 }
4338
4339 if (netif_msg_pktdata(priv)) {
4340 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4341 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4342 tx_q->cur_tx, first, nfrags);
4343 pr_info(">>> frame to be transmitted: ");
4344 print_pkt(skb->data, skb_headlen(skb));
4345 }
4346
4347 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4348
4349 stmmac_flush_tx_descriptors(priv, queue);
4350 stmmac_tx_timer_arm(priv, queue);
4351
4352 return NETDEV_TX_OK;
4353
4354 dma_map_err:
4355 dev_err(priv->device, "Tx dma map failed\n");
4356 dev_kfree_skb(skb);
4357 priv->xstats.tx_dropped++;
4358 return NETDEV_TX_OK;
4359 }
4360
4361 /**
4362 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4363 * @skb: socket buffer to check
4364 *
4365 * Check if a packet has an ethertype that will trigger the IP header checks
4366 * and IP/TCP checksum engine of the stmmac core.
4367 *
4368 * Return: true if the ethertype can trigger the checksum engine, false
4369 * otherwise
4370 */
4371 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4372 {
4373 int depth = 0;
4374 __be16 proto;
4375
4376 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4377 &depth);
4378
4379 return (depth <= ETH_HLEN) &&
4380 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4381 }
4382
4383 /**
4384 * stmmac_xmit - Tx entry point of the driver
4385 * @skb : the socket buffer
4386 * @dev : device pointer
4387 * Description : this is the tx entry point of the driver.
4388 * It programs the chain or the ring and supports oversized frames
4389 * and SG feature.
4390 */
4391 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4392 {
4393 unsigned int first_entry, tx_packets, enh_desc;
4394 struct stmmac_priv *priv = netdev_priv(dev);
4395 unsigned int nopaged_len = skb_headlen(skb);
4396 int i, csum_insertion = 0, is_jumbo = 0;
4397 u32 queue = skb_get_queue_mapping(skb);
4398 int nfrags = skb_shinfo(skb)->nr_frags;
4399 int gso = skb_shinfo(skb)->gso_type;
4400 struct stmmac_txq_stats *txq_stats;
4401 struct dma_edesc *tbs_desc = NULL;
4402 struct dma_desc *desc, *first;
4403 struct stmmac_tx_queue *tx_q;
4404 bool has_vlan, set_ic;
4405 int entry, first_tx;
4406 dma_addr_t des;
4407
4408 tx_q = &priv->dma_conf.tx_queue[queue];
4409 txq_stats = &priv->xstats.txq_stats[queue];
4410 first_tx = tx_q->cur_tx;
4411
4412 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4413 stmmac_disable_eee_mode(priv);
4414
4415 /* Manage oversized TCP frames for GMAC4 device */
4416 if (skb_is_gso(skb) && priv->tso) {
4417 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4418 return stmmac_tso_xmit(skb, dev);
4419 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4420 return stmmac_tso_xmit(skb, dev);
4421 }
4422
4423 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4424 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4425 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4426 queue));
4427 /* This is a hard error, log it. */
4428 netdev_err(priv->dev,
4429 "%s: Tx Ring full when queue awake\n",
4430 __func__);
4431 }
4432 return NETDEV_TX_BUSY;
4433 }
4434
4435 /* Check if VLAN can be inserted by HW */
4436 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4437
4438 entry = tx_q->cur_tx;
4439 first_entry = entry;
4440 WARN_ON(tx_q->tx_skbuff[first_entry]);
4441
4442 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4443 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4444 * queues. In that case, checksum offloading for those queues that don't
4445 * support tx coe needs to fall back to software checksum calculation.
4446 *
4447 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4448 * also have to be checksummed in software.
4449 */
4450 if (csum_insertion &&
4451 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4452 !stmmac_has_ip_ethertype(skb))) {
4453 if (unlikely(skb_checksum_help(skb)))
4454 goto dma_map_err;
4455 csum_insertion = !csum_insertion;
4456 }
4457
4458 if (likely(priv->extend_desc))
4459 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4460 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4461 desc = &tx_q->dma_entx[entry].basic;
4462 else
4463 desc = tx_q->dma_tx + entry;
4464
4465 first = desc;
4466
4467 if (has_vlan)
4468 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4469
4470 enh_desc = priv->plat->enh_desc;
4471 /* To program the descriptors according to the size of the frame */
4472 if (enh_desc)
4473 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4474
4475 if (unlikely(is_jumbo)) {
4476 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4477 if (unlikely(entry < 0) && (entry != -EINVAL))
4478 goto dma_map_err;
4479 }
4480
4481 for (i = 0; i < nfrags; i++) {
4482 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4483 int len = skb_frag_size(frag);
4484 bool last_segment = (i == (nfrags - 1));
4485
4486 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4487 WARN_ON(tx_q->tx_skbuff[entry]);
4488
4489 if (likely(priv->extend_desc))
4490 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4491 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4492 desc = &tx_q->dma_entx[entry].basic;
4493 else
4494 desc = tx_q->dma_tx + entry;
4495
4496 des = skb_frag_dma_map(priv->device, frag, 0, len,
4497 DMA_TO_DEVICE);
4498 if (dma_mapping_error(priv->device, des))
4499 goto dma_map_err; /* should reuse desc w/o issues */
4500
4501 tx_q->tx_skbuff_dma[entry].buf = des;
4502
4503 stmmac_set_desc_addr(priv, desc, des);
4504
4505 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4506 tx_q->tx_skbuff_dma[entry].len = len;
4507 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4508 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4509
4510 /* Prepare the descriptor and set the own bit too */
4511 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4512 priv->mode, 1, last_segment, skb->len);
4513 }
4514
4515 /* Only the last descriptor gets to point to the skb. */
4516 tx_q->tx_skbuff[entry] = skb;
4517 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4518
4519 /* According to the coalesce parameter the IC bit for the latest
4520 * segment is reset and the timer re-started to clean the tx status.
4521 * This approach takes care of the fragments: desc is the first
4522 * element in case of no SG.
4523 */
4524 tx_packets = (entry + 1) - first_tx;
4525 tx_q->tx_count_frames += tx_packets;
4526
4527 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4528 set_ic = true;
4529 else if (!priv->tx_coal_frames[queue])
4530 set_ic = false;
4531 else if (tx_packets > priv->tx_coal_frames[queue])
4532 set_ic = true;
4533 else if ((tx_q->tx_count_frames %
4534 priv->tx_coal_frames[queue]) < tx_packets)
4535 set_ic = true;
4536 else
4537 set_ic = false;
4538
4539 if (set_ic) {
4540 if (likely(priv->extend_desc))
4541 desc = &tx_q->dma_etx[entry].basic;
4542 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4543 desc = &tx_q->dma_entx[entry].basic;
4544 else
4545 desc = &tx_q->dma_tx[entry];
4546
4547 tx_q->tx_count_frames = 0;
4548 stmmac_set_tx_ic(priv, desc);
4549 }
4550
4551 /* We've used all descriptors we need for this skb, however,
4552 * advance cur_tx so that it references a fresh descriptor.
4553 * ndo_start_xmit will fill this descriptor the next time it's
4554 * called and stmmac_tx_clean may clean up to this descriptor.
4555 */
4556 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4557 tx_q->cur_tx = entry;
4558
4559 if (netif_msg_pktdata(priv)) {
4560 netdev_dbg(priv->dev,
4561 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4562 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4563 entry, first, nfrags);
4564
4565 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4566 print_pkt(skb->data, skb->len);
4567 }
4568
4569 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4570 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4571 __func__);
4572 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4573 }
4574
4575 u64_stats_update_begin(&txq_stats->q_syncp);
4576 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4577 if (set_ic)
4578 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4579 u64_stats_update_end(&txq_stats->q_syncp);
4580
4581 if (priv->sarc_type)
4582 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4583
4584 skb_tx_timestamp(skb);
4585
4586 /* Ready to fill the first descriptor and set the OWN bit w/o any
4587 * problems because all the descriptors are actually ready to be
4588 * passed to the DMA engine.
4589 */
4590 if (likely(!is_jumbo)) {
4591 bool last_segment = (nfrags == 0);
4592
4593 des = dma_map_single(priv->device, skb->data,
4594 nopaged_len, DMA_TO_DEVICE);
4595 if (dma_mapping_error(priv->device, des))
4596 goto dma_map_err;
4597
4598 tx_q->tx_skbuff_dma[first_entry].buf = des;
4599 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4600 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4601
4602 stmmac_set_desc_addr(priv, first, des);
4603
4604 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4605 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4606
4607 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4608 priv->hwts_tx_en)) {
4609 /* declare that device is doing timestamping */
4610 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4611 stmmac_enable_tx_timestamp(priv, first);
4612 }
4613
4614 /* Prepare the first descriptor setting the OWN bit too */
4615 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4616 csum_insertion, priv->mode, 0, last_segment,
4617 skb->len);
4618 }
4619
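/* When TBS is enabled, program the launch time from skb->tstamp into the
 * enhanced descriptor.
 */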
4620 if (tx_q->tbs & STMMAC_TBS_EN) {
4621 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4622
4623 tbs_desc = &tx_q->dma_entx[first_entry];
4624 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4625 }
4626
4627 stmmac_set_tx_owner(priv, first);
4628
4629 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4630
4631 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4632
4633 stmmac_flush_tx_descriptors(priv, queue);
4634 stmmac_tx_timer_arm(priv, queue);
4635
4636 return NETDEV_TX_OK;
4637
4638 dma_map_err:
4639 netdev_err(priv->dev, "Tx DMA map failed\n");
4640 dev_kfree_skb(skb);
4641 priv->xstats.tx_dropped++;
4642 return NETDEV_TX_OK;
4643 }
4644
4645 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4646 {
4647 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4648 __be16 vlan_proto = veth->h_vlan_proto;
4649 u16 vlanid;
4650
4651 if ((vlan_proto == htons(ETH_P_8021Q) &&
4652 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4653 (vlan_proto == htons(ETH_P_8021AD) &&
4654 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4655 /* pop the vlan tag */
4656 vlanid = ntohs(veth->h_vlan_TCI);
4657 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4658 skb_pull(skb, VLAN_HLEN);
4659 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4660 }
4661 }
4662
4663 /**
4664 * stmmac_rx_refill - refill used skb preallocated buffers
4665 * @priv: driver private structure
4666 * @queue: RX queue index
4667 * Description : this is to refill the used RX buffers for the zero-copy
4668 * based reception process.
4669 */
4670 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4671 {
4672 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4673 int dirty = stmmac_rx_dirty(priv, queue);
4674 unsigned int entry = rx_q->dirty_rx;
4675 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4676
4677 if (priv->dma_cap.host_dma_width <= 32)
4678 gfp |= GFP_DMA32;
4679
4680 while (dirty-- > 0) {
4681 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4682 struct dma_desc *p;
4683 bool use_rx_wd;
4684
4685 if (priv->extend_desc)
4686 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4687 else
4688 p = rx_q->dma_rx + entry;
4689
4690 if (!buf->page) {
4691 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4692 if (!buf->page)
4693 break;
4694 }
4695
4696 if (priv->sph && !buf->sec_page) {
4697 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4698 if (!buf->sec_page)
4699 break;
4700
4701 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4702 }
4703
4704 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4705
4706 stmmac_set_desc_addr(priv, p, buf->addr);
4707 if (priv->sph)
4708 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4709 else
4710 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4711 stmmac_refill_desc3(priv, rx_q, p);
4712
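/* Decide whether the RX watchdog interrupt should be requested for this
 * descriptor, based on the RX frame coalescing settings.
 */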
4713 rx_q->rx_count_frames++;
4714 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4715 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4716 rx_q->rx_count_frames = 0;
4717
4718 use_rx_wd = !priv->rx_coal_frames[queue];
4719 use_rx_wd |= rx_q->rx_count_frames > 0;
4720 if (!priv->use_riwt)
4721 use_rx_wd = false;
4722
4723 dma_wmb();
4724 stmmac_set_rx_owner(priv, p, use_rx_wd);
4725
4726 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4727 }
4728 rx_q->dirty_rx = entry;
4729 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4730 (rx_q->dirty_rx * sizeof(struct dma_desc));
4731 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4732 }
4733
4734 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4735 struct dma_desc *p,
4736 int status, unsigned int len)
4737 {
4738 unsigned int plen = 0, hlen = 0;
4739 int coe = priv->hw->rx_csum;
4740
4741 /* Not first descriptor, buffer is always zero */
4742 if (priv->sph && len)
4743 return 0;
4744
4745 /* First descriptor, get split header length */
4746 stmmac_get_rx_header_len(priv, p, &hlen);
4747 if (priv->sph && hlen) {
4748 priv->xstats.rx_split_hdr_pkt_n++;
4749 return hlen;
4750 }
4751
4752 /* First descriptor, not last descriptor and not split header */
4753 if (status & rx_not_ls)
4754 return priv->dma_conf.dma_buf_sz;
4755
4756 plen = stmmac_get_rx_frame_len(priv, p, coe);
4757
4758 /* First descriptor and last descriptor and not split header */
4759 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4760 }
4761
4762 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4763 struct dma_desc *p,
4764 int status, unsigned int len)
4765 {
4766 int coe = priv->hw->rx_csum;
4767 unsigned int plen = 0;
4768
4769 /* Not split header, buffer is not available */
4770 if (!priv->sph)
4771 return 0;
4772
4773 /* Not last descriptor */
4774 if (status & rx_not_ls)
4775 return priv->dma_conf.dma_buf_sz;
4776
4777 plen = stmmac_get_rx_frame_len(priv, p, coe);
4778
4779 /* Last descriptor */
4780 return plen - len;
4781 }
4782
4783 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4784 struct xdp_frame *xdpf, bool dma_map)
4785 {
4786 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4787 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4788 unsigned int entry = tx_q->cur_tx;
4789 struct dma_desc *tx_desc;
4790 dma_addr_t dma_addr;
4791 bool set_ic;
4792
4793 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4794 return STMMAC_XDP_CONSUMED;
4795
4796 if (likely(priv->extend_desc))
4797 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4798 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4799 tx_desc = &tx_q->dma_entx[entry].basic;
4800 else
4801 tx_desc = tx_q->dma_tx + entry;
4802
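/* dma_map is true for frames coming from ndo_xdp_xmit(); XDP_TX frames
 * already live in the RX page pool and only need a DMA sync.
 */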
4803 if (dma_map) {
4804 dma_addr = dma_map_single(priv->device, xdpf->data,
4805 xdpf->len, DMA_TO_DEVICE);
4806 if (dma_mapping_error(priv->device, dma_addr))
4807 return STMMAC_XDP_CONSUMED;
4808
4809 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4810 } else {
4811 struct page *page = virt_to_page(xdpf->data);
4812
4813 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4814 xdpf->headroom;
4815 dma_sync_single_for_device(priv->device, dma_addr,
4816 xdpf->len, DMA_BIDIRECTIONAL);
4817
4818 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4819 }
4820
4821 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4822 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4823 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4824 tx_q->tx_skbuff_dma[entry].last_segment = true;
4825 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4826
4827 tx_q->xdpf[entry] = xdpf;
4828
4829 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4830
4831 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4832 true, priv->mode, true, true,
4833 xdpf->len);
4834
4835 tx_q->tx_count_frames++;
4836
4837 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4838 set_ic = true;
4839 else
4840 set_ic = false;
4841
4842 if (set_ic) {
4843 tx_q->tx_count_frames = 0;
4844 stmmac_set_tx_ic(priv, tx_desc);
4845 u64_stats_update_begin(&txq_stats->q_syncp);
4846 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4847 u64_stats_update_end(&txq_stats->q_syncp);
4848 }
4849
4850 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4851
4852 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4853 tx_q->cur_tx = entry;
4854
4855 return STMMAC_XDP_TX;
4856 }
4857
4858 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4859 int cpu)
4860 {
4861 int index = cpu;
4862
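/* Fold the CPU id into the range of available TX queues. */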
4863 if (unlikely(index < 0))
4864 index = 0;
4865
4866 while (index >= priv->plat->tx_queues_to_use)
4867 index -= priv->plat->tx_queues_to_use;
4868
4869 return index;
4870 }
4871
4872 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4873 struct xdp_buff *xdp)
4874 {
4875 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4876 int cpu = smp_processor_id();
4877 struct netdev_queue *nq;
4878 int queue;
4879 int res;
4880
4881 if (unlikely(!xdpf))
4882 return STMMAC_XDP_CONSUMED;
4883
4884 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4885 nq = netdev_get_tx_queue(priv->dev, queue);
4886
4887 __netif_tx_lock(nq, cpu);
4888 /* Avoids TX time-out as we are sharing with slow path */
4889 txq_trans_cond_update(nq);
4890
4891 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4892 if (res == STMMAC_XDP_TX)
4893 stmmac_flush_tx_descriptors(priv, queue);
4894
4895 __netif_tx_unlock(nq);
4896
4897 return res;
4898 }
4899
4900 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4901 struct bpf_prog *prog,
4902 struct xdp_buff *xdp)
4903 {
4904 u32 act;
4905 int res;
4906
4907 act = bpf_prog_run_xdp(prog, xdp);
4908 switch (act) {
4909 case XDP_PASS:
4910 res = STMMAC_XDP_PASS;
4911 break;
4912 case XDP_TX:
4913 res = stmmac_xdp_xmit_back(priv, xdp);
4914 break;
4915 case XDP_REDIRECT:
4916 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4917 res = STMMAC_XDP_CONSUMED;
4918 else
4919 res = STMMAC_XDP_REDIRECT;
4920 break;
4921 default:
4922 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4923 fallthrough;
4924 case XDP_ABORTED:
4925 trace_xdp_exception(priv->dev, prog, act);
4926 fallthrough;
4927 case XDP_DROP:
4928 res = STMMAC_XDP_CONSUMED;
4929 break;
4930 }
4931
4932 return res;
4933 }
4934
4935 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4936 struct xdp_buff *xdp)
4937 {
4938 struct bpf_prog *prog;
4939 int res;
4940
4941 prog = READ_ONCE(priv->xdp_prog);
4942 if (!prog) {
4943 res = STMMAC_XDP_PASS;
4944 goto out;
4945 }
4946
4947 res = __stmmac_xdp_run_prog(priv, prog, xdp);
4948 out:
4949 return ERR_PTR(-res);
4950 }
4951
4952 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4953 int xdp_status)
4954 {
4955 int cpu = smp_processor_id();
4956 int queue;
4957
4958 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4959
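/* Arm the TX coalescing timer for XDP_TX frames and flush any pending
 * XDP redirects once per NAPI run.
 */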
4960 if (xdp_status & STMMAC_XDP_TX)
4961 stmmac_tx_timer_arm(priv, queue);
4962
4963 if (xdp_status & STMMAC_XDP_REDIRECT)
4964 xdp_do_flush();
4965 }
4966
4967 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4968 struct xdp_buff *xdp)
4969 {
4970 unsigned int metasize = xdp->data - xdp->data_meta;
4971 unsigned int datasize = xdp->data_end - xdp->data;
4972 struct sk_buff *skb;
4973
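/* Allocate a new skb and copy the frame out of the XSK buffer; the caller
 * returns the XSK buffer to the pool.
 */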
4974 skb = __napi_alloc_skb(&ch->rxtx_napi,
4975 xdp->data_end - xdp->data_hard_start,
4976 GFP_ATOMIC | __GFP_NOWARN);
4977 if (unlikely(!skb))
4978 return NULL;
4979
4980 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4981 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4982 if (metasize)
4983 skb_metadata_set(skb, metasize);
4984
4985 return skb;
4986 }
4987
4988 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4989 struct dma_desc *p, struct dma_desc *np,
4990 struct xdp_buff *xdp)
4991 {
4992 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
4993 struct stmmac_channel *ch = &priv->channel[queue];
4994 unsigned int len = xdp->data_end - xdp->data;
4995 enum pkt_hash_types hash_type;
4996 int coe = priv->hw->rx_csum;
4997 struct sk_buff *skb;
4998 u32 hash;
4999
5000 skb = stmmac_construct_skb_zc(ch, xdp);
5001 if (!skb) {
5002 priv->xstats.rx_dropped++;
5003 return;
5004 }
5005
5006 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5007 stmmac_rx_vlan(priv->dev, skb);
5008 skb->protocol = eth_type_trans(skb, priv->dev);
5009
5010 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5011 skb_checksum_none_assert(skb);
5012 else
5013 skb->ip_summed = CHECKSUM_UNNECESSARY;
5014
5015 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5016 skb_set_hash(skb, hash, hash_type);
5017
5018 skb_record_rx_queue(skb, queue);
5019 napi_gro_receive(&ch->rxtx_napi, skb);
5020
5021 u64_stats_update_begin(&rxq_stats->napi_syncp);
5022 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5023 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5024 u64_stats_update_end(&rxq_stats->napi_syncp);
5025 }
5026
5027 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5028 {
5029 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5030 unsigned int entry = rx_q->dirty_rx;
5031 struct dma_desc *rx_desc = NULL;
5032 bool ret = true;
5033
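/* Never try to refill more buffers than are currently dirty. */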
5034 budget = min(budget, stmmac_rx_dirty(priv, queue));
5035
5036 while (budget-- > 0 && entry != rx_q->cur_rx) {
5037 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5038 dma_addr_t dma_addr;
5039 bool use_rx_wd;
5040
5041 if (!buf->xdp) {
5042 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5043 if (!buf->xdp) {
5044 ret = false;
5045 break;
5046 }
5047 }
5048
5049 if (priv->extend_desc)
5050 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5051 else
5052 rx_desc = rx_q->dma_rx + entry;
5053
5054 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5055 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5056 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5057 stmmac_refill_desc3(priv, rx_q, rx_desc);
5058
5059 rx_q->rx_count_frames++;
5060 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5061 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5062 rx_q->rx_count_frames = 0;
5063
5064 use_rx_wd = !priv->rx_coal_frames[queue];
5065 use_rx_wd |= rx_q->rx_count_frames > 0;
5066 if (!priv->use_riwt)
5067 use_rx_wd = false;
5068
5069 dma_wmb();
5070 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5071
5072 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5073 }
5074
5075 if (rx_desc) {
5076 rx_q->dirty_rx = entry;
5077 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5078 (rx_q->dirty_rx * sizeof(struct dma_desc));
5079 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5080 }
5081
5082 return ret;
5083 }
5084
5085 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5086 {
5087 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5088 * to represent incoming packet, whereas cb field in the same structure
5089 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5090 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5091 */
5092 return (struct stmmac_xdp_buff *)xdp;
5093 }
5094
5095 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5096 {
5097 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5098 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5099 unsigned int count = 0, error = 0, len = 0;
5100 int dirty = stmmac_rx_dirty(priv, queue);
5101 unsigned int next_entry = rx_q->cur_rx;
5102 u32 rx_errors = 0, rx_dropped = 0;
5103 unsigned int desc_size;
5104 struct bpf_prog *prog;
5105 bool failure = false;
5106 int xdp_status = 0;
5107 int status = 0;
5108
5109 if (netif_msg_rx_status(priv)) {
5110 void *rx_head;
5111
5112 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5113 if (priv->extend_desc) {
5114 rx_head = (void *)rx_q->dma_erx;
5115 desc_size = sizeof(struct dma_extended_desc);
5116 } else {
5117 rx_head = (void *)rx_q->dma_rx;
5118 desc_size = sizeof(struct dma_desc);
5119 }
5120
5121 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5122 rx_q->dma_rx_phy, desc_size);
5123 }
5124 while (count < limit) {
5125 struct stmmac_rx_buffer *buf;
5126 struct stmmac_xdp_buff *ctx;
5127 unsigned int buf1_len = 0;
5128 struct dma_desc *np, *p;
5129 int entry;
5130 int res;
5131
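/* Resume a frame whose processing spanned the previous NAPI poll. */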
5132 if (!count && rx_q->state_saved) {
5133 error = rx_q->state.error;
5134 len = rx_q->state.len;
5135 } else {
5136 rx_q->state_saved = false;
5137 error = 0;
5138 len = 0;
5139 }
5140
5141 if (count >= limit)
5142 break;
5143
5144 read_again:
5145 buf1_len = 0;
5146 entry = next_entry;
5147 buf = &rx_q->buf_pool[entry];
5148
5149 if (dirty >= STMMAC_RX_FILL_BATCH) {
5150 failure = failure ||
5151 !stmmac_rx_refill_zc(priv, queue, dirty);
5152 dirty = 0;
5153 }
5154
5155 if (priv->extend_desc)
5156 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5157 else
5158 p = rx_q->dma_rx + entry;
5159
5160 /* read the status of the incoming frame */
5161 status = stmmac_rx_status(priv, &priv->xstats, p);
5162 /* check if managed by the DMA otherwise go ahead */
5163 if (unlikely(status & dma_own))
5164 break;
5165
5166 /* Prefetch the next RX descriptor */
5167 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5168 priv->dma_conf.dma_rx_size);
5169 next_entry = rx_q->cur_rx;
5170
5171 if (priv->extend_desc)
5172 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5173 else
5174 np = rx_q->dma_rx + next_entry;
5175
5176 prefetch(np);
5177
5178 /* Ensure a valid XSK buffer before proceed */
5179 if (!buf->xdp)
5180 break;
5181
5182 if (priv->extend_desc)
5183 stmmac_rx_extended_status(priv, &priv->xstats,
5184 rx_q->dma_erx + entry);
5185 if (unlikely(status == discard_frame)) {
5186 xsk_buff_free(buf->xdp);
5187 buf->xdp = NULL;
5188 dirty++;
5189 error = 1;
5190 if (!priv->hwts_rx_en)
5191 rx_errors++;
5192 }
5193
5194 if (unlikely(error && (status & rx_not_ls)))
5195 goto read_again;
5196 if (unlikely(error)) {
5197 count++;
5198 continue;
5199 }
5200
5201 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5202 if (likely(status & rx_not_ls)) {
5203 xsk_buff_free(buf->xdp);
5204 buf->xdp = NULL;
5205 dirty++;
5206 count++;
5207 goto read_again;
5208 }
5209
5210 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5211 ctx->priv = priv;
5212 ctx->desc = p;
5213 ctx->ndesc = np;
5214
5215 /* XDP ZC frames only support primary buffers for now */
5216 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5217 len += buf1_len;
5218
5219 /* ACS is disabled; strip manually. */
5220 if (likely(!(status & rx_not_ls))) {
5221 buf1_len -= ETH_FCS_LEN;
5222 len -= ETH_FCS_LEN;
5223 }
5224
5225 /* RX buffer is good and fits into an XSK pool buffer */
5226 buf->xdp->data_end = buf->xdp->data + buf1_len;
5227 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5228
5229 prog = READ_ONCE(priv->xdp_prog);
5230 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5231
5232 switch (res) {
5233 case STMMAC_XDP_PASS:
5234 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5235 xsk_buff_free(buf->xdp);
5236 break;
5237 case STMMAC_XDP_CONSUMED:
5238 xsk_buff_free(buf->xdp);
5239 rx_dropped++;
5240 break;
5241 case STMMAC_XDP_TX:
5242 case STMMAC_XDP_REDIRECT:
5243 xdp_status |= res;
5244 break;
5245 }
5246
5247 buf->xdp = NULL;
5248 dirty++;
5249 count++;
5250 }
5251
5252 if (status & rx_not_ls) {
5253 rx_q->state_saved = true;
5254 rx_q->state.error = error;
5255 rx_q->state.len = len;
5256 }
5257
5258 stmmac_finalize_xdp_rx(priv, xdp_status);
5259
5260 u64_stats_update_begin(&rxq_stats->napi_syncp);
5261 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5262 u64_stats_update_end(&rxq_stats->napi_syncp);
5263
5264 priv->xstats.rx_dropped += rx_dropped;
5265 priv->xstats.rx_errors += rx_errors;
5266
5267 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5268 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5269 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5270 else
5271 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5272
5273 return (int)count;
5274 }
5275
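/* Returning the full budget when a refill failed keeps this queue on the
 * NAPI poll list so the buffer allocation is retried soon, even if fewer
 * than 'limit' packets were actually processed.
 */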
5276 return failure ? limit : (int)count;
5277 }
5278
5279 /**
5280 * stmmac_rx - manage the receive process
5281 * @priv: driver private structure
5282 * @limit: NAPI budget
5283 * @queue: RX queue index.
5284 * Description: this is the function called by the NAPI poll method.
5285 * It gets all the frames inside the ring.
5286 */
5287 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5288 {
5289 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5290 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5291 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5292 struct stmmac_channel *ch = &priv->channel[queue];
5293 unsigned int count = 0, error = 0, len = 0;
5294 int status = 0, coe = priv->hw->rx_csum;
5295 unsigned int next_entry = rx_q->cur_rx;
5296 enum dma_data_direction dma_dir;
5297 unsigned int desc_size;
5298 struct sk_buff *skb = NULL;
5299 struct stmmac_xdp_buff ctx;
5300 int xdp_status = 0;
5301 int buf_sz;
5302
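/* buf_sz is rounded up to a whole number of pages because it is passed to
 * xdp_init_buff() below as the frame size, while clamping 'limit' keeps a
 * single poll from walking more descriptors than the ring holds.
 */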
5303 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5304 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5305 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5306
5307 if (netif_msg_rx_status(priv)) {
5308 void *rx_head;
5309
5310 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5311 if (priv->extend_desc) {
5312 rx_head = (void *)rx_q->dma_erx;
5313 desc_size = sizeof(struct dma_extended_desc);
5314 } else {
5315 rx_head = (void *)rx_q->dma_rx;
5316 desc_size = sizeof(struct dma_desc);
5317 }
5318
5319 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5320 rx_q->dma_rx_phy, desc_size);
5321 }
5322 while (count < limit) {
5323 unsigned int buf1_len = 0, buf2_len = 0;
5324 enum pkt_hash_types hash_type;
5325 struct stmmac_rx_buffer *buf;
5326 struct dma_desc *np, *p;
5327 int entry;
5328 u32 hash;
5329
5330 if (!count && rx_q->state_saved) {
5331 skb = rx_q->state.skb;
5332 error = rx_q->state.error;
5333 len = rx_q->state.len;
5334 } else {
5335 rx_q->state_saved = false;
5336 skb = NULL;
5337 error = 0;
5338 len = 0;
5339 }
5340
5341 read_again:
5342 if (count >= limit)
5343 break;
5344
5345 buf1_len = 0;
5346 buf2_len = 0;
5347 entry = next_entry;
5348 buf = &rx_q->buf_pool[entry];
5349
5350 if (priv->extend_desc)
5351 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5352 else
5353 p = rx_q->dma_rx + entry;
5354
5355 /* read the status of the incoming frame */
5356 status = stmmac_rx_status(priv, &priv->xstats, p);
5357 /* check if the descriptor is still owned by the DMA, otherwise go ahead */
5358 if (unlikely(status & dma_own))
5359 break;
5360
5361 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5362 priv->dma_conf.dma_rx_size);
5363 next_entry = rx_q->cur_rx;
5364
5365 if (priv->extend_desc)
5366 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5367 else
5368 np = rx_q->dma_rx + next_entry;
5369
5370 prefetch(np);
5371
5372 if (priv->extend_desc)
5373 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5374 if (unlikely(status == discard_frame)) {
5375 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5376 buf->page = NULL;
5377 error = 1;
5378 if (!priv->hwts_rx_en)
5379 rx_errors++;
5380 }
5381
5382 if (unlikely(error && (status & rx_not_ls)))
5383 goto read_again;
5384 if (unlikely(error)) {
5385 dev_kfree_skb(skb);
5386 skb = NULL;
5387 count++;
5388 continue;
5389 }
5390
5391 /* Buffer is good. Go on. */
5392
5393 prefetch(page_address(buf->page) + buf->page_offset);
5394 if (buf->sec_page)
5395 prefetch(page_address(buf->sec_page));
5396
5397 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5398 len += buf1_len;
5399 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5400 len += buf2_len;
5401
5402 /* ACS is disabled; strip manually. */
5403 if (likely(!(status & rx_not_ls))) {
5404 if (buf2_len) {
5405 buf2_len -= ETH_FCS_LEN;
5406 len -= ETH_FCS_LEN;
5407 } else if (buf1_len) {
5408 buf1_len -= ETH_FCS_LEN;
5409 len -= ETH_FCS_LEN;
5410 }
5411 }
5412
5413 if (!skb) {
5414 unsigned int pre_len, sync_len;
5415
5416 dma_sync_single_for_cpu(priv->device, buf->addr,
5417 buf1_len, dma_dir);
5418
5419 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5420 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5421 buf->page_offset, buf1_len, true);
5422
5423 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5424 buf->page_offset;
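/* pre_len captures the frame length before the BPF program runs; sync_len
 * below takes the larger of the pre- and post-program lengths so the page
 * is synced and recycled for everything the CPU may have touched, even if
 * the program shrank the frame with bpf_xdp_adjust_tail().
 */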
5425
5426 ctx.priv = priv;
5427 ctx.desc = p;
5428 ctx.ndesc = np;
5429
5430 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5431 /* Due to xdp_adjust_tail: the DMA sync for_device
5432  * must cover the maximum length the CPU touched
5433  */
5434 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5435 buf->page_offset;
5436 sync_len = max(sync_len, pre_len);
5437
5438 /* For any verdict other than XDP_PASS */
5439 if (IS_ERR(skb)) {
5440 unsigned int xdp_res = -PTR_ERR(skb);
5441
5442 if (xdp_res & STMMAC_XDP_CONSUMED) {
5443 page_pool_put_page(rx_q->page_pool,
5444 virt_to_head_page(ctx.xdp.data),
5445 sync_len, true);
5446 buf->page = NULL;
5447 rx_dropped++;
5448
5449 /* Clear skb, as it was set to an
5450  * error-encoded verdict by the XDP program.
5451  */
5452 skb = NULL;
5453
5454 if (unlikely((status & rx_not_ls)))
5455 goto read_again;
5456
5457 count++;
5458 continue;
5459 } else if (xdp_res & (STMMAC_XDP_TX |
5460 STMMAC_XDP_REDIRECT)) {
5461 xdp_status |= xdp_res;
5462 buf->page = NULL;
5463 skb = NULL;
5464 count++;
5465 continue;
5466 }
5467 }
5468 }
5469
5470 if (!skb) {
5471 /* XDP program may expand or reduce tail */
5472 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5473
5474 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5475 if (!skb) {
5476 rx_dropped++;
5477 count++;
5478 goto drain_data;
5479 }
5480
5481 /* XDP program may adjust header */
5482 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5483 skb_put(skb, buf1_len);
5484
5485 /* Data payload copied into SKB, page ready for recycle */
5486 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5487 buf->page = NULL;
5488 } else if (buf1_len) {
5489 dma_sync_single_for_cpu(priv->device, buf->addr,
5490 buf1_len, dma_dir);
5491 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5492 buf->page, buf->page_offset, buf1_len,
5493 priv->dma_conf.dma_buf_sz);
5494
5495 /* Data payload appended into SKB */
5496 skb_mark_for_recycle(skb);
5497 buf->page = NULL;
5498 }
5499
5500 if (buf2_len) {
5501 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5502 buf2_len, dma_dir);
5503 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5504 buf->sec_page, 0, buf2_len,
5505 priv->dma_conf.dma_buf_sz);
5506
5507 /* Data payload appended into SKB */
5508 skb_mark_for_recycle(skb);
5509 buf->sec_page = NULL;
5510 }
5511
5512 drain_data:
5513 if (likely(status & rx_not_ls))
5514 goto read_again;
5515 if (!skb)
5516 continue;
5517
5518 /* Got entire packet into SKB. Finish it. */
5519
5520 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5521 stmmac_rx_vlan(priv->dev, skb);
5522 skb->protocol = eth_type_trans(skb, priv->dev);
5523
5524 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5525 skb_checksum_none_assert(skb);
5526 else
5527 skb->ip_summed = CHECKSUM_UNNECESSARY;
5528
5529 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5530 skb_set_hash(skb, hash, hash_type);
5531
5532 skb_record_rx_queue(skb, queue);
5533 napi_gro_receive(&ch->rx_napi, skb);
5534 skb = NULL;
5535
5536 rx_packets++;
5537 rx_bytes += len;
5538 count++;
5539 }
5540
5541 if (status & rx_not_ls || skb) {
5542 rx_q->state_saved = true;
5543 rx_q->state.skb = skb;
5544 rx_q->state.error = error;
5545 rx_q->state.len = len;
5546 }
5547
5548 stmmac_finalize_xdp_rx(priv, xdp_status);
5549
5550 stmmac_rx_refill(priv, queue);
5551
5552 u64_stats_update_begin(&rxq_stats->napi_syncp);
5553 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5554 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5555 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5556 u64_stats_update_end(&rxq_stats->napi_syncp);
5557
5558 priv->xstats.rx_dropped += rx_dropped;
5559 priv->xstats.rx_errors += rx_errors;
5560
5561 return count;
5562 }
5563
5564 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5565 {
5566 struct stmmac_channel *ch =
5567 container_of(napi, struct stmmac_channel, rx_napi);
5568 struct stmmac_priv *priv = ch->priv_data;
5569 struct stmmac_rxq_stats *rxq_stats;
5570 u32 chan = ch->index;
5571 int work_done;
5572
5573 rxq_stats = &priv->xstats.rxq_stats[chan];
5574 u64_stats_update_begin(&rxq_stats->napi_syncp);
5575 u64_stats_inc(&rxq_stats->napi.poll);
5576 u64_stats_update_end(&rxq_stats->napi_syncp);
5577
5578 work_done = stmmac_rx(priv, budget, chan);
5579 if (work_done < budget && napi_complete_done(napi, work_done)) {
5580 unsigned long flags;
5581
5582 spin_lock_irqsave(&ch->lock, flags);
5583 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5584 spin_unlock_irqrestore(&ch->lock, flags);
5585 }
5586
5587 return work_done;
5588 }
5589
5590 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5591 {
5592 struct stmmac_channel *ch =
5593 container_of(napi, struct stmmac_channel, tx_napi);
5594 struct stmmac_priv *priv = ch->priv_data;
5595 struct stmmac_txq_stats *txq_stats;
5596 u32 chan = ch->index;
5597 int work_done;
5598
5599 txq_stats = &priv->xstats.txq_stats[chan];
5600 u64_stats_update_begin(&txq_stats->napi_syncp);
5601 u64_stats_inc(&txq_stats->napi.poll);
5602 u64_stats_update_end(&txq_stats->napi_syncp);
5603
5604 work_done = stmmac_tx_clean(priv, budget, chan);
5605 work_done = min(work_done, budget);
5606
5607 if (work_done < budget && napi_complete_done(napi, work_done)) {
5608 unsigned long flags;
5609
5610 spin_lock_irqsave(&ch->lock, flags);
5611 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5612 spin_unlock_irqrestore(&ch->lock, flags);
5613 }
5614
5615 return work_done;
5616 }
5617
5618 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5619 {
5620 struct stmmac_channel *ch =
5621 container_of(napi, struct stmmac_channel, rxtx_napi);
5622 struct stmmac_priv *priv = ch->priv_data;
5623 int rx_done, tx_done, rxtx_done;
5624 struct stmmac_rxq_stats *rxq_stats;
5625 struct stmmac_txq_stats *txq_stats;
5626 u32 chan = ch->index;
5627
5628 rxq_stats = &priv->xstats.rxq_stats[chan];
5629 u64_stats_update_begin(&rxq_stats->napi_syncp);
5630 u64_stats_inc(&rxq_stats->napi.poll);
5631 u64_stats_update_end(&rxq_stats->napi_syncp);
5632
5633 txq_stats = &priv->xstats.txq_stats[chan];
5634 u64_stats_update_begin(&txq_stats->napi_syncp);
5635 u64_stats_inc(&txq_stats->napi.poll);
5636 u64_stats_update_end(&txq_stats->napi_syncp);
5637
5638 tx_done = stmmac_tx_clean(priv, budget, chan);
5639 tx_done = min(tx_done, budget);
5640
5641 rx_done = stmmac_rx_zc(priv, budget, chan);
5642
5643 rxtx_done = max(tx_done, rx_done);
5644
5645 /* If either TX or RX work is not complete, return budget
5646 * and keep polling
5647 */
5648 if (rxtx_done >= budget)
5649 return budget;
5650
5651 /* all work done, exit the polling mode */
5652 if (napi_complete_done(napi, rxtx_done)) {
5653 unsigned long flags;
5654
5655 spin_lock_irqsave(&ch->lock, flags);
5656 /* Both RX and TX work are complete,
5657 * so enable both RX & TX IRQs.
5658 */
5659 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5660 spin_unlock_irqrestore(&ch->lock, flags);
5661 }
5662
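/* Per the NAPI contract, never report the full budget once
 * napi_complete_done() may have been called; capping the return value at
 * budget - 1 signals that all work for this round is done.
 */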
5663 return min(rxtx_done, budget - 1);
5664 }
5665
5666 /**
5667 * stmmac_tx_timeout
5668 * @dev : Pointer to net device structure
5669 * @txqueue: the index of the hanging transmit queue
5670 * Description: this function is called when a packet transmission fails to
5671 * complete within a reasonable time. The driver will mark the error in the
5672 * netdev structure and arrange for the device to be reset to a sane state
5673 * in order to transmit a new packet.
5674 */
5675 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5676 {
5677 struct stmmac_priv *priv = netdev_priv(dev);
5678
5679 stmmac_global_err(priv);
5680 }
5681
5682 /**
5683 * stmmac_set_rx_mode - entry point for multicast addressing
5684 * @dev : pointer to the device structure
5685 * Description:
5686 * This function is a driver entry point which gets called by the kernel
5687 * whenever multicast addresses must be enabled/disabled.
5688 * Return value:
5689 * void.
5690 */
5691 static void stmmac_set_rx_mode(struct net_device *dev)
5692 {
5693 struct stmmac_priv *priv = netdev_priv(dev);
5694
5695 stmmac_set_filter(priv, priv->hw, dev);
5696 }
5697
5698 /**
5699 * stmmac_change_mtu - entry point to change MTU size for the device.
5700 * @dev : device pointer.
5701 * @new_mtu : the new MTU size for the device.
5702 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5703 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5704 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5705 * Return value:
5706 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5707 * file on failure.
5708 */
5709 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5710 {
5711 struct stmmac_priv *priv = netdev_priv(dev);
5712 int txfifosz = priv->plat->tx_fifo_size;
5713 struct stmmac_dma_conf *dma_conf;
5714 const int mtu = new_mtu;
5715 int ret;
5716
5717 if (txfifosz == 0)
5718 txfifosz = priv->dma_cap.tx_fifo_size;
5719
5720 txfifosz /= priv->plat->tx_queues_to_use;
5721
5722 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5723 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5724 return -EINVAL;
5725 }
5726
5727 new_mtu = STMMAC_ALIGN(new_mtu);
5728
5729 /* If condition true, FIFO is too small or MTU too large */
5730 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5731 return -EINVAL;
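/* The check above compares the aligned frame size against each TX queue's
 * share of the TX FIFO (total FIFO size divided by the number of TX queues
 * in use) and against the 16 KiB buffer limit; dev->mtu itself is later set
 * to the caller's original, un-aligned value.
 */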
5732
5733 if (netif_running(dev)) {
5734 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5735 /* Try to allocate the new DMA conf with the new mtu */
5736 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5737 if (IS_ERR(dma_conf)) {
5738 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5739 mtu);
5740 return PTR_ERR(dma_conf);
5741 }
5742
5743 stmmac_release(dev);
5744
5745 ret = __stmmac_open(dev, dma_conf);
5746 if (ret) {
5747 free_dma_desc_resources(priv, dma_conf);
5748 kfree(dma_conf);
5749 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5750 return ret;
5751 }
5752
5753 kfree(dma_conf);
5754
5755 stmmac_set_rx_mode(dev);
5756 }
5757
5758 dev->mtu = mtu;
5759 netdev_update_features(dev);
5760
5761 return 0;
5762 }
5763
5764 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5765 netdev_features_t features)
5766 {
5767 struct stmmac_priv *priv = netdev_priv(dev);
5768
5769 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5770 features &= ~NETIF_F_RXCSUM;
5771
5772 if (!priv->plat->tx_coe)
5773 features &= ~NETIF_F_CSUM_MASK;
5774
5775 /* Some GMAC devices have buggy Jumbo frame support that
5776  * needs to have the Tx COE disabled for oversized frames
5777  * (due to limited buffer sizes). In this case we disable
5778  * the TX csum insertion in the TDES and do not use SF.
5779  */
5780 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5781 features &= ~NETIF_F_CSUM_MASK;
5782
5783 /* Disable TSO if requested via ethtool */
5784 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5785 if (features & NETIF_F_TSO)
5786 priv->tso = true;
5787 else
5788 priv->tso = false;
5789 }
5790
5791 return features;
5792 }
5793
5794 static int stmmac_set_features(struct net_device *netdev,
5795 netdev_features_t features)
5796 {
5797 struct stmmac_priv *priv = netdev_priv(netdev);
5798
5799 /* Keep the COE type only if RX checksumming is enabled */
5800 if (features & NETIF_F_RXCSUM)
5801 priv->hw->rx_csum = priv->plat->rx_coe;
5802 else
5803 priv->hw->rx_csum = 0;
5804 /* No check needed because rx_coe has been set before and it will be
5805 * fixed in case of issue.
5806 */
5807 stmmac_rx_ipc(priv, priv->hw);
5808
5809 if (priv->sph_cap) {
5810 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5811 u32 chan;
5812
5813 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5814 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5815 }
5816
5817 return 0;
5818 }
5819
5820 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5821 {
5822 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5823 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5824 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5825 bool *hs_enable = &fpe_cfg->hs_enable;
5826
5827 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5828 return;
5829
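/* The mPacket exchange below implements the frame preemption verify
 * handshake (IEEE 802.3br): each side sends a verify mPacket and answers
 * the peer's verify with a response mPacket, and only after responses have
 * been seen in both directions does the state machine move towards FPE ON,
 * driven from the fpe_task worker scheduled at the end of this function.
 */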
5830 /* If LP has sent verify mPacket, LP is FPE capable */
5831 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5832 if (*lp_state < FPE_STATE_CAPABLE)
5833 *lp_state = FPE_STATE_CAPABLE;
5834
5835 /* If the user has requested FPE enable, respond quickly */
5836 if (*hs_enable)
5837 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5838 fpe_cfg,
5839 MPACKET_RESPONSE);
5840 }
5841
5842 /* If Local has sent verify mPacket, Local is FPE capable */
5843 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5844 if (*lo_state < FPE_STATE_CAPABLE)
5845 *lo_state = FPE_STATE_CAPABLE;
5846 }
5847
5848 /* If LP has sent response mPacket, LP is entering FPE ON */
5849 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5850 *lp_state = FPE_STATE_ENTERING_ON;
5851
5852 /* If Local has sent response mPacket, Local is entering FPE ON */
5853 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5854 *lo_state = FPE_STATE_ENTERING_ON;
5855
5856 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5857 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5858 priv->fpe_wq) {
5859 queue_work(priv->fpe_wq, &priv->fpe_task);
5860 }
5861 }
5862
5863 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5864 {
5865 u32 rx_cnt = priv->plat->rx_queues_to_use;
5866 u32 tx_cnt = priv->plat->tx_queues_to_use;
5867 u32 queues_count;
5868 u32 queue;
5869 bool xmac;
5870
5871 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5872 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5873
5874 if (priv->irq_wake)
5875 pm_wakeup_event(priv->device, 0);
5876
5877 if (priv->dma_cap.estsel)
5878 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5879 &priv->xstats, tx_cnt);
5880
5881 if (priv->dma_cap.fpesel) {
5882 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5883 priv->dev);
5884
5885 stmmac_fpe_event_status(priv, status);
5886 }
5887
5888 /* Handle the GMAC's own interrupts */
5889 if ((priv->plat->has_gmac) || xmac) {
5890 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5891
5892 if (unlikely(status)) {
5893 /* For LPI we need to save the tx status */
5894 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5895 priv->tx_path_in_lpi_mode = true;
5896 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5897 priv->tx_path_in_lpi_mode = false;
5898 }
5899
5900 for (queue = 0; queue < queues_count; queue++) {
5901 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5902 queue);
5903 }
5904
5905 /* PCS link status */
5906 if (priv->hw->pcs &&
5907 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5908 if (priv->xstats.pcs_link)
5909 netif_carrier_on(priv->dev);
5910 else
5911 netif_carrier_off(priv->dev);
5912 }
5913
5914 stmmac_timestamp_interrupt(priv, priv);
5915 }
5916 }
5917
5918 /**
5919 * stmmac_interrupt - main ISR
5920 * @irq: interrupt number.
5921 * @dev_id: to pass the net device pointer.
5922 * Description: this is the main driver interrupt service routine.
5923 * It can call:
5924 * o DMA service routine (to manage incoming frame reception and transmission
5925 * status)
5926 * o Core interrupts to manage: remote wake-up, management counter, LPI
5927 * interrupts.
5928 */
5929 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5930 {
5931 struct net_device *dev = (struct net_device *)dev_id;
5932 struct stmmac_priv *priv = netdev_priv(dev);
5933
5934 /* Check if adapter is up */
5935 if (test_bit(STMMAC_DOWN, &priv->state))
5936 return IRQ_HANDLED;
5937
5938 /* Check if a fatal error happened */
5939 if (stmmac_safety_feat_interrupt(priv))
5940 return IRQ_HANDLED;
5941
5942 /* To handle Common interrupts */
5943 stmmac_common_interrupt(priv);
5944
5945 /* To handle DMA interrupts */
5946 stmmac_dma_interrupt(priv);
5947
5948 return IRQ_HANDLED;
5949 }
5950
5951 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5952 {
5953 struct net_device *dev = (struct net_device *)dev_id;
5954 struct stmmac_priv *priv = netdev_priv(dev);
5955
5956 /* Check if adapter is up */
5957 if (test_bit(STMMAC_DOWN, &priv->state))
5958 return IRQ_HANDLED;
5959
5960 /* To handle Common interrupts */
5961 stmmac_common_interrupt(priv);
5962
5963 return IRQ_HANDLED;
5964 }
5965
5966 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5967 {
5968 struct net_device *dev = (struct net_device *)dev_id;
5969 struct stmmac_priv *priv = netdev_priv(dev);
5970
5971 /* Check if adapter is up */
5972 if (test_bit(STMMAC_DOWN, &priv->state))
5973 return IRQ_HANDLED;
5974
5975 /* Check if a fatal error happened */
5976 stmmac_safety_feat_interrupt(priv);
5977
5978 return IRQ_HANDLED;
5979 }
5980
5981 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5982 {
5983 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5984 struct stmmac_dma_conf *dma_conf;
5985 int chan = tx_q->queue_index;
5986 struct stmmac_priv *priv;
5987 int status;
5988
5989 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5990 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5991
5992 /* Check if adapter is up */
5993 if (test_bit(STMMAC_DOWN, &priv->state))
5994 return IRQ_HANDLED;
5995
5996 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5997
5998 if (unlikely(status & tx_hard_error_bump_tc)) {
5999 /* Try to bump up the dma threshold on this failure */
6000 stmmac_bump_dma_threshold(priv, chan);
6001 } else if (unlikely(status == tx_hard_error)) {
6002 stmmac_tx_err(priv, chan);
6003 }
6004
6005 return IRQ_HANDLED;
6006 }
6007
6008 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6009 {
6010 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6011 struct stmmac_dma_conf *dma_conf;
6012 int chan = rx_q->queue_index;
6013 struct stmmac_priv *priv;
6014
6015 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6016 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6017
6018 /* Check if adapter is up */
6019 if (test_bit(STMMAC_DOWN, &priv->state))
6020 return IRQ_HANDLED;
6021
6022 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6023
6024 return IRQ_HANDLED;
6025 }
6026
6027 /**
6028 * stmmac_ioctl - Entry point for the Ioctl
6029 * @dev: Device pointer.
6030 * @rq: An IOCTL-specific structure that can contain a pointer to
6031 * a proprietary structure used to pass information to the driver.
6032 * @cmd: IOCTL command
6033 * Description:
6034 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6035 */
6036 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6037 {
6038 struct stmmac_priv *priv = netdev_priv(dev);
6039 int ret = -EOPNOTSUPP;
6040
6041 if (!netif_running(dev))
6042 return -EINVAL;
6043
6044 switch (cmd) {
6045 case SIOCGMIIPHY:
6046 case SIOCGMIIREG:
6047 case SIOCSMIIREG:
6048 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6049 break;
6050 case SIOCSHWTSTAMP:
6051 ret = stmmac_hwtstamp_set(dev, rq);
6052 break;
6053 case SIOCGHWTSTAMP:
6054 ret = stmmac_hwtstamp_get(dev, rq);
6055 break;
6056 default:
6057 break;
6058 }
6059
6060 return ret;
6061 }
6062
6063 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6064 void *cb_priv)
6065 {
6066 struct stmmac_priv *priv = cb_priv;
6067 int ret = -EOPNOTSUPP;
6068
6069 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6070 return ret;
6071
6072 __stmmac_disable_all_queues(priv);
6073
6074 switch (type) {
6075 case TC_SETUP_CLSU32:
6076 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6077 break;
6078 case TC_SETUP_CLSFLOWER:
6079 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6080 break;
6081 default:
6082 break;
6083 }
6084
6085 stmmac_enable_all_queues(priv);
6086 return ret;
6087 }
6088
6089 static LIST_HEAD(stmmac_block_cb_list);
6090
6091 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6092 void *type_data)
6093 {
6094 struct stmmac_priv *priv = netdev_priv(ndev);
6095
6096 switch (type) {
6097 case TC_QUERY_CAPS:
6098 return stmmac_tc_query_caps(priv, priv, type_data);
6099 case TC_SETUP_BLOCK:
6100 return flow_block_cb_setup_simple(type_data,
6101 &stmmac_block_cb_list,
6102 stmmac_setup_tc_block_cb,
6103 priv, priv, true);
6104 case TC_SETUP_QDISC_CBS:
6105 return stmmac_tc_setup_cbs(priv, priv, type_data);
6106 case TC_SETUP_QDISC_TAPRIO:
6107 return stmmac_tc_setup_taprio(priv, priv, type_data);
6108 case TC_SETUP_QDISC_ETF:
6109 return stmmac_tc_setup_etf(priv, priv, type_data);
6110 default:
6111 return -EOPNOTSUPP;
6112 }
6113 }
6114
6115 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6116 struct net_device *sb_dev)
6117 {
6118 int gso = skb_shinfo(skb)->gso_type;
6119
6120 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6121 /*
6122 * There is no way to determine the number of TSO/USO
6123 * capable queues. Let's always use Queue 0
6124 * because if TSO/USO is supported then at least this
6125 * one will be capable.
6126 */
6127 return 0;
6128 }
6129
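/* For non-TSO traffic fall back to the stack's default pick; the modulo
 * guards against a queue index outside the currently enabled TX queues.
 */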
6130 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6131 }
6132
6133 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6134 {
6135 struct stmmac_priv *priv = netdev_priv(ndev);
6136 int ret = 0;
6137
6138 ret = pm_runtime_resume_and_get(priv->device);
6139 if (ret < 0)
6140 return ret;
6141
6142 ret = eth_mac_addr(ndev, addr);
6143 if (ret)
6144 goto set_mac_error;
6145
6146 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6147
6148 set_mac_error:
6149 pm_runtime_put(priv->device);
6150
6151 return ret;
6152 }
6153
6154 #ifdef CONFIG_DEBUG_FS
6155 static struct dentry *stmmac_fs_dir;
6156
6157 static void sysfs_display_ring(void *head, int size, int extend_desc,
6158 struct seq_file *seq, dma_addr_t dma_phy_addr)
6159 {
6160 int i;
6161 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6162 struct dma_desc *p = (struct dma_desc *)head;
6163 dma_addr_t dma_addr;
6164
6165 for (i = 0; i < size; i++) {
6166 if (extend_desc) {
6167 dma_addr = dma_phy_addr + i * sizeof(*ep);
6168 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6169 i, &dma_addr,
6170 le32_to_cpu(ep->basic.des0),
6171 le32_to_cpu(ep->basic.des1),
6172 le32_to_cpu(ep->basic.des2),
6173 le32_to_cpu(ep->basic.des3));
6174 ep++;
6175 } else {
6176 dma_addr = dma_phy_addr + i * sizeof(*p);
6177 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6178 i, &dma_addr,
6179 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6180 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6181 p++;
6182 }
6183 seq_printf(seq, "\n");
6184 }
6185 }
6186
6187 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6188 {
6189 struct net_device *dev = seq->private;
6190 struct stmmac_priv *priv = netdev_priv(dev);
6191 u32 rx_count = priv->plat->rx_queues_to_use;
6192 u32 tx_count = priv->plat->tx_queues_to_use;
6193 u32 queue;
6194
6195 if ((dev->flags & IFF_UP) == 0)
6196 return 0;
6197
6198 for (queue = 0; queue < rx_count; queue++) {
6199 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6200
6201 seq_printf(seq, "RX Queue %d:\n", queue);
6202
6203 if (priv->extend_desc) {
6204 seq_printf(seq, "Extended descriptor ring:\n");
6205 sysfs_display_ring((void *)rx_q->dma_erx,
6206 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6207 } else {
6208 seq_printf(seq, "Descriptor ring:\n");
6209 sysfs_display_ring((void *)rx_q->dma_rx,
6210 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6211 }
6212 }
6213
6214 for (queue = 0; queue < tx_count; queue++) {
6215 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6216
6217 seq_printf(seq, "TX Queue %d:\n", queue);
6218
6219 if (priv->extend_desc) {
6220 seq_printf(seq, "Extended descriptor ring:\n");
6221 sysfs_display_ring((void *)tx_q->dma_etx,
6222 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6223 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6224 seq_printf(seq, "Descriptor ring:\n");
6225 sysfs_display_ring((void *)tx_q->dma_tx,
6226 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6227 }
6228 }
6229
6230 return 0;
6231 }
6232 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6233
6234 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6235 {
6236 static const char * const dwxgmac_timestamp_source[] = {
6237 "None",
6238 "Internal",
6239 "External",
6240 "Both",
6241 };
6242 static const char * const dwxgmac_safety_feature_desc[] = {
6243 "No",
6244 "All Safety Features with ECC and Parity",
6245 "All Safety Features without ECC or Parity",
6246 "All Safety Features with Parity Only",
6247 "ECC Only",
6248 "UNDEFINED",
6249 "UNDEFINED",
6250 "UNDEFINED",
6251 };
6252 struct net_device *dev = seq->private;
6253 struct stmmac_priv *priv = netdev_priv(dev);
6254
6255 if (!priv->hw_cap_support) {
6256 seq_printf(seq, "DMA HW features not supported\n");
6257 return 0;
6258 }
6259
6260 seq_printf(seq, "==============================\n");
6261 seq_printf(seq, "\tDMA HW features\n");
6262 seq_printf(seq, "==============================\n");
6263
6264 seq_printf(seq, "\t10/100 Mbps: %s\n",
6265 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6266 seq_printf(seq, "\t1000 Mbps: %s\n",
6267 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6268 seq_printf(seq, "\tHalf duplex: %s\n",
6269 (priv->dma_cap.half_duplex) ? "Y" : "N");
6270 if (priv->plat->has_xgmac) {
6271 seq_printf(seq,
6272 "\tNumber of Additional MAC address registers: %d\n",
6273 priv->dma_cap.multi_addr);
6274 } else {
6275 seq_printf(seq, "\tHash Filter: %s\n",
6276 (priv->dma_cap.hash_filter) ? "Y" : "N");
6277 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6278 (priv->dma_cap.multi_addr) ? "Y" : "N");
6279 }
6280 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6281 (priv->dma_cap.pcs) ? "Y" : "N");
6282 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6283 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6284 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6285 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6286 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6287 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6288 seq_printf(seq, "\tRMON module: %s\n",
6289 (priv->dma_cap.rmon) ? "Y" : "N");
6290 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6291 (priv->dma_cap.time_stamp) ? "Y" : "N");
6292 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6293 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6294 if (priv->plat->has_xgmac)
6295 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6296 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6297 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6298 (priv->dma_cap.eee) ? "Y" : "N");
6299 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6300 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6301 (priv->dma_cap.tx_coe) ? "Y" : "N");
6302 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6303 priv->plat->has_xgmac) {
6304 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6305 (priv->dma_cap.rx_coe) ? "Y" : "N");
6306 } else {
6307 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6308 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6309 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6310 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6311 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6312 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6313 }
6314 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6315 priv->dma_cap.number_rx_channel);
6316 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6317 priv->dma_cap.number_tx_channel);
6318 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6319 priv->dma_cap.number_rx_queues);
6320 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6321 priv->dma_cap.number_tx_queues);
6322 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6323 (priv->dma_cap.enh_desc) ? "Y" : "N");
6324 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6325 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
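/* hash_tb_sz encodes the hash filter table size as 32 << hash_tb_sz
 * entries (e.g. a field value of 3 means a 256-entry table), with 0
 * meaning no hash table at all.
 */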
6326 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6327 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6328 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6329 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6330 priv->dma_cap.pps_out_num);
6331 seq_printf(seq, "\tSafety Features: %s\n",
6332 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6333 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6334 priv->dma_cap.frpsel ? "Y" : "N");
6335 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6336 priv->dma_cap.host_dma_width);
6337 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6338 priv->dma_cap.rssen ? "Y" : "N");
6339 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6340 priv->dma_cap.vlhash ? "Y" : "N");
6341 seq_printf(seq, "\tSplit Header: %s\n",
6342 priv->dma_cap.sphen ? "Y" : "N");
6343 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6344 priv->dma_cap.vlins ? "Y" : "N");
6345 seq_printf(seq, "\tDouble VLAN: %s\n",
6346 priv->dma_cap.dvlan ? "Y" : "N");
6347 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6348 priv->dma_cap.l3l4fnum);
6349 seq_printf(seq, "\tARP Offloading: %s\n",
6350 priv->dma_cap.arpoffsel ? "Y" : "N");
6351 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6352 priv->dma_cap.estsel ? "Y" : "N");
6353 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6354 priv->dma_cap.fpesel ? "Y" : "N");
6355 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6356 priv->dma_cap.tbssel ? "Y" : "N");
6357 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6358 priv->dma_cap.tbs_ch_num);
6359 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6360 priv->dma_cap.sgfsel ? "Y" : "N");
6361 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6362 BIT(priv->dma_cap.ttsfd) >> 1);
6363 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6364 priv->dma_cap.numtc);
6365 seq_printf(seq, "\tDCB Feature: %s\n",
6366 priv->dma_cap.dcben ? "Y" : "N");
6367 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6368 priv->dma_cap.advthword ? "Y" : "N");
6369 seq_printf(seq, "\tPTP Offload: %s\n",
6370 priv->dma_cap.ptoen ? "Y" : "N");
6371 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6372 priv->dma_cap.osten ? "Y" : "N");
6373 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6374 priv->dma_cap.pfcen ? "Y" : "N");
6375 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6376 BIT(priv->dma_cap.frpes) << 6);
6377 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6378 BIT(priv->dma_cap.frpbs) << 6);
6379 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6380 priv->dma_cap.frppipe_num);
6381 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6382 priv->dma_cap.nrvf_num ?
6383 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6384 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6385 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6386 seq_printf(seq, "\tDepth of GCL: %lu\n",
6387 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6388 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6389 priv->dma_cap.cbtisel ? "Y" : "N");
6390 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6391 priv->dma_cap.aux_snapshot_n);
6392 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6393 priv->dma_cap.pou_ost_en ? "Y" : "N");
6394 seq_printf(seq, "\tEnhanced DMA: %s\n",
6395 priv->dma_cap.edma ? "Y" : "N");
6396 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6397 priv->dma_cap.ediffc ? "Y" : "N");
6398 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6399 priv->dma_cap.vxn ? "Y" : "N");
6400 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6401 priv->dma_cap.dbgmem ? "Y" : "N");
6402 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6403 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6404 return 0;
6405 }
6406 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6407
6408 /* Use network device events to rename debugfs file entries.
6409 */
6410 static int stmmac_device_event(struct notifier_block *unused,
6411 unsigned long event, void *ptr)
6412 {
6413 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6414 struct stmmac_priv *priv = netdev_priv(dev);
6415
6416 if (dev->netdev_ops != &stmmac_netdev_ops)
6417 goto done;
6418
6419 switch (event) {
6420 case NETDEV_CHANGENAME:
6421 if (priv->dbgfs_dir)
6422 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6423 priv->dbgfs_dir,
6424 stmmac_fs_dir,
6425 dev->name);
6426 break;
6427 }
6428 done:
6429 return NOTIFY_DONE;
6430 }
6431
6432 static struct notifier_block stmmac_notifier = {
6433 .notifier_call = stmmac_device_event,
6434 };
6435
6436 static void stmmac_init_fs(struct net_device *dev)
6437 {
6438 struct stmmac_priv *priv = netdev_priv(dev);
6439
6440 rtnl_lock();
6441
6442 /* Create per netdev entries */
6443 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6444
6445 /* Entry to report DMA RX/TX rings */
6446 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6447 &stmmac_rings_status_fops);
6448
6449 /* Entry to report the DMA HW features */
6450 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6451 &stmmac_dma_cap_fops);
6452
6453 rtnl_unlock();
6454 }
6455
6456 static void stmmac_exit_fs(struct net_device *dev)
6457 {
6458 struct stmmac_priv *priv = netdev_priv(dev);
6459
6460 debugfs_remove_recursive(priv->dbgfs_dir);
6461 }
6462 #endif /* CONFIG_DEBUG_FS */
6463
6464 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6465 {
6466 unsigned char *data = (unsigned char *)&vid_le;
6467 unsigned char data_byte = 0;
6468 u32 crc = ~0x0;
6469 u32 temp = 0;
6470 int i, bits;
6471
6472 bits = get_bitmask_order(VLAN_VID_MASK);
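/* Bit-wise CRC-32 with the reflected polynomial 0xedb88320, computed over
 * only the 12 VID bits (get_bitmask_order(VLAN_VID_MASK) is 12), mirroring
 * the CRC the MAC applies for its VLAN hash filter.
 */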
6473 for (i = 0; i < bits; i++) {
6474 if ((i % 8) == 0)
6475 data_byte = data[i / 8];
6476
6477 temp = ((crc & 1) ^ data_byte) & 1;
6478 crc >>= 1;
6479 data_byte >>= 1;
6480
6481 if (temp)
6482 crc ^= 0xedb88320;
6483 }
6484
6485 return crc;
6486 }
6487
6488 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6489 {
6490 u32 crc, hash = 0;
6491 __le16 pmatch = 0;
6492 int count = 0;
6493 u16 vid = 0;
6494
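/* Each active VID contributes one bit to a 16-entry hash: the top four
 * bits of the bit-reversed, inverted CRC of the VID select which bit of
 * 'hash' gets set.
 */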
6495 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6496 __le16 vid_le = cpu_to_le16(vid);
6497 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6498 hash |= (1 << crc);
6499 count++;
6500 }
6501
6502 if (!priv->dma_cap.vlhash) {
6503 if (count > 2) /* VID = 0 always passes filter */
6504 return -EOPNOTSUPP;
6505
6506 pmatch = cpu_to_le16(vid);
6507 hash = 0;
6508 }
6509
6510 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6511 }
6512
6513 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6514 {
6515 struct stmmac_priv *priv = netdev_priv(ndev);
6516 bool is_double = false;
6517 int ret;
6518
6519 ret = pm_runtime_resume_and_get(priv->device);
6520 if (ret < 0)
6521 return ret;
6522
6523 if (be16_to_cpu(proto) == ETH_P_8021AD)
6524 is_double = true;
6525
6526 set_bit(vid, priv->active_vlans);
6527 ret = stmmac_vlan_update(priv, is_double);
6528 if (ret) {
6529 clear_bit(vid, priv->active_vlans);
6530 goto err_pm_put;
6531 }
6532
6533 if (priv->hw->num_vlan) {
6534 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6535 if (ret)
6536 goto err_pm_put;
6537 }
6538 err_pm_put:
6539 pm_runtime_put(priv->device);
6540
6541 return ret;
6542 }
6543
6544 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6545 {
6546 struct stmmac_priv *priv = netdev_priv(ndev);
6547 bool is_double = false;
6548 int ret;
6549
6550 ret = pm_runtime_resume_and_get(priv->device);
6551 if (ret < 0)
6552 return ret;
6553
6554 if (be16_to_cpu(proto) == ETH_P_8021AD)
6555 is_double = true;
6556
6557 clear_bit(vid, priv->active_vlans);
6558
6559 if (priv->hw->num_vlan) {
6560 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6561 if (ret)
6562 goto del_vlan_error;
6563 }
6564
6565 ret = stmmac_vlan_update(priv, is_double);
6566
6567 del_vlan_error:
6568 pm_runtime_put(priv->device);
6569
6570 return ret;
6571 }
6572
6573 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6574 {
6575 struct stmmac_priv *priv = netdev_priv(dev);
6576
6577 switch (bpf->command) {
6578 case XDP_SETUP_PROG:
6579 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6580 case XDP_SETUP_XSK_POOL:
6581 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6582 bpf->xsk.queue_id);
6583 default:
6584 return -EOPNOTSUPP;
6585 }
6586 }
6587
6588 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6589 struct xdp_frame **frames, u32 flags)
6590 {
6591 struct stmmac_priv *priv = netdev_priv(dev);
6592 int cpu = smp_processor_id();
6593 struct netdev_queue *nq;
6594 int i, nxmit = 0;
6595 int queue;
6596
6597 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6598 return -ENETDOWN;
6599
6600 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6601 return -EINVAL;
6602
6603 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6604 nq = netdev_get_tx_queue(priv->dev, queue);
6605
6606 __netif_tx_lock(nq, cpu);
6607 /* Avoids TX time-out as we are sharing with slow path */
6608 txq_trans_cond_update(nq);
6609
6610 for (i = 0; i < num_frames; i++) {
6611 int res;
6612
6613 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6614 if (res == STMMAC_XDP_CONSUMED)
6615 break;
6616
6617 nxmit++;
6618 }
6619
6620 if (flags & XDP_XMIT_FLUSH) {
6621 stmmac_flush_tx_descriptors(priv, queue);
6622 stmmac_tx_timer_arm(priv, queue);
6623 }
6624
6625 __netif_tx_unlock(nq);
6626
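/* nxmit may be smaller than num_frames if the TX ring filled up; per the
 * ndo_xdp_xmit contract the caller owns, and will free, any frames that
 * were not accepted.
 */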
6627 return nxmit;
6628 }
6629
6630 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6631 {
6632 struct stmmac_channel *ch = &priv->channel[queue];
6633 unsigned long flags;
6634
6635 spin_lock_irqsave(&ch->lock, flags);
6636 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6637 spin_unlock_irqrestore(&ch->lock, flags);
6638
6639 stmmac_stop_rx_dma(priv, queue);
6640 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6641 }
6642
6643 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6644 {
6645 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6646 struct stmmac_channel *ch = &priv->channel[queue];
6647 unsigned long flags;
6648 u32 buf_size;
6649 int ret;
6650
6651 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6652 if (ret) {
6653 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6654 return;
6655 }
6656
6657 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6658 if (ret) {
6659 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6660 netdev_err(priv->dev, "Failed to init RX desc.\n");
6661 return;
6662 }
6663
6664 stmmac_reset_rx_queue(priv, queue);
6665 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6666
6667 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6668 rx_q->dma_rx_phy, rx_q->queue_index);
6669
6670 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6671 sizeof(struct dma_desc));
6672 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6673 rx_q->rx_tail_addr, rx_q->queue_index);
6674
6675 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6676 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6677 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6678 buf_size,
6679 rx_q->queue_index);
6680 } else {
6681 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6682 priv->dma_conf.dma_buf_sz,
6683 rx_q->queue_index);
6684 }
6685
6686 stmmac_start_rx_dma(priv, queue);
6687
6688 spin_lock_irqsave(&ch->lock, flags);
6689 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6690 spin_unlock_irqrestore(&ch->lock, flags);
6691 }
6692
6693 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6694 {
6695 struct stmmac_channel *ch = &priv->channel[queue];
6696 unsigned long flags;
6697
6698 spin_lock_irqsave(&ch->lock, flags);
6699 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6700 spin_unlock_irqrestore(&ch->lock, flags);
6701
6702 stmmac_stop_tx_dma(priv, queue);
6703 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6704 }
6705
6706 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6707 {
6708 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6709 struct stmmac_channel *ch = &priv->channel[queue];
6710 unsigned long flags;
6711 int ret;
6712
6713 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6714 if (ret) {
6715 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6716 return;
6717 }
6718
6719 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6720 if (ret) {
6721 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6722 netdev_err(priv->dev, "Failed to init TX desc.\n");
6723 return;
6724 }
6725
6726 stmmac_reset_tx_queue(priv, queue);
6727 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6728
6729 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6730 tx_q->dma_tx_phy, tx_q->queue_index);
6731
6732 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6733 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6734
6735 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6736 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6737 tx_q->tx_tail_addr, tx_q->queue_index);
6738
6739 stmmac_start_tx_dma(priv, queue);
6740
6741 spin_lock_irqsave(&ch->lock, flags);
6742 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6743 spin_unlock_irqrestore(&ch->lock, flags);
6744 }
6745
6746 void stmmac_xdp_release(struct net_device *dev)
6747 {
6748 struct stmmac_priv *priv = netdev_priv(dev);
6749 u32 chan;
6750
6751 /* Ensure tx function is not running */
6752 netif_tx_disable(dev);
6753
6754 /* Disable NAPI process */
6755 stmmac_disable_all_queues(priv);
6756
6757 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6758 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6759
6760 /* Free the IRQ lines */
6761 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6762
6763 /* Stop TX/RX DMA channels */
6764 stmmac_stop_all_dma(priv);
6765
6766 /* Release and free the Rx/Tx resources */
6767 free_dma_desc_resources(priv, &priv->dma_conf);
6768
6769 /* Disable the MAC Rx/Tx */
6770 stmmac_mac_set(priv, priv->ioaddr, false);
6771
6772 /* set trans_start so we don't get spurious
6773 * watchdogs during reset
6774 */
6775 netif_trans_update(dev);
6776 netif_carrier_off(dev);
6777 }
6778
6779 int stmmac_xdp_open(struct net_device *dev)
6780 {
6781 struct stmmac_priv *priv = netdev_priv(dev);
6782 u32 rx_cnt = priv->plat->rx_queues_to_use;
6783 u32 tx_cnt = priv->plat->tx_queues_to_use;
6784 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6785 struct stmmac_rx_queue *rx_q;
6786 struct stmmac_tx_queue *tx_q;
6787 u32 buf_size;
6788 bool sph_en;
6789 u32 chan;
6790 int ret;
6791
6792 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6793 if (ret < 0) {
6794 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6795 __func__);
6796 goto dma_desc_error;
6797 }
6798
6799 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6800 if (ret < 0) {
6801 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6802 __func__);
6803 goto init_error;
6804 }
6805
6806 stmmac_reset_queues_param(priv);
6807
6808 /* DMA CSR Channel configuration */
6809 for (chan = 0; chan < dma_csr_ch; chan++) {
6810 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6811 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6812 }
6813
6814 /* Adjust Split header */
6815 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6816
6817 /* DMA RX Channel Configuration */
6818 for (chan = 0; chan < rx_cnt; chan++) {
6819 rx_q = &priv->dma_conf.rx_queue[chan];
6820
6821 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6822 rx_q->dma_rx_phy, chan);
6823
6824 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6825 (rx_q->buf_alloc_num *
6826 sizeof(struct dma_desc));
6827 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6828 rx_q->rx_tail_addr, chan);
6829
6830 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6831 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6832 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6833 buf_size,
6834 rx_q->queue_index);
6835 } else {
6836 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6837 priv->dma_conf.dma_buf_sz,
6838 rx_q->queue_index);
6839 }
6840
6841 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6842 }
6843
6844 /* DMA TX Channel Configuration */
6845 for (chan = 0; chan < tx_cnt; chan++) {
6846 tx_q = &priv->dma_conf.tx_queue[chan];
6847
6848 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6849 tx_q->dma_tx_phy, chan);
6850
6851 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6852 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6853 tx_q->tx_tail_addr, chan);
6854
6855 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6856 tx_q->txtimer.function = stmmac_tx_timer;
6857 }
6858
6859 /* Enable the MAC Rx/Tx */
6860 stmmac_mac_set(priv, priv->ioaddr, true);
6861
6862 /* Start Rx & Tx DMA Channels */
6863 stmmac_start_all_dma(priv);
6864
6865 ret = stmmac_request_irq(dev);
6866 if (ret)
6867 goto irq_error;
6868
6869 /* Enable NAPI process */
6870 stmmac_enable_all_queues(priv);
6871 netif_carrier_on(dev);
6872 netif_tx_start_all_queues(dev);
6873 stmmac_enable_all_dma_irq(priv);
6874
6875 return 0;
6876
6877 irq_error:
6878 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6879 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6880
6881 stmmac_hw_teardown(dev);
6882 init_error:
6883 free_dma_desc_resources(priv, &priv->dma_conf);
6884 dma_desc_error:
6885 return ret;
6886 }
6887
6888 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6889 {
6890 struct stmmac_priv *priv = netdev_priv(dev);
6891 struct stmmac_rx_queue *rx_q;
6892 struct stmmac_tx_queue *tx_q;
6893 struct stmmac_channel *ch;
6894
6895 if (test_bit(STMMAC_DOWN, &priv->state) ||
6896 !netif_carrier_ok(priv->dev))
6897 return -ENETDOWN;
6898
6899 if (!stmmac_xdp_is_enabled(priv))
6900 return -EINVAL;
6901
6902 if (queue >= priv->plat->rx_queues_to_use ||
6903 queue >= priv->plat->tx_queues_to_use)
6904 return -EINVAL;
6905
6906 rx_q = &priv->dma_conf.rx_queue[queue];
6907 tx_q = &priv->dma_conf.tx_queue[queue];
6908 ch = &priv->channel[queue];
6909
6910 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6911 return -EINVAL;
6912
6913 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6914 /* EQoS does not have per-DMA channel SW interrupt,
6915 * so we schedule the RX/TX NAPI straight away.
6916 */
6917 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6918 __napi_schedule(&ch->rxtx_napi);
6919 }
6920
6921 return 0;
6922 }
6923
6924 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6925 {
6926 struct stmmac_priv *priv = netdev_priv(dev);
6927 u32 tx_cnt = priv->plat->tx_queues_to_use;
6928 u32 rx_cnt = priv->plat->rx_queues_to_use;
6929 unsigned int start;
6930 int q;
6931
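/* The u64_stats_fetch_begin()/retry() loops below re-read each counter
 * pair until no writer updated it in between, which is what makes these
 * 64-bit counters safe to read on 32-bit systems without extra locking.
 */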
6932 for (q = 0; q < tx_cnt; q++) {
6933 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
6934 u64 tx_packets;
6935 u64 tx_bytes;
6936
6937 do {
6938 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
6939 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
6940 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
6941 do {
6942 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
6943 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
6944 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
6945
6946 stats->tx_packets += tx_packets;
6947 stats->tx_bytes += tx_bytes;
6948 }
6949
6950 for (q = 0; q < rx_cnt; q++) {
6951 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
6952 u64 rx_packets;
6953 u64 rx_bytes;
6954
6955 do {
6956 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
6957 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
6958 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
6959 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
6960
6961 stats->rx_packets += rx_packets;
6962 stats->rx_bytes += rx_bytes;
6963 }
6964
6965 stats->rx_dropped = priv->xstats.rx_dropped;
6966 stats->rx_errors = priv->xstats.rx_errors;
6967 stats->tx_dropped = priv->xstats.tx_dropped;
6968 stats->tx_errors = priv->xstats.tx_errors;
6969 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
6970 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
6971 stats->rx_length_errors = priv->xstats.rx_length;
6972 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
6973 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
6974 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
6975 }
6976
6977 static const struct net_device_ops stmmac_netdev_ops = {
6978 .ndo_open = stmmac_open,
6979 .ndo_start_xmit = stmmac_xmit,
6980 .ndo_stop = stmmac_release,
6981 .ndo_change_mtu = stmmac_change_mtu,
6982 .ndo_fix_features = stmmac_fix_features,
6983 .ndo_set_features = stmmac_set_features,
6984 .ndo_set_rx_mode = stmmac_set_rx_mode,
6985 .ndo_tx_timeout = stmmac_tx_timeout,
6986 .ndo_eth_ioctl = stmmac_ioctl,
6987 .ndo_get_stats64 = stmmac_get_stats64,
6988 .ndo_setup_tc = stmmac_setup_tc,
6989 .ndo_select_queue = stmmac_select_queue,
6990 .ndo_set_mac_address = stmmac_set_mac_address,
6991 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6992 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6993 .ndo_bpf = stmmac_bpf,
6994 .ndo_xdp_xmit = stmmac_xdp_xmit,
6995 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6996 };
6997
6998 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6999 {
7000 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7001 return;
7002 if (test_bit(STMMAC_DOWN, &priv->state))
7003 return;
7004
7005 netdev_err(priv->dev, "Reset adapter.\n");
7006
7007 rtnl_lock();
7008 netif_trans_update(priv->dev);
7009 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7010 usleep_range(1000, 2000);
7011
7012 set_bit(STMMAC_DOWN, &priv->state);
7013 dev_close(priv->dev);
7014 dev_open(priv->dev, NULL);
7015 clear_bit(STMMAC_DOWN, &priv->state);
7016 clear_bit(STMMAC_RESETING, &priv->state);
7017 rtnl_unlock();
7018 }
7019
7020 static void stmmac_service_task(struct work_struct *work)
7021 {
7022 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7023 service_task);
7024
7025 stmmac_reset_subtask(priv);
7026 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7027 }
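/* The service task is scheduled from the driver's error paths (e.g. on a
 * detected HW/DMA error) by setting STMMAC_RESET_REQUESTED and queueing
 * priv->service_task on priv->wq. A rough sketch of the scheduling side
 * (implemented earlier in this file) is:
 *
 *	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 *	if (!test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 *		queue_work(priv->wq, &priv->service_task);
 */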
7028
7029 /**
7030 * stmmac_hw_init - Init the MAC device
7031 * @priv: driver private structure
7032 * Description: this function configures the MAC device according to
7033 * some platform parameters or the HW capability register. It prepares the
7034 * driver to use either ring or chain modes and to setup either enhanced or
7035 * normal descriptors.
7036 */
7037 static int stmmac_hw_init(struct stmmac_priv *priv)
7038 {
7039 int ret;
7040
7041 /* dwmac-sun8i only works in chain mode */
7042 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7043 chain_mode = 1;
7044 priv->chain_mode = chain_mode;
7045
7046 /* Initialize HW Interface */
7047 ret = stmmac_hwif_init(priv);
7048 if (ret)
7049 return ret;
7050
7051 /* Get the HW capabilities (supported on GMAC cores newer than 3.50a) */
7052 priv->hw_cap_support = stmmac_get_hw_features(priv);
7053 if (priv->hw_cap_support) {
7054 dev_info(priv->device, "DMA HW capability register supported\n");
7055
7056 /* Some GMAC/DMA configuration fields passed through the
7057 * platform data (e.g. enh_desc, tx_coe) can be overridden
7058 * with the values from the HW capability register, if
7059 * supported.
7060 */
7061 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7062 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7063 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7064 priv->hw->pmt = priv->plat->pmt;
7065 if (priv->dma_cap.hash_tb_sz) {
7066 priv->hw->multicast_filter_bins =
7067 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7068 priv->hw->mcast_bits_log2 =
7069 ilog2(priv->hw->multicast_filter_bins);
7070 }
7071
7072 /* TXCOE doesn't work in thresh DMA mode */
7073 if (priv->plat->force_thresh_dma_mode)
7074 priv->plat->tx_coe = 0;
7075 else
7076 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7077
7078 /* In the case of GMAC4, rx_coe comes from the HW capability register. */
7079 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7080
7081 if (priv->dma_cap.rx_coe_type2)
7082 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7083 else if (priv->dma_cap.rx_coe_type1)
7084 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7085
7086 } else {
7087 dev_info(priv->device, "No HW DMA feature register supported\n");
7088 }
7089
7090 if (priv->plat->rx_coe) {
7091 priv->hw->rx_csum = priv->plat->rx_coe;
7092 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7093 if (priv->synopsys_id < DWMAC_CORE_4_00)
7094 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7095 }
7096 if (priv->plat->tx_coe)
7097 dev_info(priv->device, "TX Checksum insertion supported\n");
7098
7099 if (priv->plat->pmt) {
7100 dev_info(priv->device, "Wake-Up On Lan supported\n");
7101 device_set_wakeup_capable(priv->device, 1);
7102 }
7103
7104 if (priv->dma_cap.tsoen)
7105 dev_info(priv->device, "TSO supported\n");
7106
7107 priv->hw->vlan_fail_q_en =
7108 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7109 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7110
7111 /* Run HW quirks, if any */
7112 if (priv->hwif_quirks) {
7113 ret = priv->hwif_quirks(priv);
7114 if (ret)
7115 return ret;
7116 }
7117
7118 /* Rx Watchdog is available in cores newer than 3.40.
7119 * In some cases, for example on buggy HW, this feature
7120 * has to be disabled; this can be done by passing the
7121 * riwt_off field from the platform.
7122 */
7123 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7124 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7125 priv->use_riwt = 1;
7126 dev_info(priv->device,
7127 "Enable RX Mitigation via HW Watchdog Timer\n");
7128 }
7129
7130 return 0;
7131 }
7132
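/* Each DMA channel gets up to three NAPI contexts: rx_napi for RX-only
 * queues, tx_napi for TX-only queues, and a combined rxtx_napi used for the
 * XDP zero-copy (AF_XDP) path when the queue index is valid for both
 * directions.
 */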
7133 static void stmmac_napi_add(struct net_device *dev)
7134 {
7135 struct stmmac_priv *priv = netdev_priv(dev);
7136 u32 queue, maxq;
7137
7138 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7139
7140 for (queue = 0; queue < maxq; queue++) {
7141 struct stmmac_channel *ch = &priv->channel[queue];
7142
7143 ch->priv_data = priv;
7144 ch->index = queue;
7145 spin_lock_init(&ch->lock);
7146
7147 if (queue < priv->plat->rx_queues_to_use) {
7148 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7149 }
7150 if (queue < priv->plat->tx_queues_to_use) {
7151 netif_napi_add_tx(dev, &ch->tx_napi,
7152 stmmac_napi_poll_tx);
7153 }
7154 if (queue < priv->plat->rx_queues_to_use &&
7155 queue < priv->plat->tx_queues_to_use) {
7156 netif_napi_add(dev, &ch->rxtx_napi,
7157 stmmac_napi_poll_rxtx);
7158 }
7159 }
7160 }
7161
7162 static void stmmac_napi_del(struct net_device *dev)
7163 {
7164 struct stmmac_priv *priv = netdev_priv(dev);
7165 u32 queue, maxq;
7166
7167 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7168
7169 for (queue = 0; queue < maxq; queue++) {
7170 struct stmmac_channel *ch = &priv->channel[queue];
7171
7172 if (queue < priv->plat->rx_queues_to_use)
7173 netif_napi_del(&ch->rx_napi);
7174 if (queue < priv->plat->tx_queues_to_use)
7175 netif_napi_del(&ch->tx_napi);
7176 if (queue < priv->plat->rx_queues_to_use &&
7177 queue < priv->plat->tx_queues_to_use) {
7178 netif_napi_del(&ch->rxtx_napi);
7179 }
7180 }
7181 }
7182
7183 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7184 {
7185 struct stmmac_priv *priv = netdev_priv(dev);
7186 int ret = 0, i;
7187
7188 if (netif_running(dev))
7189 stmmac_release(dev);
7190
7191 stmmac_napi_del(dev);
7192
7193 priv->plat->rx_queues_to_use = rx_cnt;
7194 priv->plat->tx_queues_to_use = tx_cnt;
7195 if (!netif_is_rxfh_configured(dev))
7196 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7197 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7198 rx_cnt);
7199
7200 stmmac_set_half_duplex(priv);
7201 stmmac_napi_add(dev);
7202
7203 if (netif_running(dev))
7204 ret = stmmac_open(dev);
7205
7206 return ret;
7207 }
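/* stmmac_reinit_queues() backs the ethtool channels interface (e.g.
 * "ethtool -L ethX rx N tx M" via the set_channels callback in
 * stmmac_ethtool.c): the interface is torn down, the queue counts and the
 * default RSS table are updated, and the interface is re-opened.
 */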
7208
7209 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7210 {
7211 struct stmmac_priv *priv = netdev_priv(dev);
7212 int ret = 0;
7213
7214 if (netif_running(dev))
7215 stmmac_release(dev);
7216
7217 priv->dma_conf.dma_rx_size = rx_size;
7218 priv->dma_conf.dma_tx_size = tx_size;
7219
7220 if (netif_running(dev))
7221 ret = stmmac_open(dev);
7222
7223 return ret;
7224 }
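/* Similarly, stmmac_reinit_ringparam() backs "ethtool -G ethX rx N tx M"
 * (the ethtool set_ringparam callback); the new descriptor ring sizes only
 * take effect across the release/open cycle performed here.
 */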
7225
7226 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
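/* stmmac_fpe_lp_task - FPE (Frame Preemption) verification handshake worker.
 * It keeps sending verify mPackets to the link partner until both the local
 * and link-partner state machines reach FPE_STATE_ON, then programs the MAC
 * for frame preemption; it gives up after ~20 retries (500 ms apart) or when
 * the handshake is turned off.
 */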
7227 static void stmmac_fpe_lp_task(struct work_struct *work)
7228 {
7229 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7230 fpe_task);
7231 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7232 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7233 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7234 bool *hs_enable = &fpe_cfg->hs_enable;
7235 bool *enable = &fpe_cfg->enable;
7236 int retries = 20;
7237
7238 while (retries-- > 0) {
7239 /* Bail out immediately if FPE handshake is OFF */
7240 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7241 break;
7242
7243 if (*lo_state == FPE_STATE_ENTERING_ON &&
7244 *lp_state == FPE_STATE_ENTERING_ON) {
7245 stmmac_fpe_configure(priv, priv->ioaddr,
7246 fpe_cfg,
7247 priv->plat->tx_queues_to_use,
7248 priv->plat->rx_queues_to_use,
7249 *enable);
7250
7251 netdev_info(priv->dev, "configured FPE\n");
7252
7253 *lo_state = FPE_STATE_ON;
7254 *lp_state = FPE_STATE_ON;
7255 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7256 break;
7257 }
7258
7259 if ((*lo_state == FPE_STATE_CAPABLE ||
7260 *lo_state == FPE_STATE_ENTERING_ON) &&
7261 *lp_state != FPE_STATE_ON) {
7262 netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7263 *lo_state, *lp_state);
7264 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7265 fpe_cfg,
7266 MPACKET_VERIFY);
7267 }
7268 /* Sleep then retry */
7269 msleep(500);
7270 }
7271
7272 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7273 }
7274
7275 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7276 {
7277 if (priv->plat->fpe_cfg->hs_enable != enable) {
7278 if (enable) {
7279 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7280 priv->plat->fpe_cfg,
7281 MPACKET_VERIFY);
7282 } else {
7283 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7284 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7285 }
7286
7287 priv->plat->fpe_cfg->hs_enable = enable;
7288 }
7289 }
7290
7291 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7292 {
7293 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7294 struct dma_desc *desc_contains_ts = ctx->desc;
7295 struct stmmac_priv *priv = ctx->priv;
7296 struct dma_desc *ndesc = ctx->ndesc;
7297 struct dma_desc *desc = ctx->desc;
7298 u64 ns = 0;
7299
7300 if (!priv->hwts_rx_en)
7301 return -ENODATA;
7302
7303 /* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7304 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7305 desc_contains_ts = ndesc;
7306
7307 /* Check if timestamp is available */
7308 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7309 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7310 ns -= priv->plat->cdc_error_adj;
7311 *timestamp = ns_to_ktime(ns);
7312 return 0;
7313 }
7314
7315 return -ENODATA;
7316 }
7317
7318 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7319 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7320 };
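/* With .xmo_rx_timestamp wired up, an XDP program can read the RX hardware
 * timestamp through the XDP hints kfunc. A minimal, hypothetical BPF-side
 * sketch:
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int xdp_prog(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("rx hw ts: %llu", ts);
 *		return XDP_PASS;
 *	}
 */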
7321
7322 /**
7323 * stmmac_dvr_probe
7324 * @device: device pointer
7325 * @plat_dat: platform data pointer
7326 * @res: stmmac resource pointer
7327 * Description: this is the main probe function used to
7328 * call alloc_etherdev and allocate the private structure.
7329 * Return:
7330 * returns 0 on success, otherwise errno.
7331 */
7332 int stmmac_dvr_probe(struct device *device,
7333 struct plat_stmmacenet_data *plat_dat,
7334 struct stmmac_resources *res)
7335 {
7336 struct net_device *ndev = NULL;
7337 struct stmmac_priv *priv;
7338 u32 rxq;
7339 int i, ret = 0;
7340
7341 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7342 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7343 if (!ndev)
7344 return -ENOMEM;
7345
7346 SET_NETDEV_DEV(ndev, device);
7347
7348 priv = netdev_priv(ndev);
7349 priv->device = device;
7350 priv->dev = ndev;
7351
7352 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7353 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7354 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7355 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7356 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7357 }
7358
7359 priv->xstats.pcpu_stats =
7360 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7361 if (!priv->xstats.pcpu_stats)
7362 return -ENOMEM;
7363
7364 stmmac_set_ethtool_ops(ndev);
7365 priv->pause = pause;
7366 priv->plat = plat_dat;
7367 priv->ioaddr = res->addr;
7368 priv->dev->base_addr = (unsigned long)res->addr;
7369 priv->plat->dma_cfg->multi_msi_en =
7370 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7371
7372 priv->dev->irq = res->irq;
7373 priv->wol_irq = res->wol_irq;
7374 priv->lpi_irq = res->lpi_irq;
7375 priv->sfty_ce_irq = res->sfty_ce_irq;
7376 priv->sfty_ue_irq = res->sfty_ue_irq;
7377 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7378 priv->rx_irq[i] = res->rx_irq[i];
7379 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7380 priv->tx_irq[i] = res->tx_irq[i];
7381
7382 if (!is_zero_ether_addr(res->mac))
7383 eth_hw_addr_set(priv->dev, res->mac);
7384
7385 dev_set_drvdata(device, priv->dev);
7386
7387 /* Verify driver arguments */
7388 stmmac_verify_args();
7389
7390 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7391 if (!priv->af_xdp_zc_qps)
7392 return -ENOMEM;
7393
7394 /* Allocate workqueue */
7395 priv->wq = create_singlethread_workqueue("stmmac_wq");
7396 if (!priv->wq) {
7397 dev_err(priv->device, "failed to create workqueue\n");
7398 ret = -ENOMEM;
7399 goto error_wq_init;
7400 }
7401
7402 INIT_WORK(&priv->service_task, stmmac_service_task);
7403
7404 /* Initialize Link Partner FPE workqueue */
7405 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7406
7407 /* Override with kernel parameters if supplied XXX CRS XXX
7408 * this needs to have multiple instances
7409 */
7410 if ((phyaddr >= 0) && (phyaddr <= 31))
7411 priv->plat->phy_addr = phyaddr;
7412
7413 if (priv->plat->stmmac_rst) {
7414 ret = reset_control_assert(priv->plat->stmmac_rst);
7415 reset_control_deassert(priv->plat->stmmac_rst);
7416 /* Some reset controllers only implement a reset callback
7417 * instead of the assert + deassert callback pair.
7418 */
7419 if (ret == -ENOTSUPP)
7420 reset_control_reset(priv->plat->stmmac_rst);
7421 }
7422
7423 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7424 if (ret == -ENOTSUPP)
7425 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7426 ERR_PTR(ret));
7427
7428 /* Wait a bit for the reset to take effect */
7429 udelay(10);
7430
7431 /* Init MAC and get the capabilities */
7432 ret = stmmac_hw_init(priv);
7433 if (ret)
7434 goto error_hw_init;
7435
7436 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7437 */
7438 if (priv->synopsys_id < DWMAC_CORE_5_20)
7439 priv->plat->dma_cfg->dche = false;
7440
7441 stmmac_check_ether_addr(priv);
7442
7443 ndev->netdev_ops = &stmmac_netdev_ops;
7444
7445 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7446
7447 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7448 NETIF_F_RXCSUM;
7449 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7450 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7451
7452 ret = stmmac_tc_init(priv, priv);
7453 if (!ret) {
7454 ndev->hw_features |= NETIF_F_HW_TC;
7455 }
7456
7457 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7458 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7459 if (priv->plat->has_gmac4)
7460 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7461 priv->tso = true;
7462 dev_info(priv->device, "TSO feature enabled\n");
7463 }
7464
7465 if (priv->dma_cap.sphen &&
7466 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7467 ndev->hw_features |= NETIF_F_GRO;
7468 priv->sph_cap = true;
7469 priv->sph = priv->sph_cap;
7470 dev_info(priv->device, "SPH feature enabled\n");
7471 }
7472
7473 /* Ideally our host DMA address width is the same as for the
7474 * device. However, it may differ and then we have to use our
7475 * host DMA width for allocation and the device DMA width for
7476 * register handling.
7477 */
7478 if (priv->plat->host_dma_width)
7479 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7480 else
7481 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7482
7483 if (priv->dma_cap.host_dma_width) {
7484 ret = dma_set_mask_and_coherent(device,
7485 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7486 if (!ret) {
7487 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7488 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7489
7490 /*
7491 * If more than 32 bits can be addressed, make sure to
7492 * enable enhanced addressing mode.
7493 */
7494 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7495 priv->plat->dma_cfg->eame = true;
7496 } else {
7497 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7498 if (ret) {
7499 dev_err(priv->device, "Failed to set DMA Mask\n");
7500 goto error_hw_init;
7501 }
7502
7503 priv->dma_cap.host_dma_width = 32;
7504 }
7505 }
7506
7507 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7508 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7509 #ifdef STMMAC_VLAN_TAG_USED
7510 /* Both mac100 and gmac support receive VLAN tag detection */
7511 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7512 if (priv->dma_cap.vlhash) {
7513 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7514 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7515 }
7516 if (priv->dma_cap.vlins) {
7517 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7518 if (priv->dma_cap.dvlan)
7519 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7520 }
7521 #endif
7522 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7523
7524 priv->xstats.threshold = tc;
7525
7526 /* Initialize RSS */
7527 rxq = priv->plat->rx_queues_to_use;
7528 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7529 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7530 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7531
7532 if (priv->dma_cap.rssen && priv->plat->rss_en)
7533 ndev->features |= NETIF_F_RXHASH;
7534
7535 ndev->vlan_features |= ndev->features;
7536 /* TSO doesn't work on VLANs yet */
7537 ndev->vlan_features &= ~NETIF_F_TSO;
7538
7539 /* MTU range: 46 - hw-specific max */
7540 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7541 if (priv->plat->has_xgmac)
7542 ndev->max_mtu = XGMAC_JUMBO_LEN;
7543 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7544 ndev->max_mtu = JUMBO_LEN;
7545 else
7546 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7547 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7548 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7549 */
7550 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7551 (priv->plat->maxmtu >= ndev->min_mtu))
7552 ndev->max_mtu = priv->plat->maxmtu;
7553 else if (priv->plat->maxmtu < ndev->min_mtu)
7554 dev_warn(priv->device,
7555 "%s: warning: maxmtu having invalid value (%d)\n",
7556 __func__, priv->plat->maxmtu);
7557
7558 if (flow_ctrl)
7559 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7560
7561 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7562
7563 /* Setup channels NAPI */
7564 stmmac_napi_add(ndev);
7565
7566 mutex_init(&priv->lock);
7567
7568 /* If a specific clk_csr value is passed from the platform,
7569 * the CSR Clock Range selection cannot be changed at
7570 * run-time and is fixed. Otherwise, the driver tries to
7571 * set the MDC clock dynamically according to the actual
7572 * csr clock input.
7573 */
7574 if (priv->plat->clk_csr >= 0)
7575 priv->clk_csr = priv->plat->clk_csr;
7576 else
7577 stmmac_clk_csr_set(priv);
7578
7579 stmmac_check_pcs_mode(priv);
7580
7581 pm_runtime_get_noresume(device);
7582 pm_runtime_set_active(device);
7583 if (!pm_runtime_enabled(device))
7584 pm_runtime_enable(device);
7585
7586 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7587 priv->hw->pcs != STMMAC_PCS_RTBI) {
7588 /* MDIO bus Registration */
7589 ret = stmmac_mdio_register(ndev);
7590 if (ret < 0) {
7591 dev_err_probe(priv->device, ret,
7592 "%s: MDIO bus (id: %d) registration failed\n",
7593 __func__, priv->plat->bus_id);
7594 goto error_mdio_register;
7595 }
7596 }
7597
7598 if (priv->plat->speed_mode_2500)
7599 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7600
7601 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7602 ret = stmmac_xpcs_setup(priv->mii);
7603 if (ret)
7604 goto error_xpcs_setup;
7605 }
7606
7607 ret = stmmac_phy_setup(priv);
7608 if (ret) {
7609 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7610 goto error_phy_setup;
7611 }
7612
7613 ret = register_netdev(ndev);
7614 if (ret) {
7615 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7616 __func__, ret);
7617 goto error_netdev_register;
7618 }
7619
7620 #ifdef CONFIG_DEBUG_FS
7621 stmmac_init_fs(ndev);
7622 #endif
7623
7624 if (priv->plat->dump_debug_regs)
7625 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7626
7627 /* Let pm_runtime_put() disable the clocks.
7628 * If CONFIG_PM is not enabled, the clocks will stay powered.
7629 */
7630 pm_runtime_put(device);
7631
7632 return ret;
7633
7634 error_netdev_register:
7635 phylink_destroy(priv->phylink);
7636 error_xpcs_setup:
7637 error_phy_setup:
7638 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7639 priv->hw->pcs != STMMAC_PCS_RTBI)
7640 stmmac_mdio_unregister(ndev);
7641 error_mdio_register:
7642 stmmac_napi_del(ndev);
7643 error_hw_init:
7644 destroy_workqueue(priv->wq);
7645 error_wq_init:
7646 bitmap_free(priv->af_xdp_zc_qps);
7647
7648 return ret;
7649 }
7650 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
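/* stmmac_dvr_probe() is not called directly by the core; each platform/PCI
 * glue driver gathers its resources and platform data first. A rough,
 * simplified sketch of a platform glue probe (foo_dwmac_probe is hypothetical
 * and the exact helper signatures live in stmmac_platform.h and may differ
 * between kernel versions):
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *		if (ret)
 *			return ret;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 */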
7651
7652 /**
7653 * stmmac_dvr_remove
7654 * @dev: device pointer
7655 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7656 * changes the link status and releases the DMA descriptor rings.
7657 */
7658 void stmmac_dvr_remove(struct device *dev)
7659 {
7660 struct net_device *ndev = dev_get_drvdata(dev);
7661 struct stmmac_priv *priv = netdev_priv(ndev);
7662
7663 netdev_info(priv->dev, "%s: removing driver", __func__);
7664
7665 pm_runtime_get_sync(dev);
7666
7667 stmmac_stop_all_dma(priv);
7668 stmmac_mac_set(priv, priv->ioaddr, false);
7669 netif_carrier_off(ndev);
7670 unregister_netdev(ndev);
7671
7672 #ifdef CONFIG_DEBUG_FS
7673 stmmac_exit_fs(ndev);
7674 #endif
7675 phylink_destroy(priv->phylink);
7676 if (priv->plat->stmmac_rst)
7677 reset_control_assert(priv->plat->stmmac_rst);
7678 reset_control_assert(priv->plat->stmmac_ahb_rst);
7679 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7680 priv->hw->pcs != STMMAC_PCS_RTBI)
7681 stmmac_mdio_unregister(ndev);
7682 destroy_workqueue(priv->wq);
7683 mutex_destroy(&priv->lock);
7684 bitmap_free(priv->af_xdp_zc_qps);
7685
7686 pm_runtime_disable(dev);
7687 pm_runtime_put_noidle(dev);
7688 }
7689 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7690
7691 /**
7692 * stmmac_suspend - suspend callback
7693 * @dev: device pointer
7694 * Description: this function suspends the device; it is called by the
7695 * platform driver to stop the network queues, program the PMT register
7696 * (for WoL) and clean up/release the driver resources.
7697 */
7698 int stmmac_suspend(struct device *dev)
7699 {
7700 struct net_device *ndev = dev_get_drvdata(dev);
7701 struct stmmac_priv *priv = netdev_priv(ndev);
7702 u32 chan;
7703
7704 if (!ndev || !netif_running(ndev))
7705 return 0;
7706
7707 mutex_lock(&priv->lock);
7708
7709 netif_device_detach(ndev);
7710
7711 stmmac_disable_all_queues(priv);
7712
7713 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7714 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7715
7716 if (priv->eee_enabled) {
7717 priv->tx_path_in_lpi_mode = false;
7718 del_timer_sync(&priv->eee_ctrl_timer);
7719 }
7720
7721 /* Stop TX/RX DMA */
7722 stmmac_stop_all_dma(priv);
7723
7724 if (priv->plat->serdes_powerdown)
7725 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7726
7727 /* Enable Power down mode by programming the PMT regs */
7728 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7729 stmmac_pmt(priv, priv->hw, priv->wolopts);
7730 priv->irq_wake = 1;
7731 } else {
7732 stmmac_mac_set(priv, priv->ioaddr, false);
7733 pinctrl_pm_select_sleep_state(priv->device);
7734 }
7735
7736 mutex_unlock(&priv->lock);
7737
7738 rtnl_lock();
7739 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7740 phylink_suspend(priv->phylink, true);
7741 } else {
7742 if (device_may_wakeup(priv->device))
7743 phylink_speed_down(priv->phylink, false);
7744 phylink_suspend(priv->phylink, false);
7745 }
7746 rtnl_unlock();
7747
7748 if (priv->dma_cap.fpesel) {
7749 /* Disable FPE */
7750 stmmac_fpe_configure(priv, priv->ioaddr,
7751 priv->plat->fpe_cfg,
7752 priv->plat->tx_queues_to_use,
7753 priv->plat->rx_queues_to_use, false);
7754
7755 stmmac_fpe_handshake(priv, false);
7756 stmmac_fpe_stop_wq(priv);
7757 }
7758
7759 priv->speed = SPEED_UNKNOWN;
7760 return 0;
7761 }
7762 EXPORT_SYMBOL_GPL(stmmac_suspend);
7763
7764 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7765 {
7766 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7767
7768 rx_q->cur_rx = 0;
7769 rx_q->dirty_rx = 0;
7770 }
7771
7772 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7773 {
7774 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7775
7776 tx_q->cur_tx = 0;
7777 tx_q->dirty_tx = 0;
7778 tx_q->mss = 0;
7779
7780 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7781 }
7782
7783 /**
7784 * stmmac_reset_queues_param - reset queue parameters
7785 * @priv: device pointer
7786 */
7787 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7788 {
7789 u32 rx_cnt = priv->plat->rx_queues_to_use;
7790 u32 tx_cnt = priv->plat->tx_queues_to_use;
7791 u32 queue;
7792
7793 for (queue = 0; queue < rx_cnt; queue++)
7794 stmmac_reset_rx_queue(priv, queue);
7795
7796 for (queue = 0; queue < tx_cnt; queue++)
7797 stmmac_reset_tx_queue(priv, queue);
7798 }
7799
7800 /**
7801 * stmmac_resume - resume callback
7802 * @dev: device pointer
7803 * Description: on resume, this function is invoked to set up the DMA and CORE
7804 * in a usable state.
7805 */
7806 int stmmac_resume(struct device *dev)
7807 {
7808 struct net_device *ndev = dev_get_drvdata(dev);
7809 struct stmmac_priv *priv = netdev_priv(ndev);
7810 int ret;
7811
7812 if (!netif_running(ndev))
7813 return 0;
7814
7815 /* The Power Down bit in the PM register is cleared
7816 * automatically as soon as a magic packet or a Wake-up frame
7817 * is received. It is still better to clear this bit manually
7818 * because it can cause problems while resuming
7819 * from other devices (e.g. serial console).
7820 */
7821 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7822 mutex_lock(&priv->lock);
7823 stmmac_pmt(priv, priv->hw, 0);
7824 mutex_unlock(&priv->lock);
7825 priv->irq_wake = 0;
7826 } else {
7827 pinctrl_pm_select_default_state(priv->device);
7828 /* reset the phy so that it's ready */
7829 if (priv->mii)
7830 stmmac_mdio_reset(priv->mii);
7831 }
7832
7833 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7834 priv->plat->serdes_powerup) {
7835 ret = priv->plat->serdes_powerup(ndev,
7836 priv->plat->bsp_priv);
7837
7838 if (ret < 0)
7839 return ret;
7840 }
7841
7842 rtnl_lock();
7843 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7844 phylink_resume(priv->phylink);
7845 } else {
7846 phylink_resume(priv->phylink);
7847 if (device_may_wakeup(priv->device))
7848 phylink_speed_up(priv->phylink);
7849 }
7850 rtnl_unlock();
7851
7852 rtnl_lock();
7853 mutex_lock(&priv->lock);
7854
7855 stmmac_reset_queues_param(priv);
7856
7857 stmmac_free_tx_skbufs(priv);
7858 stmmac_clear_descriptors(priv, &priv->dma_conf);
7859
7860 stmmac_hw_setup(ndev, false);
7861 stmmac_init_coalesce(priv);
7862 stmmac_set_rx_mode(ndev);
7863
7864 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7865
7866 stmmac_enable_all_queues(priv);
7867 stmmac_enable_all_dma_irq(priv);
7868
7869 mutex_unlock(&priv->lock);
7870 rtnl_unlock();
7871
7872 netif_device_attach(ndev);
7873
7874 return 0;
7875 }
7876 EXPORT_SYMBOL_GPL(stmmac_resume);
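/* stmmac_suspend()/stmmac_resume() are exported so the bus glue drivers can
 * wire them into their dev_pm_ops. A minimal, hypothetical wiring (without
 * any platform-specific wrappers) would be:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);
 *
 * The real platform glue (stmmac_platform.c) wraps these calls to also handle
 * platform-specific setup such as clocks across suspend/resume.
 */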
7877
7878 #ifndef MODULE
7879 static int __init stmmac_cmdline_opt(char *str)
7880 {
7881 char *opt;
7882
7883 if (!str || !*str)
7884 return 1;
7885 while ((opt = strsep(&str, ",")) != NULL) {
7886 if (!strncmp(opt, "debug:", 6)) {
7887 if (kstrtoint(opt + 6, 0, &debug))
7888 goto err;
7889 } else if (!strncmp(opt, "phyaddr:", 8)) {
7890 if (kstrtoint(opt + 8, 0, &phyaddr))
7891 goto err;
7892 } else if (!strncmp(opt, "buf_sz:", 7)) {
7893 if (kstrtoint(opt + 7, 0, &buf_sz))
7894 goto err;
7895 } else if (!strncmp(opt, "tc:", 3)) {
7896 if (kstrtoint(opt + 3, 0, &tc))
7897 goto err;
7898 } else if (!strncmp(opt, "watchdog:", 9)) {
7899 if (kstrtoint(opt + 9, 0, &watchdog))
7900 goto err;
7901 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7902 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7903 goto err;
7904 } else if (!strncmp(opt, "pause:", 6)) {
7905 if (kstrtoint(opt + 6, 0, &pause))
7906 goto err;
7907 } else if (!strncmp(opt, "eee_timer:", 10)) {
7908 if (kstrtoint(opt + 10, 0, &eee_timer))
7909 goto err;
7910 } else if (!strncmp(opt, "chain_mode:", 11)) {
7911 if (kstrtoint(opt + 11, 0, &chain_mode))
7912 goto err;
7913 }
7914 }
7915 return 1;
7916
7917 err:
7918 pr_err("%s: ERROR broken module parameter conversion", __func__);
7919 return 1;
7920 }
7921
7922 __setup("stmmaceth=", stmmac_cmdline_opt);
7923 #endif /* MODULE */
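/* When built into the kernel, the parser above allows the module parameters
 * to be set on the kernel command line, for example:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,buf_sz:2048
 */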
7924
7925 static int __init stmmac_init(void)
7926 {
7927 #ifdef CONFIG_DEBUG_FS
7928 /* Create debugfs main directory if it doesn't exist yet */
7929 if (!stmmac_fs_dir)
7930 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7931 register_netdevice_notifier(&stmmac_notifier);
7932 #endif
7933
7934 return 0;
7935 }
7936
7937 static void __exit stmmac_exit(void)
7938 {
7939 #ifdef CONFIG_DEBUG_FS
7940 unregister_netdevice_notifier(&stmmac_notifier);
7941 debugfs_remove_recursive(stmmac_fs_dir);
7942 #endif
7943 }
7944
7945 module_init(stmmac_init)
7946 module_exit(stmmac_exit)
7947
7948 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7949 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7950 MODULE_LICENSE("GPL");
7951