1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies
3
4 #include "en/ptp.h"
5 #include "en/txrx.h"
6 #include "en/params.h"
7 #include "en/fs_tt_redirect.h"
8
/* Flow-steering state for PTP RX: the redirect rules that send PTP
 * traffic (UDP event ports and L2 ETH_P_1588 frames) to the PTP RQ.
 */
struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;     /* ETH_P_1588 redirect rule */
	struct mlx5_flow_handle *udp_v4_rule; /* IPv4 UDP PTP event-port rule */
	struct mlx5_flow_handle *udp_v6_rule; /* IPv6 UDP PTP event-port rule */
	bool valid;                           /* true while the rules are installed */
};
15
/* Aggregated parameters used while opening the PTP channel's queues. */
struct mlx5e_ptp_params {
	struct mlx5e_params params;        /* channel-level parameters */
	struct mlx5e_sq_param txq_sq_param; /* per-TC TX SQ parameters */
	struct mlx5e_rq_param rq_param;     /* PTP RQ parameters */
};
21
/* Stored in skb->cb: the two hardware timestamps (CQE completion and
 * port transmit) collected for a PTP skb before reporting to userspace.
 */
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};
26
mlx5e_skb_cb_hwtstamp_init(struct sk_buff * skb)27 void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
28 {
29 memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
30 }
31
mlx5e_skb_cb_get_hwts(struct sk_buff * skb)32 static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
33 {
34 BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
35 return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
36 }
37
mlx5e_skb_cb_hwtstamp_tx(struct sk_buff * skb,struct mlx5e_ptp_cq_stats * cq_stats)38 static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
39 struct mlx5e_ptp_cq_stats *cq_stats)
40 {
41 struct skb_shared_hwtstamps hwts = {};
42 ktime_t diff;
43
44 diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
45 mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);
46
47 /* Maximal allowed diff is 1 / 128 second */
48 if (diff > (NSEC_PER_SEC >> 7)) {
49 cq_stats->abort++;
50 cq_stats->abort_abs_diff_ns += diff;
51 return;
52 }
53
54 hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
55 skb_tstamp_tx(skb, &hwts);
56 }
57
/* Record one of the two hardware timestamps for @skb. Once both the
 * CQE and port timestamps have been seen, report to the socket and
 * clear the scratch area since the skb is about to be released.
 */
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct mlx5e_skb_cb_hwtstamp *hwts_cb = mlx5e_skb_cb_get_hwts(skb);

	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		hwts_cb->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		hwts_cb->port_hwtstamp = hwtstamp;
		break;
	}

	/* Wait until both timestamps are present before reporting. */
	if (!hwts_cb->cqe_hwtstamp || !hwts_cb->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(hwts_cb, 0, sizeof(*hwts_cb));
}
81
/* Map a WQE counter onto an skb-FIFO index. Relies on a local variable
 * named "ptpsq" being in scope at the expansion site.
 */
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
83
mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq * ptpsq,u16 skb_cc,u16 skb_id)84 static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
85 {
86 return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
87 }
88
/* Resynchronize the skb FIFO with the CQE's wqe counter after one or
 * more TS CQEs were lost: pop and report each stale skb using only its
 * CQE timestamp, until the consumer index matches @skb_id.
 */
static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
{
	struct skb_shared_hwtstamps hwts = {};
	struct sk_buff *skb;

	ptpsq->cq_stats->resync_event++;

	for (; skb_cc != skb_id; skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc)) {
		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
		skb_tstamp_tx(skb, &hwts);
		ptpsq->cq_stats->resync_cqe++;
	}
}
104
/* Handle one completion on the port-timestamp CQ: pair the reported
 * port timestamp with the skb queued when its WQE was posted.
 */
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		/* Error CQE: consume the skb without reporting a timestamp. */
		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	/* If the consumer index lags the CQE's wqe counter, TS CQEs were
	 * lost; flush the stale skbs before processing this one.
	 */
	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);

	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

out:
	napi_consume_skb(skb, budget);
}
133
/* Poll the port-timestamp CQ up to @budget completions.
 * Returns true when the full budget was consumed (work may remain).
 */
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	/* Skip polling while the companion SQ is not enabled. */
	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done == budget;
}
161
/* NAPI handler for the PTP channel: poll the per-TC send and timestamp
 * CQs (TX side) and the PTP RQ (RX side), then re-arm all CQs once no
 * work remains.
 */
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	/* budget == 0 means no RX processing is allowed in this call. */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		/* More work pending: return the full budget so NAPI repolls. */
		work_done = budget;
		goto out;
	}

	/* If NAPI was not completed, stay armed for another poll round. */
	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	/* Re-arm every CQ so the next completion raises an event. */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}
214
/* Allocate the software state and cyclic WQ for one PTP TX SQ (one per
 * TC). The hardware SQ object is created later in mlx5e_ptp_open_txqsq().
 */
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev = c->pdev;
	sq->clock = &mdev->clock;
	sq->mkey_be = c->mkey_be;
	sq->netdev = c->netdev;
	sq->priv = c->priv;
	sq->mdev = mdev;
	sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix = txq_ix;
	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	/* Without HW VLAN insertion, L2 headers must be inlined in the WQE. */
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	/* Place WQ and doorbell memory on the device's NUMA node. */
	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
265
/* Thin wrapper: destroy the hardware SQ object identified by @sqn. */
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
270
mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq * ptpsq,int numa)271 static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
272 {
273 int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
274 struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
275
276 ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
277 GFP_KERNEL, numa);
278 if (!ptpsq->skb_fifo.fifo)
279 return -ENOMEM;
280
281 ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
282 ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
283 ptpsq->skb_fifo.mask = wq_sz - 1;
284 if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
285 ptpsq->ts_cqe_ctr_mask =
286 (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
287 return 0;
288 }
289
mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo * skb_fifo)290 static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
291 {
292 while (*skb_fifo->pc != *skb_fifo->cc) {
293 struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
294
295 dev_kfree_skb_any(skb);
296 }
297 }
298
/* Drop any skbs still awaiting a port timestamp, then free the FIFO
 * storage itself.
 */
static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}
304
/* Open one PTP TX SQ: allocate SW state, create the HW SQ with its
 * timestamp CQEs routed to the dedicated TS CQ, then allocate the skb
 * FIFO used to match TS CQEs back to skbs.
 */
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = txqsq->cq.mcq.cqn;
	csp.wq_ctrl = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	/* Route this SQ's timestamp CQEs to the dedicated TS CQ. */
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq,
					 dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}
342
/* Tear down one PTP TX SQ: free the skb FIFO (dropping skbs still
 * awaiting a port timestamp), stop recovery work, destroy the HW SQ,
 * then release in-flight descriptors and the SW state.
 */
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
354
/* Open one PTP TX SQ per TC. The PTP channel's txq indices start right
 * after all regular channels' txqs (num_tc * num_channels). On failure,
 * close the SQs opened so far in reverse order.
 */
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}
383
mlx5e_ptp_close_txqsqs(struct mlx5e_ptp * c)384 static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
385 {
386 int tc;
387
388 for (tc = 0; tc < c->num_tc; tc++)
389 mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
390 }
391
/* Open, per TC, both the send-completion CQ and the port-timestamp CQ
 * for the PTP channel. Both CQ sets share the txq SQ's CQ parameters
 * and zeroed (no) moderation.
 */
static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	/* All txqsq CQs were opened; reset tc so the loop below closes them all. */
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
443
mlx5e_ptp_open_rx_cq(struct mlx5e_ptp * c,struct mlx5e_ptp_params * cparams)444 static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
445 struct mlx5e_ptp_params *cparams)
446 {
447 struct mlx5e_create_cq_param ccp = {};
448 struct dim_cq_moder ptp_moder = {};
449 struct mlx5e_cq_param *cq_param;
450 struct mlx5e_cq *cq = &c->rq.cq;
451
452 ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
453 ccp.ch_stats = c->stats;
454 ccp.napi = &c->napi;
455 ccp.ix = MLX5E_PTP_CHANNEL_IX;
456
457 cq_param = &cparams->rq_param.cqp;
458
459 return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
460 }
461
/* Close the per-TC timestamp CQs first, then the send CQs, mirroring
 * the open order in mlx5e_ptp_open_tx_cqs().
 */
static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}
472
/* Build the SQ parameters for the PTP channel's txqsqs, including the
 * companion TX CQ parameters.
 */
static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	/* Reserve room for the largest possible WQE so posting never overflows. */
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
487
/* Build RQ parameters for the PTP channel: always a cyclic (legacy)
 * WQ type, sized for the largest MTU the netdev supports.
 */
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     u16 q_counter,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, q_counter, &ptp_params->rq_param);
}
501
/* Derive the PTP channel's parameters from the netdev's current ones.
 * SQ parameters are built only when TX port timestamping is enabled,
 * RQ parameters only when PTP RX is enabled.
 */
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size = orig->log_sq_size;
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}
	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}
525
/* Initialize the software state of the PTP RQ and register its XDP
 * rxq info; the hardware RQ is created later by mlx5e_open_rq().
 */
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = priv->netdev;
	rq->priv = priv;
	rq->clock = &mdev->clock;
	rq->tstamp = &priv->tstamp;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats = &c->priv->ptp_stats.rq;
	rq->ix = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
550
mlx5e_ptp_open_rq(struct mlx5e_ptp * c,struct mlx5e_params * params,struct mlx5e_rq_param * rq_param)551 static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
552 struct mlx5e_rq_param *rq_param)
553 {
554 int node = dev_to_node(c->mdev->device);
555 int err;
556
557 err = mlx5e_init_ptp_rq(c, params, &c->rq);
558 if (err)
559 return err;
560
561 return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
562 }
563
/* Open the PTP channel's queues according to its state bits: TX CQs
 * and SQs when TX timestamping is on, RX CQ and RQ when PTP RX is on.
 * The error labels unwind in reverse order, each re-checking the state
 * bit that guarded the corresponding open step.
 */
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}
601
/* Close all queues the PTP channel opened: RX (RQ then its CQ) first,
 * then TX (SQs then their CQs).
 */
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}
613
mlx5e_ptp_set_state(struct mlx5e_ptp * c,struct mlx5e_params * params)614 static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
615 {
616 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
617 __set_bit(MLX5E_PTP_STATE_TX, c->state);
618
619 if (params->ptp_rx)
620 __set_bit(MLX5E_PTP_STATE_RX, c->state);
621
622 return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
623 }
624
/* Remove all PTP RX steering rules and their redirect tables, in the
 * reverse of the install order. No-op when nothing is installed.
 */
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(fs);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	ptp_fs->valid = false;
}
640
/* Install the steering rules redirecting PTP traffic (UDP event port
 * over IPv4/IPv6, plus L2 ETH_P_1588 frames) to the PTP TIR.
 * Idempotent: returns 0 immediately if already installed. On failure,
 * the goto chain tears down everything installed so far.
 */
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
	return err;
}
698
/* Open the PTP special channel: allocate the channel and a parameter
 * scratch struct, set the TX/RX state bits, register NAPI and open the
 * queues. On success *cp holds the new channel; on failure everything
 * allocated here is freed.
 *
 * Fix: the original returned -ENOMEM directly when either kvzalloc
 * failed, leaking whichever of the two allocations had succeeded.
 * Route the failure through err_free instead (kvfree(NULL) is a no-op).
 */
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv = priv;
	c->mdev = priv->mdev;
	c->tstamp = &priv->tstamp;
	c->pdev = mlx5_core_dma_dev(priv->mdev);
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc = mlx5e_get_dcb_num_tc(params);
	c->stats = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}
752
/* Counterpart of mlx5e_ptp_open(): close all queues, remove NAPI and
 * free the channel.
 */
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
760
mlx5e_ptp_activate_channel(struct mlx5e_ptp * c)761 void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
762 {
763 int tc;
764
765 napi_enable(&c->napi);
766
767 if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
768 for (tc = 0; tc < c->num_tc; tc++)
769 mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
770 }
771 if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
772 mlx5e_ptp_rx_set_fs(c->priv);
773 mlx5e_activate_rq(&c->rq);
774 mlx5e_trigger_napi_sched(&c->napi);
775 }
776 }
777
mlx5e_ptp_deactivate_channel(struct mlx5e_ptp * c)778 void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
779 {
780 int tc;
781
782 if (test_bit(MLX5E_PTP_STATE_RX, c->state))
783 mlx5e_deactivate_rq(&c->rq);
784
785 if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
786 for (tc = 0; tc < c->num_tc; tc++)
787 mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
788 }
789
790 napi_disable(&c->napi);
791 }
792
/* Fetch the PTP RQ number into *rqn. Returns -EINVAL when the channel
 * does not exist or has no RX side.
 */
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		*rqn = c->rq.rqn;
		return 0;
	}

	return -EINVAL;
}
801
mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering * fs,const struct mlx5e_profile * profile)802 int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
803 const struct mlx5e_profile *profile)
804 {
805 struct mlx5e_ptp_fs *ptp_fs;
806
807 if (!mlx5e_profile_feature_cap(profile, PTP_RX))
808 return 0;
809
810 ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
811 if (!ptp_fs)
812 return -ENOMEM;
813 mlx5e_fs_set_ptp(fs, ptp_fs);
814
815 return 0;
816 }
817
mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering * fs,const struct mlx5e_profile * profile)818 void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
819 const struct mlx5e_profile *profile)
820 {
821 struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
822
823 if (!mlx5e_profile_feature_cap(profile, PTP_RX))
824 return;
825
826 mlx5e_ptp_rx_unset_fs(fs);
827 kfree(ptp_fs);
828 }
829
/* Install (@set == true) or remove PTP RX steering rules at runtime.
 * No-op when the profile lacks PTP RX support or the netdev is closed.
 * Warns if asked to add rules without an RX-enabled PTP channel, or to
 * remove them while one is still active.
 */
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv->fs);
	return 0;
}
855