// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "rx_res.h"
#include "channels.h"
#include "params.h"

#define MLX5E_MAX_NUM_RSS 16

struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev;
	enum mlx5e_rx_res_features features;
	unsigned int max_nch;
	u32 drop_rqn;

	struct mlx5e_packet_merge_param pkt_merge_param;
	struct rw_semaphore pkt_merge_param_sem;

	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
	bool rss_active;
	u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
	unsigned int rss_nch;

	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
		struct mlx5e_rqt xsk_rqt;
		struct mlx5e_tir xsk_tir;
	} *channels;

	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};

/* API for rx_res_rss_* */

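/* Create the default RSS context, which always lives at index 0. It is the
 * only context created with TIRs, and its indirection table is initially
 * spread uniformly over the first init_nch channels.
 */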
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
				     unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;
	int err;

	if (WARN_ON(res->rss[0]))
		return -EINVAL;

	rss = mlx5e_rss_alloc();
	if (!rss)
		return -ENOMEM;

	err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
			     &res->pkt_merge_param);
	if (err)
		goto err_rss_free;

	mlx5e_rss_set_indir_uniform(rss, init_nch);

	res->rss[0] = rss;

	return 0;

err_rss_free:
	mlx5e_rss_free(rss);
	return err;
}

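/* Create an additional RSS context in the first free slot (index 0 is
 * reserved for the default context). No TIRs are created here; if the
 * channels are currently active, the new context is enabled immediately
 * with the stored RQNs.
 */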
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;
	int err, i;

	for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
		if (!res->rss[i])
			break;

	if (i == MLX5E_MAX_NUM_RSS)
		return -ENOSPC;

	rss = mlx5e_rss_alloc();
	if (!rss)
		return -ENOMEM;

	err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
	if (err)
		goto err_rss_free;

	mlx5e_rss_set_indir_uniform(rss, init_nch);
	if (res->rss_active)
		mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);

	res->rss[i] = rss;
	*rss_idx = i;

	return 0;

err_rss_free:
	mlx5e_rss_free(rss);
	return err;
}

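/* Release a single RSS context: destroy its HW objects first, and only free
 * the SW state and clear the slot if that succeeds.
 */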
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss = res->rss[rss_idx];
	int err;

	err = mlx5e_rss_cleanup(rss);
	if (err)
		return err;

	mlx5e_rss_free(rss);
	res->rss[rss_idx] = NULL;

	return 0;
}

int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -EINVAL;

	return __mlx5e_rx_res_rss_destroy(res, rss_idx);
}

static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];
		int err;

		if (!rss)
			continue;

		err = __mlx5e_rx_res_rss_destroy(res, i);
		if (err) {
			unsigned int refcount;

			refcount = mlx5e_rss_refcnt_read(rss);
			mlx5_core_warn(res->mdev,
				       "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
				       i, refcount, err);
		}
	}
}

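/* Apply the stored channel RQNs to every existing RSS context. Marking
 * rss_active also makes contexts created later start out enabled.
 */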
static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = true;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (!rss)
			continue;
		mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
	}
}

static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = false;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (!rss)
			continue;
		mlx5e_rss_disable(rss);
	}
}

/* Updates the indirection table SW shadow, does not update the HW resources yet */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	WARN_ON_ONCE(res->rss_active);
	mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}

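/* Get or set the hash key, indirection table and hash function of the RSS
 * context selected by rss_idx. The setter passes the stored channel RQNs so
 * the indirection can be rebuilt against the current channels.
 */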
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      u32 *indir, u8 *key, u8 *hfunc)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
}

int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      const u32 *indir, const u8 *key, const u8 *hfunc)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
}

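/* Per-traffic-type hash field configuration is only exposed for the default
 * RSS context.
 */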
u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_hash_fields(rss, tt);
}

int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
				     u8 rx_hash_fields)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
}

int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
{
	int i, cnt;

	cnt = 0;
	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (res->rss[i])
			cnt++;

	return cnt;
}

int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
{
	int i;

	if (!rss)
		return -EINVAL;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (rss == res->rss[i])
			return i;

	return -ENOENT;
}

struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
{
	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return NULL;

	return res->rss[rss_idx];
}

/* End of API rx_res_rss_* */

struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
{
	return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
}

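/* Create a direct RQT and TIR pair for every channel (plus an XSK pair per
 * channel when the XSK feature is enabled). All RQTs initially point at the
 * drop RQ; on failure, everything created so far is unwound in reverse.
 */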
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
	if (!res->channels) {
		err = -ENOMEM;
		goto out;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
		goto out;

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
					    res->mdev, false, res->drop_rqn);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_xsk_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_xsk_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_xsk_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].xsk_tir);

	ix = res->max_nch;
err_destroy_xsk_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);

	ix = res->max_nch;
err_destroy_direct_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	ix = res->max_nch;
err_destroy_direct_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

	kvfree(res->channels);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}

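/* Create the dedicated RQT and TIR used to deliver PTP traffic. */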
static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
	if (err)
		goto out;

	/* Separated from the channels RQs, does not share pkt_merge state with them */
	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);

	err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
	if (err)
		goto err_destroy_ptp_rqt;

	goto out;

err_destroy_ptp_rqt:
	mlx5e_rqt_destroy(&res->ptp.rqt);

out:
	mlx5e_tir_builder_free(builder);
	return err;
}

static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
{
	unsigned int ix;

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
		mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
	}

	kvfree(res->channels);
}

static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}

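/* Initialize all RX resources: the default RSS context, the per-channel
 * direct/XSK objects and the PTP objects. Teardown on error (and in
 * mlx5e_rx_res_destroy()) happens in the reverse order.
 */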
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
		      enum mlx5e_rx_res_features features, unsigned int max_nch,
		      u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
		      unsigned int init_nch)
{
	int err;

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	res->pkt_merge_param = *init_pkt_merge_param;
	init_rwsem(&res->pkt_merge_param_sem);

	err = mlx5e_rx_res_rss_init_def(res, init_nch);
	if (err)
		goto err_out;

	err = mlx5e_rx_res_channels_init(res);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return 0;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	__mlx5e_rx_res_rss_destroy(res, 0);
err_out:
	return err;
}

void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy_all(res);
}

void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res);
}

u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}

u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));

	return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
}

u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, false);
}

u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, true);
}

u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}

static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}

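/* Attach the RX resources to live channels: record the channel RQNs, enable
 * all RSS contexts, redirect each direct (and XSK) RQT to its channel's RQ,
 * point the unused tail of the table at the drop RQ, and wire up the PTP RQT
 * if supported. Redirect failures are logged but not propagated.
 */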
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
	unsigned int nch, ix;
	int err;

	nch = mlx5e_channels_get_num(chs);

	for (ix = 0; ix < chs->num; ix++)
		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
	res->rss_nch = chs->num;

	mlx5e_rx_res_rss_enable(res);

	for (ix = 0; ix < nch; ix++) {
		u32 rqn;

		mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
			rqn = res->drop_rqn;
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       rqn, ix, err);
	}
	for (ix = nch; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       res->drop_rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       res->drop_rqn, ix, err);
	}

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		u32 rqn;

		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
			rqn = res->drop_rqn;

		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       rqn, err);
	}
}

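/* Detach from the channel RQs: disable RSS and redirect every direct, XSK
 * and PTP RQT back to the drop RQ.
 */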
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
{
	unsigned int ix;
	int err;

	mlx5e_rx_res_rss_disable(res);

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
				       res->drop_rqn, ix, err);

		if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
			continue;

		err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
				       res->drop_rqn, ix, err);
	}

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       res->drop_rqn, err);
	}
}

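/* Switch a single channel's XSK RQT between its XSK RQ and the drop RQ. */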
int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			      unsigned int ix)
{
	u32 rqn;
	int err;

	if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
		return -EINVAL;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
			       rqn, ix, err);
	return err;
}

int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
{
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
			       res->drop_rqn, ix, err);
	return err;
}

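/* Update the packet merge configuration: store the new parameters under the
 * write lock, then push them to every RSS context and every direct TIR. The
 * first error encountered is returned, but all TIRs are still attempted.
 */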
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
					struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	int err, final_err;
	unsigned int ix;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	down_write(&res->pkt_merge_param_sem);
	res->pkt_merge_param = *pkt_merge_param;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
		struct mlx5e_rss *rss = res->rss[ix];

		if (!rss)
			continue;

		err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
		if (err)
			final_err = final_err ? : err;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	up_write(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_free(builder);
	return final_err;
}

struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return mlx5e_rss_get_hash(res->rss[0]);
}

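/* Create a TLS TIR on top of a channel's direct RQT. Holding the semaphore
 * for read keeps the TIR's packet merge parameters consistent with
 * concurrent updates in mlx5e_rx_res_packet_merge_set_param().
 */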
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
				struct mlx5e_tir *tir)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	u32 rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	down_read(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
	err = mlx5e_tir_init(tir, builder, res->mdev, false);
	up_read(&res->pkt_merge_param_sem);

	mlx5e_tir_builder_free(builder);

	return err;
}