// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}
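/* For illustration: with an XDP program attached and no XSK, the headroom is
 * NET_IP_ALIGN + XDP_PACKET_HEADROOM, e.g. 2 + 256 = 258 bytes on
 * architectures where NET_IP_ALIGN is 2. Without XDP, the smaller
 * MLX5_RX_HEADROOM is used instead.
 */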

u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case. It can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it allows treating XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by using
	 * the fact that there are no multiple packets per "page" (which is a
	 * frame). The latter is important, because frames may come in a
	 * random order, and we will have trouble assembling a real page of
	 * multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}
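/* For illustration, assuming PAGE_SIZE is 4096: with a 1500-byte MTU and an
 * XDP program attached, the fragment size derived from the MTU is well below
 * a page, so the max_t() above rounds it up to PAGE_SIZE and every packet
 * occupies a full page.
 */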

u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}
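/* For illustration, assuming MLX5_MPWRQ_LOG_WQE_SZ is 18 (a 256 KB WQE) and a
 * linear fragment size that rounds up to 4 KB: 18 - 12 = 6, i.e. 64 packets
 * fit into one MPWQE.
 */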

bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
		linear_frag_sz <= PAGE_SIZE;
}

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	s8 log_num_strides;
	u8 log_stride_sz;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
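/* For illustration: with log_rq_mtu_frames of 11 (2048 frames) and 6 packets
 * per WQE (log), the RQ holds 2^(11 - 6) = 32 MPWQEs. If the difference would
 * fall below MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW, that minimum is returned
 * instead.
 */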

u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
{
	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params)
{
	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params)
{
	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
			 PAGE_SIZE;

	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}
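/* For illustration, assuming a 64 KB reservation (resrv_size of 65536) and
 * sw_mtu of 1500: DIV_ROUND_UP(65536, 1500) = 44, and order_base_2(44) = 6,
 * i.e. up to 64 packets per reservation.
 */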

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}
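/* For illustration: for any WQ of 6 or more WQEs, the minimum bulk is
 * UMR_WQE_BULK (2); e.g. wq_sz of 8 gives min(2, 8 / 2 - 1) = 2. Only very
 * small WQs reduce it further.
 */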

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
		return linear_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return linear_headroom;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		return linear_headroom;

	return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_ktls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum-sized WQE, and in
		 * addition all the normal stop room can be consumed if a new
		 * packet breaks the active MPWQE session and allocates its
		 * WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_max_wqe(mdev);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
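/* For illustration: for a 100 Gbps port (link_speed of 100000), the heuristic
 * fires only when the available PCIe bandwidth reported by the PCI core is
 * below 50000, i.e. when the link is more than twice as fast as the bus.
 */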

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
			       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
			       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
	if (xdp)
		/* XDP requires all fragments to be of the same size. */
		return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;

	/* Optimization for small packets: the last fragment is bigger than the others. */
	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}
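/* For illustration, assuming MLX5E_MAX_RX_FRAGS is 4, frag_size is 2048,
 * first_frag_size is 1536 and PAGE_SIZE is 4096: with XDP the maximum is
 * 1536 + 3 * 2048 = 7680 bytes; without XDP it is 1536 + 2 * 2048 + 4096 =
 * 9728 bytes, since the last fragment may be a full page.
 */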

#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	int first_frag_size_max;
	u32 buf_size = 0;
	u16 headroom;
	int max_mtu;
	int i;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
					  params->xdp_prog);
	if (byte_count > max_mtu || params->xdp_prog) {
		frag_size_max = PAGE_SIZE;
		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
						  params->xdp_prog);
		if (byte_count > max_mtu) {
			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
				      params->sw_mtu, max_mtu);
			return -EINVAL;
		}
	}

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i == 0)
			frag_size = min(frag_size, first_frag_size_max);
		else if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		buf_size += frag_size;

		if (params->xdp_prog) {
			/* XDP multi buffer expects fragments of the same size. */
			info->arr[i].frag_stride = frag_size_max;
		} else {
			if (i == 0) {
				/* Ensure that headroom and tailroom are included. */
				frag_size += headroom;
				frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			}
			info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
		}

		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);

	return 0;
}
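/* For illustration: if the non-linear layout above ends up with three
 * fragments, wqe_bulk is first set to 1 + (3 % 2) = 2, and the max_t() at the
 * out label then raises it to 8.
 */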

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
	int wqe_size = BIT(log_stride_sz) * num_strides;

	/* +1 is for the case that the packets per reservation don't consume
	 * the whole reservation, so we get a filler cqe for the rest of the
	 * reservation.
	 */
	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}
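/* For illustration, assuming a 256 KB WQE, a 64 KB reservation, 64 packets
 * per reservation and a WQ of 8 WQEs: (262144 / 65536) * 8 * (64 + 1) = 2080,
 * so order_base_2(2080) = 12 and the CQ gets 4096 entries.
 */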

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
		else
			log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;
	int err;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			MLX5_SET(wq, wq, shampo_enable, true);
			MLX5_SET(wq, wq, log_reservation_size,
				 mlx5e_shampo_get_log_rsrv_size(mdev, params));
			MLX5_SET(wq, wq,
				 log_max_num_of_packets_per_reservation,
				 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
			MLX5_SET(wq, wq, log_headers_entry_size,
				 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
			MLX5_SET(rqc, rqc, reservation_timeout,
				 params->packet_merge.timeout);
			MLX5_SET(rqc, rqc, shampo_match_criteria_type,
				 params->packet_merge.shampo.match_criteria_type);
			MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
				 params->packet_merge.shampo.alignment_granularity);
		}
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		if (err)
			return err;
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp =
		mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

/* This function calculates the maximum number of header entries that are
 * needed per WQE. The formula is based on the size of the reservations and
 * on the restriction that the max number of packets per reservation is equal
 * to the max number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{
	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
	int wqe_size = BIT(log_stride_sz) * num_strides;
	u32 hd_per_wqe;

	/* Assumption: hd_per_wqe % 8 == 0. */
	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
	return hd_per_wqe;
}
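/* For illustration, with the same assumptions as above (a 256 KB WQE, a 64 KB
 * reservation, 64 packets per reservation): (262144 / 65536) * 64 = 256
 * header entries per WQE, which satisfies the hd_per_wqe % 8 == 0 assumption.
 */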

/* This function calculates the maximum number of header entries that are
 * needed for the WQ. This value is used to allocate the header buffer in HW,
 * thus it must be a power of 2.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
	return hd_per_wq;
}
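/* For illustration, continuing the example above: with 256 header entries per
 * WQE and a WQ of 8 WQEs, hd_per_wq = roundup_pow_of_two(256 * 8) = 2048.
 */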

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
	rest = max_hd_per_wqe % max_klm_per_umr;
	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
	wqebbs *= wq_size;
	return wqebbs;
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs;

	/* MLX5_WQ_TYPE_CYCLIC */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));

	/* If XDP program is attached, XSK may be turned on at any time without
	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
	 * both regular RQ and XSK RQ.
	 * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
	 * doesn't affect its return value, as long as params->xdp_prog != NULL,
	 * so we can just multiply by 2.
	 */
	if (params->xdp_prog)
		wqebbs *= 2;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);

	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
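/* For illustration: a striding RQ of 2^8 WQEs needs 256 * MLX5E_UMR_WQEBBS
 * WQEBBs in the ICOSQ; with an XDP program attached this is doubled to leave
 * room for the XSK RQ's UMR WQEs as well, and the final log size is
 * order_base_2() of the total, clamped to at least
 * MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE.
 */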

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					   u8 log_wq_size,
					   struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}