1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3
4 #include <net/xdp_sock_drv.h>
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_dcb_lib.h"
8 #include "ice_sriov.h"
9
10 /**
11 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
12 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
13 *
14 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
15 */
16 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
17 {
18 unsigned int offset, i;
19
20 mutex_lock(qs_cfg->qs_mutex);
21 offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
22 0, qs_cfg->q_count, 0);
23 if (offset >= qs_cfg->pf_map_size) {
24 mutex_unlock(qs_cfg->qs_mutex);
25 return -ENOMEM;
26 }
27
28 bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
29 for (i = 0; i < qs_cfg->q_count; i++)
30 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
31 mutex_unlock(qs_cfg->qs_mutex);
32
33 return 0;
34 }
35
36 /**
37 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
38 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
39 *
40 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
41 */
42 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
43 {
44 unsigned int i, index = 0;
45
46 mutex_lock(qs_cfg->qs_mutex);
47 for (i = 0; i < qs_cfg->q_count; i++) {
48 index = find_next_zero_bit(qs_cfg->pf_map,
49 qs_cfg->pf_map_size, index);
50 if (index >= qs_cfg->pf_map_size)
51 goto err_scatter;
52 set_bit(index, qs_cfg->pf_map);
53 qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
54 }
55 mutex_unlock(qs_cfg->qs_mutex);
56
57 return 0;
58 err_scatter:
59 for (index = 0; index < i; index++) {
60 clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
61 qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
62 }
63 mutex_unlock(qs_cfg->qs_mutex);
64
65 return -ENOMEM;
66 }
67
68 /**
69 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
70 * @pf: the PF being configured
71 * @pf_q: the PF queue
72 * @ena: enable or disable state of the queue
73 *
74 * This routine will wait for the given Rx queue of the PF to reach the
75 * enabled or disabled state.
76 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
77 * multiple retries; otherwise returns 0 on success.
78 */
79 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
80 {
81 int i;
82
83 for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
84 if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
85 QRX_CTRL_QENA_STAT_M))
86 return 0;
87
88 usleep_range(20, 40);
89 }
90
91 return -ETIMEDOUT;
92 }
93
94 /**
95 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
96 * @vsi: the VSI being configured
97 * @v_idx: index of the vector in the VSI struct
98 *
99 * We allocate one q_vector and set the default ITR values associated
100 * with this q_vector. If allocation fails we return -ENOMEM.
101 */
102 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
103 {
104 struct ice_pf *pf = vsi->back;
105 struct ice_q_vector *q_vector;
106
107 /* allocate q_vector */
108 q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
109 GFP_KERNEL);
110 if (!q_vector)
111 return -ENOMEM;
112
113 q_vector->vsi = vsi;
114 q_vector->v_idx = v_idx;
115 q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
116 q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
117 q_vector->tx.itr_mode = ITR_DYNAMIC;
118 q_vector->rx.itr_mode = ITR_DYNAMIC;
119 q_vector->tx.type = ICE_TX_CONTAINER;
120 q_vector->rx.type = ICE_RX_CONTAINER;
121
122 if (vsi->type == ICE_VSI_VF)
123 goto out;
124 /* only set affinity_mask if the CPU is online */
125 if (cpu_online(v_idx))
126 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
127
128 /* This will not be called in the driver load path because the netdev
129 * will not be created yet. All other cases will register the NAPI
130 * handler here (i.e. resume, reset/rebuild, etc.)
131 */
132 if (vsi->netdev)
133 netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
134 NAPI_POLL_WEIGHT);
135
136 out:
137 /* tie q_vector and VSI together */
138 vsi->q_vectors[v_idx] = q_vector;
139
140 return 0;
141 }
142
143 /**
144 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
145 * @vsi: VSI having the memory freed
146 * @v_idx: index of the vector to be freed
147 */
148 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
149 {
150 struct ice_q_vector *q_vector;
151 struct ice_pf *pf = vsi->back;
152 struct ice_tx_ring *tx_ring;
153 struct ice_rx_ring *rx_ring;
154 struct device *dev;
155
156 dev = ice_pf_to_dev(pf);
157 if (!vsi->q_vectors[v_idx]) {
158 dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
159 return;
160 }
161 q_vector = vsi->q_vectors[v_idx];
162
163 ice_for_each_tx_ring(tx_ring, q_vector->tx)
164 tx_ring->q_vector = NULL;
165 ice_for_each_rx_ring(rx_ring, q_vector->rx)
166 rx_ring->q_vector = NULL;
167
168 /* only VSI with an associated netdev is set up with NAPI */
169 if (vsi->netdev)
170 netif_napi_del(&q_vector->napi);
171
172 devm_kfree(dev, q_vector);
173 vsi->q_vectors[v_idx] = NULL;
174 }
175
176 /**
177 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
178 * @hw: board specific structure
179 */
180 static void ice_cfg_itr_gran(struct ice_hw *hw)
181 {
182 u32 regval = rd32(hw, GLINT_CTL);
183
184 /* no need to update global register if ITR gran is already set */
185 if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
186 (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
187 GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
188 (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
189 GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
190 (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
191 GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
192 (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
193 GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
194 return;
195
196 regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
197 GLINT_CTL_ITR_GRAN_200_M) |
198 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
199 GLINT_CTL_ITR_GRAN_100_M) |
200 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
201 GLINT_CTL_ITR_GRAN_50_M) |
202 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
203 GLINT_CTL_ITR_GRAN_25_M);
204 wr32(hw, GLINT_CTL, regval);
205 }
206
207 /**
208 * ice_calc_txq_handle - calculate the queue handle
209 * @vsi: VSI that ring belongs to
210 * @ring: ring to get the absolute queue index
211 * @tc: traffic class number
212 */
213 static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
214 {
215 WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
216
217 if (ring->ch)
218 return ring->q_index - ring->ch->base_q;
219
220 /* The idea behind the calculation is to subtract the queue offset of
221 * the TC that the ring belongs to from the ring's absolute queue index;
222 * the result is the queue's index within that TC.
223 */
224 return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
225 }
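/* Worked example (hypothetical numbers, not taken from a real configuration):
 * if TC 2 owns queues 8..15, tc_cfg.tc_info[2].qoffset is 8, so a ring with
 * an absolute q_index of 10 gets the per-TC handle 10 - 8 = 2.
 */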
226
227 /**
228 * ice_eswitch_calc_txq_handle - calculate a unique Tx queue handle for a switchdev ring
229 * @ring: pointer to the ring for which a unique index is needed
230 *
231 * To work correctly with many netdevs, ring->q_index of Tx rings on a switchdev
232 * VSI can repeat. Hardware ring setup requires a unique q_index. Calculate it
233 * here by finding the index of this ring in vsi->tx_rings.
234 *
235 * Return ICE_INVAL_Q_INDEX when the index isn't found. This should never happen,
236 * because the VSI is taken from ring->vsi, so the ring has to be present in that VSI.
237 */
238 static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
239 {
240 struct ice_vsi *vsi = ring->vsi;
241 int i;
242
243 ice_for_each_txq(vsi, i) {
244 if (vsi->tx_rings[i] == ring)
245 return i;
246 }
247
248 return ICE_INVAL_Q_INDEX;
249 }
250
251 /**
252 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
253 * @ring: The Tx ring to configure
254 *
255 * This enables/disables XPS for a given Tx descriptor ring
256 * based on the TCs enabled for the VSI that ring belongs to.
257 */
258 static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
259 {
260 if (!ring->q_vector || !ring->netdev)
261 return;
262
263 /* We only initialize XPS once, so as not to overwrite user settings */
264 if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
265 return;
266
267 netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
268 ring->q_index);
269 }
270
271 /**
272 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
273 * @ring: The Tx ring to configure
274 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
275 * @pf_q: queue index in the PF space
276 *
277 * Configure the Tx descriptor ring in TLAN context.
278 */
279 static void
280 ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
281 {
282 struct ice_vsi *vsi = ring->vsi;
283 struct ice_hw *hw = &vsi->back->hw;
284
285 tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
286
287 tlan_ctx->port_num = vsi->port_info->lport;
288
289 /* Transmit Queue Length */
290 tlan_ctx->qlen = ring->count;
291
292 ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
293
294 /* PF number */
295 tlan_ctx->pf_num = hw->pf_id;
296
297 /* queue belongs to a specific VSI type
298 * VF / VM index should be programmed per vmvf_type setting:
299 * for vmvf_type = VF, it is VF number between 0-256
300 * for vmvf_type = VM, it is VM number between 0-767
301 * for PF or EMP this field should be set to zero
302 */
303 switch (vsi->type) {
304 case ICE_VSI_LB:
305 case ICE_VSI_CTRL:
306 case ICE_VSI_PF:
307 if (ring->ch)
308 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
309 else
310 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
311 break;
312 case ICE_VSI_VF:
313 /* Firmware expects vmvf_num to be absolute VF ID */
314 tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
315 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
316 break;
317 case ICE_VSI_SWITCHDEV_CTRL:
318 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
319 break;
320 default:
321 return;
322 }
323
324 /* make sure the context is associated with the right VSI */
325 if (ring->ch)
326 tlan_ctx->src_vsi = ring->ch->vsi_num;
327 else
328 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
329
330 /* Restrict Tx timestamps to the PF VSI */
331 switch (vsi->type) {
332 case ICE_VSI_PF:
333 tlan_ctx->tsyn_ena = 1;
334 break;
335 default:
336 break;
337 }
338
339 tlan_ctx->tso_ena = ICE_TX_LEGACY;
340 tlan_ctx->tso_qnum = pf_q;
341
342 /* Legacy or Advanced Host Interface:
343 * 0: Advanced Host Interface
344 * 1: Legacy Host Interface
345 */
346 tlan_ctx->legacy_int = ICE_TX_LEGACY;
347 }
348
349 /**
350 * ice_rx_offset - Return expected offset into page to access data
351 * @rx_ring: Ring we are requesting offset of
352 *
353 * Returns the offset value for ring into the data buffer.
354 */
355 static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
356 {
357 if (ice_ring_uses_build_skb(rx_ring))
358 return ICE_SKB_PAD;
359 else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
360 return XDP_PACKET_HEADROOM;
361
362 return 0;
363 }
364
365 /**
366 * ice_setup_rx_ctx - Configure a receive ring context
367 * @ring: The Rx ring to configure
368 *
369 * Configure the Rx descriptor ring in RLAN context.
370 */
371 static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
372 {
373 int chain_len = ICE_MAX_CHAINED_RX_BUFS;
374 struct ice_vsi *vsi = ring->vsi;
375 u32 rxdid = ICE_RXDID_FLEX_NIC;
376 struct ice_rlan_ctx rlan_ctx;
377 struct ice_hw *hw;
378 u16 pf_q;
379 int err;
380
381 hw = &vsi->back->hw;
382
383 /* what is Rx queue number in global space of 2K Rx queues */
384 pf_q = vsi->rxq_map[ring->q_index];
385
386 /* clear the context structure first */
387 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
388
389 /* Receive Queue Base Address.
390 * Indicates the starting address of the descriptor queue defined in
391 * 128 Byte units.
392 */
393 rlan_ctx.base = ring->dma >> 7;
394
395 rlan_ctx.qlen = ring->count;
396
397 /* Receive Packet Data Buffer Size.
398 * The Packet Data Buffer Size is defined in 128 byte units.
399 */
400 rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
401
402 /* use 32 byte descriptors */
403 rlan_ctx.dsize = 1;
404
405 /* Strip the Ethernet CRC bytes before the packet is posted to host
406 * memory.
407 */
408 rlan_ctx.crcstrip = 1;
409
410 /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
411 * and it needs to remain 1 for non-DVM capable configurations to not
412 * break backward compatibility for VF drivers. Setting this field to 0
413 * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
414 * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
415 * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
416 * check for the tag
417 */
418 if (ice_is_dvm_ena(hw))
419 if (vsi->type == ICE_VSI_VF &&
420 ice_vf_is_port_vlan_ena(vsi->vf))
421 rlan_ctx.l2tsel = 1;
422 else
423 rlan_ctx.l2tsel = 0;
424 else
425 rlan_ctx.l2tsel = 1;
426
427 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
428 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
429 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
430
431 /* This controls whether VLAN is stripped from inner headers
432 * The VLAN in the inner L2 header is stripped to the receive
433 * descriptor if enabled by this flag.
434 */
435 rlan_ctx.showiv = 0;
436
437 /* For AF_XDP ZC, we disallow packets from spanning
438 * multiple buffers, thus letting us skip that
439 * handling in the fast-path.
440 */
441 if (ring->xsk_pool)
442 chain_len = 1;
443 /* Max packet size for this queue - must not be set to a larger value
444 * than 5 x DBUF
445 */
446 rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
447 chain_len * ring->rx_buf_len);
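	/* Illustrative numbers (assumed, not from a real setup): with a 2048 byte
	 * rx_buf_len and the default chain_len of 5 the cap is 10240, so a jumbo
	 * max_frame of 9216 would be used as-is; with an AF_XDP pool
	 * (chain_len = 1) rxmax is limited to a single 2048 byte buffer.
	 */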
448
449 /* Rx queue threshold in units of 64 */
450 rlan_ctx.lrxqthresh = 1;
451
452 /* Enable Flexible Descriptors in the queue context which
453 * allows this driver to select a specific receive descriptor format
454 * increasing context priority to pick up profile ID; default is 0x01;
455 * setting it to 0x03 ensures the profile is programmed if the previous context
456 * is of the same priority
457 */
458 if (vsi->type != ICE_VSI_VF)
459 ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
460 else
461 ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
462 false);
463
464 /* Absolute queue number out of 2K needs to be passed */
465 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
466 if (err) {
467 dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
468 pf_q, err);
469 return -EIO;
470 }
471
472 if (vsi->type == ICE_VSI_VF)
473 return 0;
474
475 /* configure Rx buffer alignment */
476 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
477 ice_clear_ring_build_skb_ena(ring);
478 else
479 ice_set_ring_build_skb_ena(ring);
480
481 ring->rx_offset = ice_rx_offset(ring);
482
483 /* init queue specific tail register */
484 ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
485 writel(0, ring->tail);
486
487 return 0;
488 }
489
490 /**
491 * ice_vsi_cfg_rxq - Configure an Rx queue
492 * @ring: the ring being configured
493 *
494 * Return 0 on success and a negative value on error.
495 */
496 int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
497 {
498 struct device *dev = ice_pf_to_dev(ring->vsi->back);
499 u16 num_bufs = ICE_DESC_UNUSED(ring);
500 int err;
501
502 ring->rx_buf_len = ring->vsi->rx_buf_len;
503
504 if (ring->vsi->type == ICE_VSI_PF) {
505 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
506 /* coverity[check_return] */
507 xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
508 ring->q_index, ring->q_vector->napi.napi_id);
509
510 ring->xsk_pool = ice_xsk_pool(ring);
511 if (ring->xsk_pool) {
512 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
513
514 ring->rx_buf_len =
515 xsk_pool_get_rx_frame_size(ring->xsk_pool);
516 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
517 MEM_TYPE_XSK_BUFF_POOL,
518 NULL);
519 if (err)
520 return err;
521 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
522
523 dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
524 ring->q_index);
525 } else {
526 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
527 /* coverity[check_return] */
528 xdp_rxq_info_reg(&ring->xdp_rxq,
529 ring->netdev,
530 ring->q_index, ring->q_vector->napi.napi_id);
531
532 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
533 MEM_TYPE_PAGE_SHARED,
534 NULL);
535 if (err)
536 return err;
537 }
538 }
539
540 err = ice_setup_rx_ctx(ring);
541 if (err) {
542 dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
543 ring->q_index, err);
544 return err;
545 }
546
547 if (ring->xsk_pool) {
548 bool ok;
549
550 if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
551 dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
552 num_bufs, ring->q_index);
553 dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
554
555 return 0;
556 }
557
558 ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
559 if (!ok) {
560 u16 pf_q = ring->vsi->rxq_map[ring->q_index];
561
562 dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
563 ring->q_index, pf_q);
564 }
565
566 return 0;
567 }
568
569 ice_alloc_rx_bufs(ring, num_bufs);
570
571 return 0;
572 }
573
574 /**
575 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
576 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
577 *
578 * This function first tries to find contiguous space. If it is not successful,
579 * it tries with the scatter approach.
580 *
581 * Return 0 on success and -ENOMEM if there is no space left in the PF queue bitmap
582 */
583 int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
584 {
585 int ret = 0;
586
587 ret = __ice_vsi_get_qs_contig(qs_cfg);
588 if (ret) {
589 /* contig failed, so try with scatter approach */
590 qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
591 qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
592 qs_cfg->scatter_count);
593 ret = __ice_vsi_get_qs_sc(qs_cfg);
594 }
595 return ret;
596 }
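/* Usage sketch (illustrative only; the field values below are assumptions
 * about how a caller such as ice_vsi_get_qs() might fill the config for the
 * Tx queues of a VSI):
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = ICE_VSI_MAP_CONTIG,
 *	};
 *
 *	if (__ice_vsi_get_qs(&tx_qs_cfg))
 *		return -ENOMEM;
 */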
597
598 /**
599 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
600 * @vsi: the VSI being configured
601 * @ena: start or stop the Rx ring
602 * @rxq_idx: 0-based Rx queue index for the VSI passed in
603 * @wait: wait or don't wait for configuration to finish in hardware
604 *
605 * Return 0 on success and negative on error.
606 */
607 int
608 ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
609 {
610 int pf_q = vsi->rxq_map[rxq_idx];
611 struct ice_pf *pf = vsi->back;
612 struct ice_hw *hw = &pf->hw;
613 u32 rx_reg;
614
615 rx_reg = rd32(hw, QRX_CTRL(pf_q));
616
617 /* Skip if the queue is already in the requested state */
618 if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
619 return 0;
620
621 /* turn on/off the queue */
622 if (ena)
623 rx_reg |= QRX_CTRL_QENA_REQ_M;
624 else
625 rx_reg &= ~QRX_CTRL_QENA_REQ_M;
626 wr32(hw, QRX_CTRL(pf_q), rx_reg);
627
628 if (!wait)
629 return 0;
630
631 ice_flush(hw);
632 return ice_pf_rxq_wait(pf, pf_q, ena);
633 }
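/* Usage sketch (illustrative, with wait == false): a caller can kick off the
 * disable request for every Rx ring first and only then poll for completion,
 * roughly:
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_vsi_ctrl_one_rx_ring(vsi, false, i, false);
 *
 *	ice_flush(&vsi->back->hw);
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_vsi_wait_one_rx_ring(vsi, false, i);
 */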
634
635 /**
636 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
637 * @vsi: the VSI being configured
638 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
639 * @rxq_idx: 0-based Rx queue index for the VSI passed in
640 *
641 * This routine will wait for the given Rx queue of the VSI to reach the
642 * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to reach
643 * the requested state after multiple retries; otherwise returns 0 on
644 * success.
645 */
646 int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
647 {
648 int pf_q = vsi->rxq_map[rxq_idx];
649 struct ice_pf *pf = vsi->back;
650
651 return ice_pf_rxq_wait(pf, pf_q, ena);
652 }
653
654 /**
655 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
656 * @vsi: the VSI being configured
657 *
658 * We allocate one q_vector per queue interrupt. If allocation fails we
659 * return -ENOMEM.
660 */
661 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
662 {
663 struct device *dev = ice_pf_to_dev(vsi->back);
664 u16 v_idx;
665 int err;
666
667 if (vsi->q_vectors[0]) {
668 dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
669 return -EEXIST;
670 }
671
672 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
673 err = ice_vsi_alloc_q_vector(vsi, v_idx);
674 if (err)
675 goto err_out;
676 }
677
678 return 0;
679
680 err_out:
681 while (v_idx--)
682 ice_free_q_vector(vsi, v_idx);
683
684 dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
685 vsi->num_q_vectors, vsi->vsi_num, err);
686 vsi->num_q_vectors = 0;
687 return err;
688 }
689
690 /**
691 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
692 * @vsi: the VSI being configured
693 *
694 * This function maps descriptor rings to the queue-specific vectors allotted
695 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
696 * and Rx rings to the vector as "efficiently" as possible.
697 */
698 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
699 {
700 int q_vectors = vsi->num_q_vectors;
701 u16 tx_rings_rem, rx_rings_rem;
702 int v_id;
703
704 /* initially assign the VSI's queue counts as the remaining rings to distribute */
705 tx_rings_rem = vsi->num_txq;
706 rx_rings_rem = vsi->num_rxq;
707
708 for (v_id = 0; v_id < q_vectors; v_id++) {
709 struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
710 u8 tx_rings_per_v, rx_rings_per_v;
711 u16 q_id, q_base;
712
713 /* Tx rings mapping to vector */
714 tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
715 q_vectors - v_id);
716 q_vector->num_ring_tx = tx_rings_per_v;
717 q_vector->tx.tx_ring = NULL;
718 q_vector->tx.itr_idx = ICE_TX_ITR;
719 q_base = vsi->num_txq - tx_rings_rem;
720
721 for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
722 struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
723
724 tx_ring->q_vector = q_vector;
725 tx_ring->next = q_vector->tx.tx_ring;
726 q_vector->tx.tx_ring = tx_ring;
727 }
728 tx_rings_rem -= tx_rings_per_v;
729
730 /* Rx rings mapping to vector */
731 rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
732 q_vectors - v_id);
733 q_vector->num_ring_rx = rx_rings_per_v;
734 q_vector->rx.rx_ring = NULL;
735 q_vector->rx.itr_idx = ICE_RX_ITR;
736 q_base = vsi->num_rxq - rx_rings_rem;
737
738 for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
739 struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
740
741 rx_ring->q_vector = q_vector;
742 rx_ring->next = q_vector->rx.rx_ring;
743 q_vector->rx.rx_ring = rx_ring;
744 }
745 rx_rings_rem -= rx_rings_per_v;
746 }
747 }
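/* Distribution sketch (hypothetical counts): with num_txq = 10 and
 * num_q_vectors = 4 the loop above assigns 3, 3, 2, 2 Tx rings to vectors
 * 0..3, since DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3,
 * DIV_ROUND_UP(4, 2) = 2 and DIV_ROUND_UP(2, 1) = 2; Rx rings are spread the
 * same way.
 */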
748
749 /**
750 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
751 * @vsi: the VSI having memory freed
752 */
753 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
754 {
755 int v_idx;
756
757 ice_for_each_q_vector(vsi, v_idx)
758 ice_free_q_vector(vsi, v_idx);
759 }
760
761 /**
762 * ice_vsi_cfg_txq - Configure single Tx queue
763 * @vsi: the VSI that queue belongs to
764 * @ring: Tx ring to be configured
765 * @qg_buf: queue group buffer
766 */
767 int
768 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
769 struct ice_aqc_add_tx_qgrp *qg_buf)
770 {
771 u8 buf_len = struct_size(qg_buf, txqs, 1);
772 struct ice_tlan_ctx tlan_ctx = { 0 };
773 struct ice_aqc_add_txqs_perq *txq;
774 struct ice_channel *ch = ring->ch;
775 struct ice_pf *pf = vsi->back;
776 struct ice_hw *hw = &pf->hw;
777 int status;
778 u16 pf_q;
779 u8 tc;
780
781 /* Configure XPS */
782 ice_cfg_xps_tx_ring(ring);
783
784 pf_q = ring->reg_idx;
785 ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
786 /* copy context contents into the qg_buf */
787 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
788 ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
789 ice_tlan_ctx_info);
790
791 /* init queue specific tail reg. It is referred to as
792 * transmit comm scheduler queue doorbell.
793 */
794 ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
795
796 if (IS_ENABLED(CONFIG_DCB))
797 tc = ring->dcb_tc;
798 else
799 tc = 0;
800
801 /* Add unique software queue handle of the Tx queue per
802 * TC into the VSI Tx ring
803 */
804 if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
805 ring->q_handle = ice_eswitch_calc_txq_handle(ring);
806
807 if (ring->q_handle == ICE_INVAL_Q_INDEX)
808 return -ENODEV;
809 } else {
810 ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
811 }
812
813 if (ch)
814 status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
815 ring->q_handle, 1, qg_buf, buf_len,
816 NULL);
817 else
818 status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
819 ring->q_handle, 1, qg_buf, buf_len,
820 NULL);
821 if (status) {
822 dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
823 status);
824 return status;
825 }
826
827 /* Add Tx Queue TEID into the VSI Tx ring from the
828 * response. This will complete configuring and
829 * enabling the queue.
830 */
831 txq = &qg_buf->txqs[0];
832 if (pf_q == le16_to_cpu(txq->txq_id))
833 ring->txq_teid = le32_to_cpu(txq->q_teid);
834
835 return 0;
836 }
837
838 /**
839 * ice_cfg_itr - configure the initial interrupt throttle values
840 * @hw: pointer to the HW structure
841 * @q_vector: interrupt vector that's being configured
842 *
843 * Configure interrupt throttling values for the ring containers that are
844 * associated with the interrupt vector passed in.
845 */
846 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
847 {
848 ice_cfg_itr_gran(hw);
849
850 if (q_vector->num_ring_rx)
851 ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
852
853 if (q_vector->num_ring_tx)
854 ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
855
856 ice_write_intrl(q_vector, q_vector->intrl);
857 }
858
859 /**
860 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
861 * @vsi: the VSI being configured
862 * @txq: Tx queue being mapped to MSI-X vector
863 * @msix_idx: MSI-X vector index within the function
864 * @itr_idx: ITR index of the interrupt cause
865 *
866 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
867 * within the function space.
868 */
869 void
870 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
871 {
872 struct ice_pf *pf = vsi->back;
873 struct ice_hw *hw = &pf->hw;
874 u32 val;
875
876 itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
877
878 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
879 ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
880
881 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
882 if (ice_is_xdp_ena_vsi(vsi)) {
883 u32 xdp_txq = txq + vsi->num_xdp_txq;
884
885 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
886 val);
887 }
888 ice_flush(hw);
889 }
890
891 /**
892 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
893 * @vsi: the VSI being configured
894 * @rxq: Rx queue being mapped to MSI-X vector
895 * @msix_idx: MSI-X vector index within the function
896 * @itr_idx: ITR index of the interrupt cause
897 *
898 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
899 * within the function space.
900 */
901 void
902 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
903 {
904 struct ice_pf *pf = vsi->back;
905 struct ice_hw *hw = &pf->hw;
906 u32 val;
907
908 itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
909
910 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
911 ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
912
913 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
914
915 ice_flush(hw);
916 }
917
918 /**
919 * ice_trigger_sw_intr - trigger a software interrupt
920 * @hw: pointer to the HW structure
921 * @q_vector: interrupt vector to trigger the software interrupt for
922 */
923 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
924 {
925 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
926 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
927 GLINT_DYN_CTL_SWINT_TRIG_M |
928 GLINT_DYN_CTL_INTENA_M);
929 }
930
931 /**
932 * ice_vsi_stop_tx_ring - Disable single Tx ring
933 * @vsi: the VSI being configured
934 * @rst_src: reset source
935 * @rel_vmvf_num: Relative ID of VF/VM
936 * @ring: Tx ring to be stopped
937 * @txq_meta: Meta data of Tx ring to be stopped
938 */
939 int
940 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
941 u16 rel_vmvf_num, struct ice_tx_ring *ring,
942 struct ice_txq_meta *txq_meta)
943 {
944 struct ice_pf *pf = vsi->back;
945 struct ice_q_vector *q_vector;
946 struct ice_hw *hw = &pf->hw;
947 int status;
948 u32 val;
949
950 /* clear cause_ena bit for disabled queues */
951 val = rd32(hw, QINT_TQCTL(ring->reg_idx));
952 val &= ~QINT_TQCTL_CAUSE_ENA_M;
953 wr32(hw, QINT_TQCTL(ring->reg_idx), val);
954
955 /* software is expected to wait for 100 ns */
956 ndelay(100);
957
958 /* trigger a software interrupt for the vector
959 * associated to the queue to schedule NAPI handler
960 */
961 q_vector = ring->q_vector;
962 if (q_vector)
963 ice_trigger_sw_intr(hw, q_vector);
964
965 status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
966 txq_meta->tc, 1, &txq_meta->q_handle,
967 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
968 rel_vmvf_num, NULL);
969
970 /* if the disable queue command was exercised during an
971 * active reset flow, -EBUSY is returned.
972 * This is not an error as the reset operation disables
973 * queues at the hardware level anyway.
974 */
975 if (status == -EBUSY) {
976 dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
977 } else if (status == -ENOENT) {
978 dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
979 } else if (status) {
980 dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
981 status);
982 return status;
983 }
984
985 return 0;
986 }
987
988 /**
989 * ice_fill_txq_meta - Prepare the Tx queue's meta data
990 * @vsi: VSI that ring belongs to
991 * @ring: ring that txq_meta will be based on
992 * @txq_meta: a helper struct that wraps Tx queue's information
993 *
994 * Set up a helper struct that will contain all the necessary fields that
995 * are needed for stopping the Tx queue
996 */
997 void
998 ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
999 struct ice_txq_meta *txq_meta)
1000 {
1001 struct ice_channel *ch = ring->ch;
1002 u8 tc;
1003
1004 if (IS_ENABLED(CONFIG_DCB))
1005 tc = ring->dcb_tc;
1006 else
1007 tc = 0;
1008
1009 txq_meta->q_id = ring->reg_idx;
1010 txq_meta->q_teid = ring->txq_teid;
1011 txq_meta->q_handle = ring->q_handle;
1012 if (ch) {
1013 txq_meta->vsi_idx = ch->ch_vsi->idx;
1014 txq_meta->tc = 0;
1015 } else {
1016 txq_meta->vsi_idx = vsi->idx;
1017 txq_meta->tc = tc;
1018 }
1019 }
1020