Home
last modified time | relevance | path

Searched refs:mpwqe (Results 1 – 12 of 12) sorted by relevance

/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c:21 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
26 rq->mpwqe.pages_per_wqe); in mlx5e_xsk_alloc_rx_mpwqe()
34 for (; batch < rq->mpwqe.pages_per_wqe; batch++) { in mlx5e_xsk_alloc_rx_mpwqe()
40 pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs); in mlx5e_xsk_alloc_rx_mpwqe()
42 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_xsk_alloc_rx_mpwqe()
44 if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) { in mlx5e_xsk_alloc_rx_mpwqe()
52 } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) { in mlx5e_xsk_alloc_rx_mpwqe()
61 } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) { in mlx5e_xsk_alloc_rx_mpwqe()
62 u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2); in mlx5e_xsk_alloc_rx_mpwqe()
85 __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) - in mlx5e_xsk_alloc_rx_mpwqe()
[all …]
tx.c:103 if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
115 if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/en/
txrx.h:182 return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); in mlx5e_shampo_get_cqe_header_index()
329 mlx5_wq_ll_reset(&rq->mpwqe.wq); in mlx5e_rqwq_reset()
330 rq->mpwqe.actual_wq_head = 0; in mlx5e_rqwq_reset()
356 return mlx5_wq_ll_get_size(&rq->mpwqe.wq); in mlx5e_rqwq_get_size()
366 return rq->mpwqe.wq.cur_sz; in mlx5e_rqwq_get_cur_sz()
376 return mlx5_wq_ll_get_head(&rq->mpwqe.wq); in mlx5e_rqwq_get_head()
386 return mlx5_wq_ll_get_counter(&rq->mpwqe.wq); in mlx5e_rqwq_get_wqe_counter()
486 size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe); in mlx5e_get_mpw_info()
488 return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz)); in mlx5e_get_mpw_info()
xdp.c:230 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_session_start()
247 stats->mpwqe++; in mlx5e_xdp_mpwqe_session_start()
253 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_complete()
280 if (unlikely(!sq->mpwqe.wqe)) { in mlx5e_xmit_xdp_frame_check_mpwqe()
303 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xmit_xdp_frame_mpwqe()
311 if (unlikely(sq->mpwqe.wqe)) in mlx5e_xmit_xdp_frame_mpwqe()
662 if (sq->mpwqe.wqe) in mlx5e_xdp_xmit()
674 if (xdpsq->mpwqe.wqe) in mlx5e_xdp_rx_poll_complete()
xdp.h:144 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_add_dseg()
params.c:254 bool mpwqe) in mlx5e_rx_get_linear_stride_sz() argument
260 return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE; in mlx5e_rx_get_linear_stride_sz()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c:458 if (bitmap_full(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe)) in mlx5e_free_rx_mpwqe()
461 no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe); in mlx5e_free_rx_mpwqe()
468 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) in mlx5e_free_rx_mpwqe()
472 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) in mlx5e_free_rx_mpwqe()
480 struct mlx5_wq_ll *wq = &rq->mpwqe.wq; in mlx5e_post_rx_mpwqe()
535 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_build_shampo_hd_umr()
608 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_alloc_rx_hd_mpwqe()
638 index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); in mlx5e_alloc_rx_hd_mpwqe()
663 pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs); in mlx5e_alloc_rx_mpwqe()
665 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_alloc_rx_mpwqe()
[all …]
en_main.c:225 ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift, in mlx5e_build_umr_wqe()
226 rq->mpwqe.umr_mode), in mlx5e_build_umr_wqe()
231 cseg->umr_mkey = rq->mpwqe.umr_mkey_be; in mlx5e_build_umr_wqe()
234 octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode); in mlx5e_build_umr_wqe()
241 rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo), in mlx5e_rq_shampo_hd_alloc()
243 if (!rq->mpwqe.shampo) in mlx5e_rq_shampo_hd_alloc()
250 kvfree(rq->mpwqe.shampo); in mlx5e_rq_shampo_hd_free()
255 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; in mlx5e_rq_shampo_hd_info_alloc()
274 kvfree(rq->mpwqe.shampo->bitmap); in mlx5e_rq_shampo_hd_info_free()
275 kvfree(rq->mpwqe.shampo->info); in mlx5e_rq_shampo_hd_info_free()
[all …]
en_tx.c:504 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_same_eseg()
513 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_session_start()
536 return sq->mpwqe.wqe; in mlx5e_tx_mpwqe_session_is_active()
541 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_add_dseg()
559 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_tx_mpwqe_session_complete()
616 if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) { in mlx5e_sq_xmit_mpwqe()
en.h:429 struct mlx5e_tx_mpwqe mpwqe; member
538 struct mlx5e_tx_mpwqe mpwqe; member
716 } mpwqe; member
en_stats.c:277 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; in mlx5e_stats_grp_sw_update_stats_xdp_red()
289 s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; in mlx5e_stats_grp_sw_update_stats_xdpsq()
301 s->tx_xsk_mpwqe += xsksq_stats->mpwqe; in mlx5e_stats_grp_sw_update_stats_xsksq()
2035 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2045 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2077 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
en_stats.h:437 u64 mpwqe; member