/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool.h>
#endif

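/* Number of stats groups exposed by the current profile; profiles that
 * do not implement the callback report zero.
 */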
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

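/* Refresh the profile's stats groups in reverse group order. The NDO
 * variant updates only the groups that opted in via the
 * MLX5E_NDO_UPDATE_STATS mask; mlx5e_stats_update() refreshes them all.
 */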
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

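/* Each ethtool counter name occupies a fixed ETH_GSTRING_LEN slot in
 * @data; @idx tracks the next free slot across stats groups.
 */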
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

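/* The helpers below fold per-ring counters into the global software
 * stats. Note the naming split: XDP_REDIRECT traffic goes out on a
 * dedicated xdpsq and is accounted under tx_xdp_*, while XDP_TX of
 * received packets is accounted under rx_xdp_tx_*.
 */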
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops  += xdpsq_red_stats->nops;
	s->tx_xdp_full  += xdpsq_red_stats->full;
	s->tx_xdp_err   += xdpsq_red_stats->err;
	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_gro_packets             += rq_stats->gro_packets;
	s->rx_gro_bytes               += rq_stats->gro_bytes;
	s->rx_gro_skbs                += rq_stats->gro_skbs;
	s->rx_gro_match_packets       += rq_stats->gro_match_packets;
	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse             += rq_stats->cache_reuse;
	s->rx_cache_full              += rq_stats->cache_full;
	s->rx_cache_empty             += rq_stats->cache_empty;
	s->rx_cache_busy              += rq_stats->cache_busy;
	s->rx_cache_waive             += rq_stats->cache_waive;
	s->rx_congst_umr              += rq_stats->congst_umr;
	s->rx_arfs_err                += rq_stats->arfs_err;
	s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
	s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order		+= rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached			+= rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full		+= rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring			+= rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full		+= rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref		+= rq_stats->pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events      += ch_stats->events;
	s->ch_poll        += ch_stats->poll;
	s->ch_arm         += ch_stats->arm;
	s->ch_aff_change  += ch_stats->aff_change;
	s->ch_force_irq   += ch_stats->force_irq;
	s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}

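/* PTP channel counters live outside priv->channel_stats, so they are
 * folded in separately, and only if the PTP SQ/RQ was ever opened.
 */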
static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

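/* Lockless read of the HTB QoS SQ stats: loading htb_max_qos_sqs with
 * acquire semantics (paired with the release store in
 * mlx5e_open_qos_sq) guarantees that every stats pointer below that
 * bound is already published, even while QoS SQs are being opened
 * concurrently.
 */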
static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

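/* page_pool counters are maintained by the page_pool core and are
 * cumulative over the pool's lifetime, so they are copied into
 * rq_stats by plain assignment rather than accumulated.
 */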
#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif

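/* Recompute the software aggregates from scratch: zero the totals,
 * then walk every channel that was ever opened (stats_nch, not just
 * the currently active ones) so that counters survive channel
 * reconfiguration.
 */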
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];

		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

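/* Queue counters are read from firmware via QUERY_Q_COUNTER; a failed
 * query leaves the previously cached value untouched.
 */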
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

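/* vNIC environment counters; each is exposed only when the matching
 * device capability bit is set (see the NUM_VNIC_ENV_*_COUNTERS
 * macros below).
 */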
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

static const struct counter_desc vnic_env_stats_drop_desc[] = {
	{ "rx_oversize_pkts_buffer",
		VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_drop_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_drop_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

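/* Port counters are read through the PPCNT access register, one
 * counter group per query. Each 64-bit counter is laid out as a
 * _high/_low dword pair, so the byte offset of the _high field is
 * also the offset of the whole big-endian 64-bit value.
 */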
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

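/* The ethtool standard-stats getters for pause, PHY, MAC and
 * control-frame counters below all parse a fresh IEEE 802.3 PPCNT
 * query; the RMON getter further down uses the RFC 2819 group instead.
 */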
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS	ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};

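/* Map the RFC 2819 packet-size buckets onto the ethtool RMON
 * histogram; mlx5e_rmon_ranges above describes the bucket bounds.
 */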
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

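/* Number of physical lanes, taken from the width field of the PMLP
 * port mapping register; returns 0 if the query fails.
 */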
static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}

static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}

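/* Firecode FEC reports corrected/uncorrectable block counts per lane;
 * this macro copies one lane's pair out of the PPCNT physical layer
 * counter group.
 */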
#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})

static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
			     u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}

static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
{
	fec_stats->corrected_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_corrected_blocks);
	fec_stats->uncorrectable_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_uncorrectable_blocks);
}

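/* FEC block counters live in the physical layer counters group; the
 * active FEC mode decides whether they are reported as Reed-Solomon
 * totals or as per-lane Firecode counters.
 */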
static void fec_set_block_stats(struct mlx5e_priv *priv,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int mode = fec_active_mode(mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}

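/* The corrected-bits total is part of the statistical counters group,
 * which is why mlx5e_stats_fec_get() below is gated on the
 * ppcnt_statistical_group capability.
 */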
static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
					 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}

void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
		return;

	fec_set_corrected_bits_total(priv, fec_stats);
	fec_set_block_stats(priv, fec_stats);
}

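/* 64-bit PPCNT counters are laid out as _high/_low 32-bit pairs; the
 * descriptors store the offset of the _high half and the counter is read
 * as a single big-endian 64-bit value starting there.
 */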
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

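/* PCIe counters are queried through the MPCNT register rather than
 * PPCNT. The signal-integrity and stall counters are 32 bits wide; the
 * outbound buffer overflow counter is a 64-bit _high/_low pair.
 */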
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

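/* Per-traffic-class buffer and congestion counters: the PPCNT register
 * is queried once per priority, and the "%d" in each format string is
 * later filled in with the priority number.
 */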
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

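/* Per-priority port counters: the per-priority PPCNT group is queried
 * once for each of the NUM_PPORT_PRIO priorities and covers traffic,
 * pause and PFC statistics.
 */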
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

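/* Returns a bitmask of the priorities on which PFC is enabled in either
 * the TX or the RX direction, or 0 on error and on non-Ethernet ports.
 */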
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

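/* One PFC counter set is exposed per PFC-enabled priority (hence the
 * hweight8() below), plus one "global" set when plain pause is enabled,
 * plus the stall counters when the device supports stall detection.
 */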
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

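/* Port module event (PME) counters are software counters maintained by
 * the core driver; each descriptor offset is the event code scaled by
 * sizeof(u64) into the status/error counter arrays.
 */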
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_ktls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_ktls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

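/* Per-ring software counter descriptors. The MLX5E_DECLARE_*_STAT macros
 * record a field offset in the corresponding stats struct together with
 * a format string (e.g. "rx%d_packets") that is later completed with the
 * ring index, and the values are read with MLX5E_READ_CTR64_CPU.
 */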
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};

static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)

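/* HTB QoS SQ counters. htb_max_qos_sqs is not decreased when QoS SQs are
 * closed and their stats stay allocated, so the count returned here stays
 * consistent with the fill_strs/fill_stats calls that follow.
 */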
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

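/* PTP channel counters are exposed only after a PTP channel has been
 * opened: TX adds per-TC SQ and CQ counters, RX adds one RQ counter set.
 */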
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			"%s", ptp_ch_stats_desc[i].format);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_sq_stats_desc[i].format, tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_cq_stats_desc[i].format, tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

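/* Regular channel counters; fill_strs and fill_stats below must emit in
 * the same order: per-channel ch/rq/xskrq/rq_xdpsq blocks first, then
 * SQs grouped by TC, then the XSK and XDP transmit queues.
 */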
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->stats_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* The groups are listed here in the reverse of the order in which
 * update_stats() invokes them.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
#ifdef CONFIG_MLX5_EN_MACSEC
	&MLX5E_STATS_GRP(macsec_hw),
#endif
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}