1 /* bnx2x_stats.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17 #include "bnx2x_cmn.h"
18 #include "bnx2x_stats.h"
19 
20 /* Statistics */
21 
22 /****************************************************************************
23 * Macros
24 ****************************************************************************/
25 
26 /* sum[hi:lo] += add[hi:lo] */
27 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
28 	do { \
29 		s_lo += a_lo; \
30 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
31 	} while (0)
32 
33 /* difference = minuend - subtrahend */
34 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
35 	do { \
36 		if (m_lo < s_lo) { \
37 			/* underflow */ \
38 			d_hi = m_hi - s_hi; \
39 			if (d_hi > 0) { \
40 				/* we can 'loan' 1 */ \
41 				d_hi--; \
42 				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
43 			} else { \
44 				/* m_hi <= s_hi */ \
45 				d_hi = 0; \
46 				d_lo = 0; \
47 			} \
48 		} else { \
49 			/* m_lo >= s_lo */ \
50 			if (m_hi < s_hi) { \
51 				d_hi = 0; \
52 				d_lo = 0; \
53 			} else { \
54 				/* m_hi >= s_hi */ \
55 				d_hi = m_hi - s_hi; \
56 				d_lo = m_lo - s_lo; \
57 			} \
58 		} \
59 	} while (0)
60 
61 #define UPDATE_STAT64(s, t) \
62 	do { \
63 		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
64 			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
65 		pstats->mac_stx[0].t##_hi = new->s##_hi; \
66 		pstats->mac_stx[0].t##_lo = new->s##_lo; \
67 		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
68 		       pstats->mac_stx[1].t##_lo, diff.lo); \
69 	} while (0)
70 
71 #define UPDATE_STAT64_NIG(s, t) \
72 	do { \
73 		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
74 			diff.lo, new->s##_lo, old->s##_lo); \
75 		ADD_64(estats->t##_hi, diff.hi, \
76 		       estats->t##_lo, diff.lo); \
77 	} while (0)
78 
79 /* sum[hi:lo] += add */
80 #define ADD_EXTEND_64(s_hi, s_lo, a) \
81 	do { \
82 		s_lo += a; \
83 		s_hi += (s_lo < a) ? 1 : 0; \
84 	} while (0)
85 
86 #define UPDATE_EXTEND_STAT(s) \
87 	do { \
88 		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
89 			      pstats->mac_stx[1].s##_lo, \
90 			      new->s); \
91 	} while (0)
92 
93 #define UPDATE_EXTEND_TSTAT(s, t) \
94 	do { \
95 		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
96 		old_tclient->s = tclient->s; \
97 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
98 	} while (0)
99 
100 #define UPDATE_EXTEND_USTAT(s, t) \
101 	do { \
102 		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
103 		old_uclient->s = uclient->s; \
104 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
105 	} while (0)
106 
107 #define UPDATE_EXTEND_XSTAT(s, t) \
108 	do { \
109 		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
110 		old_xclient->s = xclient->s; \
111 		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
112 	} while (0)
113 
114 /* minuend -= subtrahend */
115 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
116 	do { \
117 		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
118 	} while (0)
119 
120 /* minuend[hi:lo] -= subtrahend */
121 #define SUB_EXTEND_64(m_hi, m_lo, s) \
122 	do { \
123 		SUB_64(m_hi, 0, m_lo, s); \
124 	} while (0)
125 
126 #define SUB_EXTEND_USTAT(s, t) \
127 	do { \
128 		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
129 		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
130 	} while (0)
131 
132 /*
133  * General service functions
134  */
135 
/* Fold a pair of consecutive u32 words (hi word first, lo word second)
 * into a long.  On 32-bit kernels only the low word fits into a long,
 * so the high word is dropped.
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = hiref[1];

#if (BITS_PER_LONG == 64)
	return HILO_U64(hiref[0], lo);
#else
	return lo;
#endif
}
147 
148 /*
149  * Init service functions
150  */
151 
152 
bnx2x_storm_stats_post(struct bnx2x * bp)153 static void bnx2x_storm_stats_post(struct bnx2x *bp)
154 {
155 	if (!bp->stats_pending) {
156 		struct common_query_ramrod_data ramrod_data = {0};
157 		int i, rc;
158 
159 		spin_lock_bh(&bp->stats_lock);
160 
161 		if (bp->stats_pending) {
162 			spin_unlock_bh(&bp->stats_lock);
163 			return;
164 		}
165 
166 		ramrod_data.drv_counter = bp->stats_counter++;
167 		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
168 		for_each_eth_queue(bp, i)
169 			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
170 
171 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
172 				   ((u32 *)&ramrod_data)[1],
173 				   ((u32 *)&ramrod_data)[0], 1);
174 		if (rc == 0)
175 			bp->stats_pending = 1;
176 
177 		spin_unlock_bh(&bp->stats_lock);
178 	}
179 }
180 
/* Kick off the staged DMAE statistics transfers.
 *
 * If several commands were staged in bp->slowpath->dmae[]
 * (bp->executer_idx != 0), a "loader" command is posted first: it
 * copies the next staged command into the DMAE command memory
 * (DMAE_REG_CMD_MEM) and its completion triggers the next GO register,
 * chaining the whole sequence.  Otherwise only the single function
 * stats command prepared in bp->stats_dmae is posted.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark completion; slow (emulation/FPGA) chips skip DMAE */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode =  bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
						 true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		/* source: first staged command in host memory */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		/* destination: the next DMAE command slot in GRC space */
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* NOTE(review): E1 copies one dword less -- presumably a
		 * shorter command format on that chip; confirm vs. HW spec
		 */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		/* completion fires the GO register of the loaded command */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
220 
bnx2x_stats_comp(struct bnx2x * bp)221 static int bnx2x_stats_comp(struct bnx2x *bp)
222 {
223 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
224 	int cnt = 10;
225 
226 	might_sleep();
227 	while (*stats_comp != DMAE_COMP_VAL) {
228 		if (!cnt) {
229 			BNX2X_ERR("timeout waiting for stats finished\n");
230 			break;
231 		}
232 		cnt--;
233 		msleep(1);
234 	}
235 	return 1;
236 }
237 
238 /*
239  * Statistics service functions
240  */
241 
/* Read back the accumulated port statistics from the chip (port_stx
 * area in GRC space) into the host port_stats buffer.  Used when this
 * function takes over as PMF.  The read is split into two chained DMAE
 * commands because a single read is limited to DMAE_LEN32_RD_MAX
 * dwords: the first completes to GRC (triggering the second), the
 * second completes to the PCI stats_comp word which we then poll.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	/* first chunk: up to DMAE_LEN32_RD_MAX dwords, GRC completion */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: remainder of host_port_stats, PCI completion */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
287 
/* Build the chain of DMAE commands executed on every statistics cycle
 * by the PMF:
 *   - write host port/function stats out to the shmem areas (MCP);
 *   - read the active MAC (BMAC1/BMAC2 or EMAC) counter blocks into
 *     the host mac_stats buffer;
 *   - read the NIG statistics.
 * All commands but the last complete to GRC (chaining through the GO
 * registers); the final NIG read completes to the PCI stats_comp word.
 * The commands are only staged here -- bnx2x_hw_stats_post() runs them.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				    true, DMAE_COMP_GRC);

	/* host port stats -> shmem port_stx area */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* host function stats -> shmem func_stx area */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		/* E1x uses BMAC1 register layout, E2+ uses BMAC2 */
		if (CHIP_IS_E1x(bp)) {
			dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
			dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		} else {
			dmae->src_addr_lo = (mac_addr +
				     BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
			dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
				     BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
		}

		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		if (CHIP_IS_E1x(bp)) {
			dmae->src_addr_lo = (mac_addr +
					     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			dmae->dst_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac1_stats, rx_stat_gr64_lo));
			dmae->dst_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac1_stats, rx_stat_gr64_lo));
			dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		} else {
			dmae->src_addr_lo =
				(mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			dmae->dst_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac2_stats, rx_stat_gr64_lo));
			dmae->dst_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac2_stats, rx_stat_gr64_lo));
			dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
				     BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
		}

		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	/* the last 4 dwords of nig_stats are not read here */
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* last command in the chain -> PCI completion into stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
494 
/* Prepare the single DMAE command used by a non-PMF function: write the
 * host function statistics out to its shmem func_stx area, completing
 * directly to the PCI stats_comp word.  The command is only staged in
 * bp->stats_dmae; bnx2x_hw_stats_post() runs it.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* no staged executer commands -- stats_dmae is posted directly */
	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
522 
/* (Re)stage the statistics DMAE commands and issue the first HW and
 * storm statistics queries.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	/* only the PMF collects port/MAC/NIG stats; other functions
	 * program just their own function stats command
	 */
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
534 
/* Entered when this function becomes the PMF: wait out any pending
 * DMAE, read back the accumulated port stats from the chip, then start
 * the regular statistics flow.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
541 
/* Wait for the previous statistics cycle to complete, then start a new
 * one (e.g. after a link change).
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
547 
/* Fold the freshly DMAE'd BMAC counter snapshot into the accumulated
 * port statistics.  UPDATE_STAT64 keeps the last raw HW value in
 * mac_stx[0] and accumulates the delta into mac_stx[1] (see the macro);
 * finally the pause-frame totals are mirrored into bp->eth_stats.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch 64-bit delta used by the UPDATE_STAT64 macros */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
	}

	/* mirror the BMAC pause counters into the ethtool stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
636 
/* Fold the freshly DMAE'd EMAC counter snapshot into the accumulated
 * port statistics.  EMAC counters are plain 32-bit values, so
 * UPDATE_EXTEND_STAT accumulates them into the 64-bit mac_stx[1]
 * fields; pause totals (xon + xoff) are then mirrored into eth_stats.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause received = xon + xoff pause frames */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause sent = xon + xoff pause frames */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
693 
/* Process a completed HW statistics DMAE cycle: update the active MAC's
 * accumulated counters, add the NIG deltas (BRB discard/truncate and
 * egress packet-size counters), refresh the old NIG snapshot, and copy
 * the accumulated MAC stats into bp->eth_stats.  Returns 0 on success,
 * -1 if no MAC is active (should not happen once stats run).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch 64-bit delta used by the UPDATE_STAT64_NIG macros */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* NIG counters are 32-bit; extend the new-old delta to 64 bits */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* new snapshot becomes the baseline for the next cycle */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC stats (mac_stx[1]) into eth_stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end markers flag a consistent stats record */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
746 
bnx2x_storm_stats_update(struct bnx2x * bp)747 static int bnx2x_storm_stats_update(struct bnx2x *bp)
748 {
749 	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
750 	struct tstorm_per_port_stats *tport =
751 					&stats->tstorm_common.port_statistics;
752 	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
753 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
754 	int i;
755 	u16 cur_stats_counter;
756 
757 	/* Make sure we use the value of the counter
758 	 * used for sending the last stats ramrod.
759 	 */
760 	spin_lock_bh(&bp->stats_lock);
761 	cur_stats_counter = bp->stats_counter - 1;
762 	spin_unlock_bh(&bp->stats_lock);
763 
764 	memcpy(&(fstats->total_bytes_received_hi),
765 	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
766 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
767 	estats->error_bytes_received_hi = 0;
768 	estats->error_bytes_received_lo = 0;
769 	estats->etherstatsoverrsizepkts_hi = 0;
770 	estats->etherstatsoverrsizepkts_lo = 0;
771 	estats->no_buff_discard_hi = 0;
772 	estats->no_buff_discard_lo = 0;
773 
774 	for_each_eth_queue(bp, i) {
775 		struct bnx2x_fastpath *fp = &bp->fp[i];
776 		int cl_id = fp->cl_id;
777 		struct tstorm_per_client_stats *tclient =
778 				&stats->tstorm_common.client_statistics[cl_id];
779 		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
780 		struct ustorm_per_client_stats *uclient =
781 				&stats->ustorm_common.client_statistics[cl_id];
782 		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
783 		struct xstorm_per_client_stats *xclient =
784 				&stats->xstorm_common.client_statistics[cl_id];
785 		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
786 		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
787 		u32 diff;
788 
789 		/* are storm stats valid? */
790 		if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
791 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
792 			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
793 			   i, xclient->stats_counter, cur_stats_counter + 1);
794 			return -1;
795 		}
796 		if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
797 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
798 			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
799 			   i, tclient->stats_counter, cur_stats_counter + 1);
800 			return -2;
801 		}
802 		if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
803 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
804 			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
805 			   i, uclient->stats_counter, cur_stats_counter + 1);
806 			return -4;
807 		}
808 
809 		qstats->total_bytes_received_hi =
810 			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
811 		qstats->total_bytes_received_lo =
812 			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
813 
814 		ADD_64(qstats->total_bytes_received_hi,
815 		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
816 		       qstats->total_bytes_received_lo,
817 		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));
818 
819 		ADD_64(qstats->total_bytes_received_hi,
820 		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
821 		       qstats->total_bytes_received_lo,
822 		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));
823 
824 		SUB_64(qstats->total_bytes_received_hi,
825 		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
826 		       qstats->total_bytes_received_lo,
827 		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
828 
829 		SUB_64(qstats->total_bytes_received_hi,
830 		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
831 		       qstats->total_bytes_received_lo,
832 		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
833 
834 		SUB_64(qstats->total_bytes_received_hi,
835 		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
836 		       qstats->total_bytes_received_lo,
837 		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
838 
839 		qstats->valid_bytes_received_hi =
840 					qstats->total_bytes_received_hi;
841 		qstats->valid_bytes_received_lo =
842 					qstats->total_bytes_received_lo;
843 
844 		qstats->error_bytes_received_hi =
845 				le32_to_cpu(tclient->rcv_error_bytes.hi);
846 		qstats->error_bytes_received_lo =
847 				le32_to_cpu(tclient->rcv_error_bytes.lo);
848 
849 		ADD_64(qstats->total_bytes_received_hi,
850 		       qstats->error_bytes_received_hi,
851 		       qstats->total_bytes_received_lo,
852 		       qstats->error_bytes_received_lo);
853 
854 		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
855 					total_unicast_packets_received);
856 		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
857 					total_multicast_packets_received);
858 		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
859 					total_broadcast_packets_received);
860 		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
861 					etherstatsoverrsizepkts);
862 		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
863 
864 		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
865 					total_unicast_packets_received);
866 		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
867 					total_multicast_packets_received);
868 		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
869 					total_broadcast_packets_received);
870 		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
871 		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
872 		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
873 
874 		qstats->total_bytes_transmitted_hi =
875 				le32_to_cpu(xclient->unicast_bytes_sent.hi);
876 		qstats->total_bytes_transmitted_lo =
877 				le32_to_cpu(xclient->unicast_bytes_sent.lo);
878 
879 		ADD_64(qstats->total_bytes_transmitted_hi,
880 		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
881 		       qstats->total_bytes_transmitted_lo,
882 		       le32_to_cpu(xclient->multicast_bytes_sent.lo));
883 
884 		ADD_64(qstats->total_bytes_transmitted_hi,
885 		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
886 		       qstats->total_bytes_transmitted_lo,
887 		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));
888 
889 		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
890 					total_unicast_packets_transmitted);
891 		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
892 					total_multicast_packets_transmitted);
893 		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
894 					total_broadcast_packets_transmitted);
895 
896 		old_tclient->checksum_discard = tclient->checksum_discard;
897 		old_tclient->ttl0_discard = tclient->ttl0_discard;
898 
899 		ADD_64(fstats->total_bytes_received_hi,
900 		       qstats->total_bytes_received_hi,
901 		       fstats->total_bytes_received_lo,
902 		       qstats->total_bytes_received_lo);
903 		ADD_64(fstats->total_bytes_transmitted_hi,
904 		       qstats->total_bytes_transmitted_hi,
905 		       fstats->total_bytes_transmitted_lo,
906 		       qstats->total_bytes_transmitted_lo);
907 		ADD_64(fstats->total_unicast_packets_received_hi,
908 		       qstats->total_unicast_packets_received_hi,
909 		       fstats->total_unicast_packets_received_lo,
910 		       qstats->total_unicast_packets_received_lo);
911 		ADD_64(fstats->total_multicast_packets_received_hi,
912 		       qstats->total_multicast_packets_received_hi,
913 		       fstats->total_multicast_packets_received_lo,
914 		       qstats->total_multicast_packets_received_lo);
915 		ADD_64(fstats->total_broadcast_packets_received_hi,
916 		       qstats->total_broadcast_packets_received_hi,
917 		       fstats->total_broadcast_packets_received_lo,
918 		       qstats->total_broadcast_packets_received_lo);
919 		ADD_64(fstats->total_unicast_packets_transmitted_hi,
920 		       qstats->total_unicast_packets_transmitted_hi,
921 		       fstats->total_unicast_packets_transmitted_lo,
922 		       qstats->total_unicast_packets_transmitted_lo);
923 		ADD_64(fstats->total_multicast_packets_transmitted_hi,
924 		       qstats->total_multicast_packets_transmitted_hi,
925 		       fstats->total_multicast_packets_transmitted_lo,
926 		       qstats->total_multicast_packets_transmitted_lo);
927 		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
928 		       qstats->total_broadcast_packets_transmitted_hi,
929 		       fstats->total_broadcast_packets_transmitted_lo,
930 		       qstats->total_broadcast_packets_transmitted_lo);
931 		ADD_64(fstats->valid_bytes_received_hi,
932 		       qstats->valid_bytes_received_hi,
933 		       fstats->valid_bytes_received_lo,
934 		       qstats->valid_bytes_received_lo);
935 
936 		ADD_64(estats->error_bytes_received_hi,
937 		       qstats->error_bytes_received_hi,
938 		       estats->error_bytes_received_lo,
939 		       qstats->error_bytes_received_lo);
940 		ADD_64(estats->etherstatsoverrsizepkts_hi,
941 		       qstats->etherstatsoverrsizepkts_hi,
942 		       estats->etherstatsoverrsizepkts_lo,
943 		       qstats->etherstatsoverrsizepkts_lo);
944 		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
945 		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
946 	}
947 
948 	ADD_64(fstats->total_bytes_received_hi,
949 	       estats->rx_stat_ifhcinbadoctets_hi,
950 	       fstats->total_bytes_received_lo,
951 	       estats->rx_stat_ifhcinbadoctets_lo);
952 
953 	memcpy(estats, &(fstats->total_bytes_received_hi),
954 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
955 
956 	ADD_64(estats->etherstatsoverrsizepkts_hi,
957 	       estats->rx_stat_dot3statsframestoolong_hi,
958 	       estats->etherstatsoverrsizepkts_lo,
959 	       estats->rx_stat_dot3statsframestoolong_lo);
960 	ADD_64(estats->error_bytes_received_hi,
961 	       estats->rx_stat_ifhcinbadoctets_hi,
962 	       estats->error_bytes_received_lo,
963 	       estats->rx_stat_ifhcinbadoctets_lo);
964 
965 	if (bp->port.pmf) {
966 		estats->mac_filter_discard =
967 				le32_to_cpu(tport->mac_filter_discard);
968 		estats->xxoverflow_discard =
969 				le32_to_cpu(tport->xxoverflow_discard);
970 		estats->brb_truncate_discard =
971 				le32_to_cpu(tport->brb_truncate_discard);
972 		estats->mac_discard = le32_to_cpu(tport->mac_discard);
973 	}
974 
975 	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
976 
977 	bp->stats_pending = 0;
978 
979 	return 0;
980 }
981 
bnx2x_net_stats_update(struct bnx2x * bp)982 static void bnx2x_net_stats_update(struct bnx2x *bp)
983 {
984 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
985 	struct net_device_stats *nstats = &bp->dev->stats;
986 	unsigned long tmp;
987 	int i;
988 
989 	nstats->rx_packets =
990 		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
991 		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
992 		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
993 
994 	nstats->tx_packets =
995 		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
996 		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
997 		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
998 
999 	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
1000 
1001 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1002 
1003 	tmp = estats->mac_discard;
1004 	for_each_rx_queue(bp, i)
1005 		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
1006 	nstats->rx_dropped = tmp;
1007 
1008 	nstats->tx_dropped = 0;
1009 
1010 	nstats->multicast =
1011 		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
1012 
1013 	nstats->collisions =
1014 		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1015 
1016 	nstats->rx_length_errors =
1017 		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1018 		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1019 	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1020 				 bnx2x_hilo(&estats->brb_truncate_hi);
1021 	nstats->rx_crc_errors =
1022 		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1023 	nstats->rx_frame_errors =
1024 		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1025 	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1026 	nstats->rx_missed_errors = estats->xxoverflow_discard;
1027 
1028 	nstats->rx_errors = nstats->rx_length_errors +
1029 			    nstats->rx_over_errors +
1030 			    nstats->rx_crc_errors +
1031 			    nstats->rx_frame_errors +
1032 			    nstats->rx_fifo_errors +
1033 			    nstats->rx_missed_errors;
1034 
1035 	nstats->tx_aborted_errors =
1036 		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1037 		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1038 	nstats->tx_carrier_errors =
1039 		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1040 	nstats->tx_fifo_errors = 0;
1041 	nstats->tx_heartbeat_errors = 0;
1042 	nstats->tx_window_errors = 0;
1043 
1044 	nstats->tx_errors = nstats->tx_aborted_errors +
1045 			    nstats->tx_carrier_errors +
1046 	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
1047 }
1048 
bnx2x_drv_stats_update(struct bnx2x * bp)1049 static void bnx2x_drv_stats_update(struct bnx2x *bp)
1050 {
1051 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
1052 	int i;
1053 
1054 	estats->driver_xoff = 0;
1055 	estats->rx_err_discard_pkt = 0;
1056 	estats->rx_skb_alloc_failed = 0;
1057 	estats->hw_csum_err = 0;
1058 	for_each_queue(bp, i) {
1059 		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
1060 
1061 		estats->driver_xoff += qstats->driver_xoff;
1062 		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
1063 		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
1064 		estats->hw_csum_err += qstats->hw_csum_err;
1065 	}
1066 }
1067 
/* Handle a periodic statistics update: once the previous DMAE cycle has
 * completed, fold the freshly transferred HW and FW (storm) counters
 * into the driver's accumulated statistics, refresh the netdev and
 * driver-private counters, optionally emit per-queue debug output, and
 * kick off the next collection cycle.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not finished yet - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* MAC/port counters are maintained by the port-master (PMF) only */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* FW (storm) stats arrive asynchronously; tolerate a few missed
	 * updates (stats_pending counts them) before declaring the FW stuck.
	 */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-queue debug dump, enabled via the timer message level */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		/* per-queue rx state */
		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		/* per-queue tx state */
		for_each_eth_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
					  "  tx pkt(%lu) tx calls (%lu)"
					  "  %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	/* restart both HW (DMAE) and FW (storm) statistics collection */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
1131 
/* Prepare DMAE commands that copy the final port and/or function
 * statistics from host memory back to their shmem areas (port_stx /
 * func_stx), so management FW sees up-to-date counters when statistics
 * collection stops.  The commands are only programmed into the
 * slowpath DMAE array here; bnx2x_hw_stats_post() actually issues them.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* base opcode: host (PCI) -> device (GRC), completion type added
	 * per command below
	 */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats command follows, complete this one to
		 * GRC (so the loader can chain to the next command);
		 * otherwise complete directly to the PCI stats_comp word
		 */
		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* completion kicks the DMAE loader channel */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* final (or only) command: copy function stats out and
		 * signal completion via the PCI stats_comp word
		 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
1189 
bnx2x_stats_stop(struct bnx2x * bp)1190 static void bnx2x_stats_stop(struct bnx2x *bp)
1191 {
1192 	int update = 0;
1193 
1194 	bnx2x_stats_comp(bp);
1195 
1196 	if (bp->port.pmf)
1197 		update = (bnx2x_hw_stats_update(bp) == 0);
1198 
1199 	update |= (bnx2x_storm_stats_update(bp) == 0);
1200 
1201 	if (update) {
1202 		bnx2x_net_stats_update(bp);
1203 
1204 		if (bp->port.pmf)
1205 			bnx2x_port_stats_stop(bp);
1206 
1207 		bnx2x_hw_stats_post(bp);
1208 		bnx2x_stats_comp(bp);
1209 	}
1210 }
1211 
/* No-op action for state-machine (state, event) pairs that need no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
1215 
/* Statistics state machine, indexed by [current state][event]; each
 * entry names the action to execute and the state to transition to.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
1234 
/* Statistics state-machine entry point: run the action registered for
 * the (current state, event) pair, then advance to the next state.
 *
 * NOTE(review): the action is looked up via bp->stats_state *outside*
 * the stats_lock that protects the state transition below, so a
 * concurrent event could in principle dispatch an action for a stale
 * state - confirm whether callers serialize events externally before
 * relying on this.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	/* no statistics processing once the chip has been declared dead */
	if (unlikely(bp->panic))
		return;

	bnx2x_stats_stm[bp->stats_state][event].action(bp);

	/* Protect a state change flow */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	/* UPDATE events fire periodically - only trace them in timer-debug
	 * mode to avoid log spam
	 */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
1254 
/* DMAE the current host port-stats buffer out to the shmem port_stx
 * area (PCI -> GRC) to establish the base values, then wait for the
 * transfer to complete.  PMF-only: requires a valid port_stx address.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single command: host port_stats -> shmem, completion to PCI */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* issue and synchronously wait for completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
1284 
bnx2x_func_stats_base_init(struct bnx2x * bp)1285 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1286 {
1287 	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
1288 	u32 func_stx;
1289 
1290 	/* sanity */
1291 	if (!bp->port.pmf || !bp->func_stx) {
1292 		BNX2X_ERR("BUG!\n");
1293 		return;
1294 	}
1295 
1296 	/* save our func_stx */
1297 	func_stx = bp->func_stx;
1298 
1299 	for (vn = VN_0; vn < vn_max; vn++) {
1300 		int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
1301 
1302 		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1303 		bnx2x_func_stats_init(bp);
1304 		bnx2x_hw_stats_post(bp);
1305 		bnx2x_stats_comp(bp);
1306 	}
1307 
1308 	/* restore our func_stx */
1309 	bp->func_stx = func_stx;
1310 }
1311 
/* DMAE the current function statistics from shmem (bp->func_stx) into
 * the host func_stats_base buffer (GRC -> PCI) and wait for the
 * transfer to complete.  Used by non-PMF functions that still have a
 * valid func_stx area.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* single command: shmem func stats -> host buffer, PCI completion */
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* issue and synchronously wait for completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
1341 
/* (Re)initialize all statistics state: read the shmem addresses of the
 * port/function stats areas, snapshot baseline NIG counters, clear the
 * host-side per-queue and aggregate buffers, reset the FW stats-counter
 * sentinels, and - on the PMF - push out initial base values.
 */
void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);

	} else {
		/* no MCP - nowhere to report stats to */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: snapshot current NIG counters as the baseline for
	 * future delta calculations
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear all per-queue snapshots and accumulators */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	/* FW stats are currently collected for ETH clients only */
	for_each_eth_queue(bp, i) {
		/* Set initial stats counter in the stats ramrod data to -1 */
		int cl_id = bp->fp[i].cl_id;

		stats->xstorm_common.client_statistics[cl_id].
			stats_counter = 0xffff;
		stats->ustorm_common.client_statistics[cl_id].
			stats_counter = 0xffff;
		stats->tstorm_common.client_statistics[cl_id].
			stats_counter = 0xffff;
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* PMF establishes the base values in shmem */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF pulls its current base values from shmem */
		bnx2x_func_stats_base_update(bp);
}
1417