1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2007-2013 Broadcom Corporation.
3  *
4  * Eric Davis        <edavis@broadcom.com>
5  * David Christensen <davidch@broadcom.com>
6  * Gary Zambrano     <zambrano@broadcom.com>
7  *
8  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
9  * Copyright (c) 2015-2018 Cavium Inc.
10  * All rights reserved.
11  * www.cavium.com
12  */
13 
14 #include "bnx2x.h"
15 #include "bnx2x_stats.h"
16 
17 #ifdef __i386__
18 #define BITS_PER_LONG 32
19 #else
20 #define BITS_PER_LONG 64
21 #endif
22 
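/*
 * Return the size, in 32-bit dwords, of the port statistics block that is
 * DMAed to/from the chip. Newer bootcodes publish the size through shmem2
 * (sizeof_port_stats); otherwise fall back to the legacy layout ending at
 * the 'not_used' field, extended by the PFC counters when the MFW is recent
 * enough to support them.
 */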
23 static inline uint16_t
24 bnx2x_get_port_stats_dma_len(struct bnx2x_softc *sc)
25 {
26 	uint16_t res = 0;
27 	uint32_t size;
28 
29 	/* 'newest' convention - shmem2 contains the size of the port stats */
30 	if (SHMEM2_HAS(sc, sizeof_port_stats)) {
31 		size = SHMEM2_RD(sc, sizeof_port_stats);
32 		if (size)
33 			res = size;
34 
35 		/* prevent newer BC from causing buffer overflow */
36 		if (res > sizeof(struct host_port_stats))
37 			res = sizeof(struct host_port_stats);
38 	}
39 
40 	/*
41 	 * Older convention - all BCs support the port stats fields up until
42 	 * the 'not_used' field
43 	 */
44 	if (!res) {
45 		res = offsetof(struct host_port_stats, not_used) + 4;
46 
47 		/* if PFC stats are supported by the MFW, DMA them as well */
48 		if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
49 			res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
50 				offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
51 		}
52 	}
53 
54 	res >>= 2;
55 
56 	return res;
57 }
58 
59 /*
60  * Init service functions
61  */
62 
63 /*
64  * Post the next statistics ramrod. Protect it with the lock in
65  * order to ensure the strict order between statistics ramrods
66  * (each ramrod has a sequence number passed in a
67  * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
68  * sent in order).
69  */
70 static void
71 bnx2x_storm_stats_post(struct bnx2x_softc *sc)
72 {
73 	int rc;
74 
75 	if (!sc->stats_pending) {
76 		sc->fw_stats_req->hdr.drv_stats_counter =
77 			htole16(sc->stats_counter++);
78 
79 		PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
80 				"sending statistics ramrod %d",
81 				le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
82 
83 		/* adjust the ramrod to include VF queues statistics */
84 
85 		/* send FW stats ramrod */
86 		rc = bnx2x_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
87 				U64_HI(sc->fw_stats_req_mapping),
88 				U64_LO(sc->fw_stats_req_mapping),
89 				NONE_CONNECTION_TYPE);
90 		if (rc == 0)
91 			sc->stats_pending = 1;
92 	}
93 }
94 
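/*
 * Kick off the DMAE transfers prepared in the BNX2X_SP dmae[] area: when a
 * command chain was built (executer_idx != 0), a "loader" DMAE command is
 * posted that copies the first prepared command into DMAE command memory and
 * starts it through the corresponding GO register. The last command in the
 * chain completes by writing DMAE_COMP_VAL into the stats_comp word, which
 * bnx2x_stats_comp() polls for.
 */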
95 static void
96 bnx2x_hw_stats_post(struct bnx2x_softc *sc)
97 {
98 	struct dmae_command *dmae = &sc->stats_dmae;
99 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
100 	int loader_idx;
101 	uint32_t opcode;
102 
103 	*stats_comp = DMAE_COMP_VAL;
104 	if (CHIP_REV_IS_SLOW(sc))
105 		return;
106 
107 	/* Update MCP's statistics if possible */
108 	if (sc->func_stx)
109 		memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats,
110 				sizeof(sc->func_stats));
111 
112 	/* loader */
113 	if (sc->executer_idx) {
114 		loader_idx = PMF_DMAE_C(sc);
115 		opcode =  bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
116 				TRUE, DMAE_COMP_GRC);
117 		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
118 
119 		memset(dmae, 0, sizeof(struct dmae_command));
120 		dmae->opcode = opcode;
121 		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, dmae[0]));
122 		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, dmae[0]));
123 		dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
124 					sizeof(struct dmae_command) *
125 					(loader_idx + 1)) >> 2);
126 		dmae->dst_addr_hi = 0;
127 		dmae->len = sizeof(struct dmae_command) >> 2;
128 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2);
129 		dmae->comp_addr_hi = 0;
130 		dmae->comp_val = 1;
131 
132 		*stats_comp = 0;
133 		bnx2x_post_dmae(sc, dmae, loader_idx);
134 	} else if (sc->func_stx) {
135 		*stats_comp = 0;
136 		bnx2x_post_dmae(sc, dmae, INIT_DMAE_C(sc));
137 	}
138 }
139 
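/*
 * Poll the stats_comp word for the DMAE completion value, waiting up to
 * roughly 10 ms (10 x 1 ms). A timeout is only logged; the function always
 * returns 1.
 */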
140 static int
141 bnx2x_stats_comp(struct bnx2x_softc *sc)
142 {
143 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
144 	int cnt = 10;
145 
146 	while (*stats_comp != DMAE_COMP_VAL) {
147 		if (!cnt) {
148 			PMD_DRV_LOG(ERR, sc, "Timeout waiting for stats to finish");
149 			break;
150 		}
151 
152 		cnt--;
153 		DELAY(1000);
154 	}
155 
156 	return 1;
157 }
158 
159 /*
160  * Statistics service functions
161  */
162 
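/*
 * Runs when this driver instance becomes the PMF: DMA the port statistics
 * already accumulated in the chip's port_stx area back into the local
 * port_stats buffer so counters continue from their previous values. The
 * area exceeds DMAE_LEN32_RD_MAX dwords, so it is read in two chunks.
 */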
163 static void
164 bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
165 {
166 	struct dmae_command *dmae;
167 	uint32_t opcode;
168 	int loader_idx = PMF_DMAE_C(sc);
169 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
170 
171 	if (sc->devinfo.bc_ver <= 0x06001400) {
172 		/*
173 		 * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing
174 		 * BRB registers while the BRB block is in reset, which causes the
175 		 * DMAE to stop functioning. The DMA transfer below would trigger
176 		 * that issue, so skip this initial stats transfer for old bootcode
177 		 * versions <= 6.0.20.
178 		 */
179 		return;
180 	}
181 	/* sanity */
182 	if (!sc->port.pmf || !sc->port.port_stx) {
183 		PMD_DRV_LOG(ERR, sc, "BUG!");
184 		return;
185 	}
186 
187 	sc->executer_idx = 0;
188 
189 	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0);
190 
191 	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
192 	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
193 	dmae->src_addr_lo = (sc->port.port_stx >> 2);
194 	dmae->src_addr_hi = 0;
195 	dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
196 	dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
197 	dmae->len = DMAE_LEN32_RD_MAX;
198 	dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
199 	dmae->comp_addr_hi = 0;
200 	dmae->comp_val = 1;
201 
202 	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
203 	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
204 	dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX);
205 	dmae->src_addr_hi = 0;
206 	dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats) +
207 			DMAE_LEN32_RD_MAX * 4);
208 	dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats) +
209 			DMAE_LEN32_RD_MAX * 4);
210 	dmae->len = (bnx2x_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX);
211 
212 	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
213 	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
214 	dmae->comp_val = DMAE_COMP_VAL;
215 
216 	*stats_comp = 0;
217 	bnx2x_hw_stats_post(sc);
218 	bnx2x_stats_comp(sc);
219 }
220 
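/*
 * Build the DMAE command chain that bnx2x_hw_stats_post() executes on every
 * statistics cycle for the PMF: host-to-GRC copies of the port and function
 * stats for the MCP, and GRC-to-host copies of the hardware MAC counters
 * (EMAC, BMAC or MSTAT depending on the active MAC type) and of the NIG
 * counters.
 */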
221 static void
222 bnx2x_port_stats_init(struct bnx2x_softc *sc)
223 {
224 	struct dmae_command *dmae;
225 	int port = SC_PORT(sc);
226 	uint32_t opcode;
227 	int loader_idx = PMF_DMAE_C(sc);
228 	uint32_t mac_addr;
229 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
230 
231 	/* sanity */
232 	if (!sc->link_vars.link_up || !sc->port.pmf) {
233 		PMD_DRV_LOG(ERR, sc, "BUG!");
234 		return;
235 	}
236 
237 	sc->executer_idx = 0;
238 
239 	/* MCP */
240 	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
241 				   TRUE, DMAE_COMP_GRC);
242 
243 	if (sc->port.port_stx) {
244 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
245 		dmae->opcode = opcode;
246 		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
247 		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
248 		dmae->dst_addr_lo = sc->port.port_stx >> 2;
249 		dmae->dst_addr_hi = 0;
250 		dmae->len = bnx2x_get_port_stats_dma_len(sc);
251 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
252 		dmae->comp_addr_hi = 0;
253 		dmae->comp_val = 1;
254 	}
255 
256 	if (sc->func_stx) {
257 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
258 		dmae->opcode = opcode;
259 		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
260 		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
261 		dmae->dst_addr_lo = (sc->func_stx >> 2);
262 		dmae->dst_addr_hi = 0;
263 		dmae->len = (sizeof(struct host_func_stats) >> 2);
264 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
265 		dmae->comp_addr_hi = 0;
266 		dmae->comp_val = 1;
267 	}
268 
269 	/* MAC */
270 	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
271 				   TRUE, DMAE_COMP_GRC);
272 
273 	/* EMAC is special */
274 	if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) {
275 		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
276 
277 		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
278 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
279 		dmae->opcode = opcode;
280 		dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2;
281 		dmae->src_addr_hi = 0;
282 		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
283 		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
284 		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
285 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
286 		dmae->comp_addr_hi = 0;
287 		dmae->comp_val = 1;
288 
289 		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
290 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
291 		dmae->opcode = opcode;
292 		dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2);
293 		dmae->src_addr_hi = 0;
294 		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
295 					   offsetof(struct emac_stats,
296 						    rx_stat_falsecarriererrors));
297 		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
298 					   offsetof(struct emac_stats,
299 						    rx_stat_falsecarriererrors));
300 		dmae->len = 1;
301 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
302 		dmae->comp_addr_hi = 0;
303 		dmae->comp_val = 1;
304 
305 		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
306 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
307 		dmae->opcode = opcode;
308 		dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2);
309 		dmae->src_addr_hi = 0;
310 		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) +
311 					   offsetof(struct emac_stats,
312 						    tx_stat_ifhcoutoctets));
313 		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) +
314 					   offsetof(struct emac_stats,
315 						    tx_stat_ifhcoutoctets));
316 		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
317 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
318 		dmae->comp_addr_hi = 0;
319 		dmae->comp_val = 1;
320 	} else {
321 		uint32_t tx_src_addr_lo, rx_src_addr_lo;
322 		uint16_t rx_len, tx_len;
323 
324 		/* configure the params according to MAC type */
325 		switch (sc->link_vars.mac_type) {
326 		case ELINK_MAC_TYPE_BMAC:
327 			mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM :
328 			NIG_REG_INGRESS_BMAC0_MEM;
329 
330 			/*
331 			 * BIGMAC_REGISTER_TX_STAT_GTPKT ..
332 			 * BIGMAC_REGISTER_TX_STAT_GTBYT
333 			 */
334 			if (CHIP_IS_E1x(sc)) {
335 				tx_src_addr_lo =
336 					((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
337 				tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
338 					   BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2);
339 				rx_src_addr_lo =
340 					((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
341 				rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
342 					   BIGMAC_REGISTER_RX_STAT_GR64) >> 2);
343 			} else {
344 				tx_src_addr_lo =
345 					((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
346 				tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
347 					   BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2);
348 				rx_src_addr_lo =
349 					((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
350 				rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
351 					   BIGMAC2_REGISTER_RX_STAT_GR64) >> 2);
352 			}
353 
354 			break;
355 
356 		case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */
357 		case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */
358 		default:
359 			mac_addr = (port) ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
360 			tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2);
361 			rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2);
362 			tx_len =
363 				(sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2);
364 			rx_len =
365 				(sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2);
366 			break;
367 		}
368 
369 		/* TX stats */
370 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
371 		dmae->opcode = opcode;
372 		dmae->src_addr_lo = tx_src_addr_lo;
373 		dmae->src_addr_hi = 0;
374 		dmae->len = tx_len;
375 		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, mac_stats));
376 		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, mac_stats));
377 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
378 		dmae->comp_addr_hi = 0;
379 		dmae->comp_val = 1;
380 
381 		/* RX stats */
382 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
383 		dmae->opcode = opcode;
384 		dmae->src_addr_hi = 0;
385 		dmae->src_addr_lo = rx_src_addr_lo;
386 		dmae->dst_addr_lo =
387 			U64_LO(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
388 		dmae->dst_addr_hi =
389 			U64_HI(BNX2X_SP_MAPPING(sc, mac_stats) + (tx_len << 2));
390 		dmae->len = rx_len;
391 		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
392 		dmae->comp_addr_hi = 0;
393 		dmae->comp_val = 1;
394 	}
395 
396 	/* NIG */
397 	if (!CHIP_IS_E3(sc)) {
398 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
399 		dmae->opcode = opcode;
400 		dmae->src_addr_lo =
401 			(port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
402 			 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
403 		dmae->src_addr_hi = 0;
404 		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
405 					   offsetof(struct nig_stats,
406 						    egress_mac_pkt0_lo));
407 		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
408 					   offsetof(struct nig_stats,
409 						    egress_mac_pkt0_lo));
410 		dmae->len = ((2 * sizeof(uint32_t)) >> 2);
411 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
412 		dmae->comp_addr_hi = 0;
413 		dmae->comp_val = 1;
414 
415 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
416 		dmae->opcode = opcode;
417 		dmae->src_addr_lo =
418 			(port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
419 			 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
420 		dmae->src_addr_hi = 0;
421 		dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats) +
422 					   offsetof(struct nig_stats,
423 						    egress_mac_pkt1_lo));
424 		dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats) +
425 					   offsetof(struct nig_stats,
426 						    egress_mac_pkt1_lo));
427 		dmae->len = ((2 * sizeof(uint32_t)) >> 2);
428 		dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
429 		dmae->comp_addr_hi = 0;
430 		dmae->comp_val = 1;
431 	}
432 
433 	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
434 	dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI,
435 					 TRUE, DMAE_COMP_PCI);
436 	dmae->src_addr_lo =
437 		(port ? NIG_REG_STAT1_BRB_DISCARD :
438 		 NIG_REG_STAT0_BRB_DISCARD) >> 2;
439 	dmae->src_addr_hi = 0;
440 	dmae->dst_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, nig_stats));
441 	dmae->dst_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, nig_stats));
442 	dmae->len = (sizeof(struct nig_stats) - 4 * sizeof(uint32_t)) >> 2;
443 
444 	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
445 	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
446 	dmae->comp_val = DMAE_COMP_VAL;
447 
448 	*stats_comp = 0;
449 }
450 
451 static void
452 bnx2x_func_stats_init(struct bnx2x_softc *sc)
453 {
454 	struct dmae_command *dmae = &sc->stats_dmae;
455 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
456 
457 	/* sanity */
458 	if (!sc->func_stx) {
459 		PMD_DRV_LOG(ERR, sc, "BUG!");
460 		return;
461 	}
462 
463 	sc->executer_idx = 0;
464 	memset(dmae, 0, sizeof(struct dmae_command));
465 
466 	dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
467 					 TRUE, DMAE_COMP_PCI);
468 	dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
469 	dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
470 	dmae->dst_addr_lo = (sc->func_stx >> 2);
471 	dmae->dst_addr_hi = 0;
472 	dmae->len = (sizeof(struct host_func_stats) >> 2);
473 	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
474 	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
475 	dmae->comp_val = DMAE_COMP_VAL;
476 
477 	*stats_comp = 0;
478 }
479 
480 static void
481 bnx2x_stats_start(struct bnx2x_softc *sc)
482 {
483 	/*
484 	 * VFs travel through here as part of the statistics FSM, but no action
485 	 * is required
486 	 */
487 	if (IS_VF(sc))
488 		return;
489 
490 	if (sc->port.pmf)
491 		bnx2x_port_stats_init(sc);
492 	else if (sc->func_stx)
493 		bnx2x_func_stats_init(sc);
494 
495 	bnx2x_hw_stats_post(sc);
496 	bnx2x_storm_stats_post(sc);
497 }
498 
499 static void
500 bnx2x_stats_pmf_start(struct bnx2x_softc *sc)
501 {
502 	bnx2x_stats_comp(sc);
503 	bnx2x_stats_pmf_update(sc);
504 	bnx2x_stats_start(sc);
505 }
506 
507 static void
508 bnx2x_stats_restart(struct bnx2x_softc *sc)
509 {
510 	/*
511 	 * VFs travel through here as part of the statistics FSM, but no action
512 	 * is required
513 	 */
514 	if (IS_VF(sc))
515 		return;
516 
517 	bnx2x_stats_comp(sc);
518 	bnx2x_stats_start(sc);
519 }
520 
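/*
 * Fold the BigMAC hardware counters (DMAed into mac_stats as bmac1_stats or
 * bmac2_stats depending on the chip generation) into the mac_stx[1] block of
 * host_port_stats; BMAC2 additionally provides the PFC frame counters.
 */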
521 static void
522 bnx2x_bmac_stats_update(struct bnx2x_softc *sc)
523 {
524 	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
525 	struct bnx2x_eth_stats *estats = &sc->eth_stats;
526 	struct {
527 		uint32_t lo;
528 		uint32_t hi;
529 	} diff;
530 
531 	if (CHIP_IS_E1x(sc)) {
532 		struct bmac1_stats *new = BNX2X_SP(sc, mac_stats.bmac1_stats);
533 
534 		/* the macros below will use "bmac1_stats" type */
535 		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
536 		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
537 		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
538 		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
539 		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
540 		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
541 		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
542 		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
543 		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
544 
545 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
546 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
547 		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
548 		UPDATE_STAT64(tx_stat_gt127,
549 			      tx_stat_etherstatspkts65octetsto127octets);
550 		UPDATE_STAT64(tx_stat_gt255,
551 			      tx_stat_etherstatspkts128octetsto255octets);
552 		UPDATE_STAT64(tx_stat_gt511,
553 			      tx_stat_etherstatspkts256octetsto511octets);
554 		UPDATE_STAT64(tx_stat_gt1023,
555 			      tx_stat_etherstatspkts512octetsto1023octets);
556 		UPDATE_STAT64(tx_stat_gt1518,
557 			      tx_stat_etherstatspkts1024octetsto1522octets);
558 		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
559 		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
560 		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
561 		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
562 		UPDATE_STAT64(tx_stat_gterr,
563 			      tx_stat_dot3statsinternalmactransmiterrors);
564 		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
565 	} else {
566 		struct bmac2_stats *new = BNX2X_SP(sc, mac_stats.bmac2_stats);
567 		struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
568 
569 		/* the macros below will use "bmac2_stats" type */
570 		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
571 		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
572 		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
573 		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
574 		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
575 		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
576 		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
577 		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
578 		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
579 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
580 		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
581 		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
582 		UPDATE_STAT64(tx_stat_gt127,
583 			      tx_stat_etherstatspkts65octetsto127octets);
584 		UPDATE_STAT64(tx_stat_gt255,
585 			      tx_stat_etherstatspkts128octetsto255octets);
586 		UPDATE_STAT64(tx_stat_gt511,
587 			      tx_stat_etherstatspkts256octetsto511octets);
588 		UPDATE_STAT64(tx_stat_gt1023,
589 			      tx_stat_etherstatspkts512octetsto1023octets);
590 		UPDATE_STAT64(tx_stat_gt1518,
591 			      tx_stat_etherstatspkts1024octetsto1522octets);
592 		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
593 		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
594 		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
595 		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
596 		UPDATE_STAT64(tx_stat_gterr,
597 			      tx_stat_dot3statsinternalmactransmiterrors);
598 		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
599 
600 		/* collect PFC stats */
601 		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
602 		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
603 		ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi,
604 		       pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo);
605 
606 		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
607 		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
608 		ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi,
609 		       pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo);
610 	}
611 
612 	estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
613 	estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
614 
615 	estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
616 	estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
617 
618 	estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
619 	estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
620 	estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
621 	estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
622 }
623 
624 static void
625 bnx2x_mstat_stats_update(struct bnx2x_softc *sc)
626 {
627 	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
628 	struct bnx2x_eth_stats *estats = &sc->eth_stats;
629 	struct mstat_stats *new = BNX2X_SP(sc, mac_stats.mstat_stats);
630 
631 	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
632 	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
633 	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
634 	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
635 	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
636 	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
637 	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
638 	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
639 	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
640 	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
641 
642 	/* collect pfc stats */
643 	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
644 	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
645 	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
646 	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
647 
648 	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
649 	ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets);
650 	ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets);
651 	ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets);
652 	ADD_STAT64(stats_tx.tx_gt1023,
653 		   tx_stat_etherstatspkts512octetsto1023octets);
654 	ADD_STAT64(stats_tx.tx_gt1518,
655 		   tx_stat_etherstatspkts1024octetsto1522octets);
656 	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
657 
658 	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
659 	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
660 	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
661 
662 	ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors);
663 	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
664 
665 	estats->etherstatspkts1024octetsto1522octets_hi =
666 		pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
667 	estats->etherstatspkts1024octetsto1522octets_lo =
668 		pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
669 
670 	estats->etherstatspktsover1522octets_hi =
671 		pstats->mac_stx[1].tx_stat_mac_2047_hi;
672 	estats->etherstatspktsover1522octets_lo =
673 		pstats->mac_stx[1].tx_stat_mac_2047_lo;
674 
675 	ADD_64(estats->etherstatspktsover1522octets_hi,
676 	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
677 	       estats->etherstatspktsover1522octets_lo,
678 	       pstats->mac_stx[1].tx_stat_mac_4095_lo);
679 
680 	ADD_64(estats->etherstatspktsover1522octets_hi,
681 	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
682 	       estats->etherstatspktsover1522octets_lo,
683 	       pstats->mac_stx[1].tx_stat_mac_9216_lo);
684 
685 	ADD_64(estats->etherstatspktsover1522octets_hi,
686 	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
687 	       estats->etherstatspktsover1522octets_lo,
688 	       pstats->mac_stx[1].tx_stat_mac_16383_lo);
689 
690 	estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi;
691 	estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo;
692 
693 	estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi;
694 	estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo;
695 
696 	estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi;
697 	estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo;
698 	estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi;
699 	estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo;
700 }
701 
702 static void
703 bnx2x_emac_stats_update(struct bnx2x_softc *sc)
704 {
705 	struct emac_stats *new = BNX2X_SP(sc, mac_stats.emac_stats);
706 	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
707 	struct bnx2x_eth_stats *estats = &sc->eth_stats;
708 
709 	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
710 	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
711 	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
712 	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
713 	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
714 	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
715 	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
716 	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
717 	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
718 	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
719 	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
720 	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
721 	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
722 	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
723 	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
724 	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
725 	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
726 	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
727 	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
728 	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
729 	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
730 	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
731 	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
732 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
733 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
734 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
735 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
736 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
737 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
738 	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
739 	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
740 
741 	estats->pause_frames_received_hi =
742 		pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
743 	estats->pause_frames_received_lo =
744 		pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
745 	ADD_64(estats->pause_frames_received_hi,
746 	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
747 	       estats->pause_frames_received_lo,
748 	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
749 
750 	estats->pause_frames_sent_hi =
751 		pstats->mac_stx[1].tx_stat_outxonsent_hi;
752 	estats->pause_frames_sent_lo =
753 		pstats->mac_stx[1].tx_stat_outxonsent_lo;
754 	ADD_64(estats->pause_frames_sent_hi,
755 	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
756 	       estats->pause_frames_sent_lo,
757 	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
758 }
759 
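/*
 * Consume the hardware counters fetched by the DMAE chain: dispatch to the
 * MAC-specific update routine, account the NIG BRB discard/truncate deltas
 * and (on non-E3 chips) the per-port egress packet counters, then mirror
 * the MAC statistics block into the driver's eth_stats.
 */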
760 static int
761 bnx2x_hw_stats_update(struct bnx2x_softc *sc)
762 {
763 	struct nig_stats *new = BNX2X_SP(sc, nig_stats);
764 	struct nig_stats *old = &sc->port.old_nig_stats;
765 	struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
766 	struct bnx2x_eth_stats *estats = &sc->eth_stats;
767 	uint32_t lpi_reg, nig_timer_max;
768 	struct {
769 		uint32_t lo;
770 		uint32_t hi;
771 	} diff;
772 
773 	switch (sc->link_vars.mac_type) {
774 	case ELINK_MAC_TYPE_BMAC:
775 		bnx2x_bmac_stats_update(sc);
776 		break;
777 
778 	case ELINK_MAC_TYPE_EMAC:
779 		bnx2x_emac_stats_update(sc);
780 		break;
781 
782 	case ELINK_MAC_TYPE_UMAC:
783 	case ELINK_MAC_TYPE_XMAC:
784 		bnx2x_mstat_stats_update(sc);
785 		break;
786 
787 	case ELINK_MAC_TYPE_NONE: /* unreached */
788 		PMD_DRV_LOG(DEBUG, sc,
789 			    "stats updated by DMAE but no MAC active");
790 		return -1;
791 
792 	default: /* unreached */
793 		PMD_DRV_LOG(ERR, sc, "stats update failed, unknown MAC type");
794 	}
795 
796 	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
797 		      new->brb_discard - old->brb_discard);
798 	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
799 		      new->brb_truncate - old->brb_truncate);
800 
801 	if (!CHIP_IS_E3(sc)) {
802 		UPDATE_STAT64_NIG(egress_mac_pkt0,
803 				  etherstatspkts1024octetsto1522octets);
804 		UPDATE_STAT64_NIG(egress_mac_pkt1,
805 				  etherstatspktsover1522octets);
806 	}
807 
808 	memcpy(old, new, sizeof(struct nig_stats));
809 
810 	memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)),
811 	       &pstats->mac_stx[1], sizeof(struct mac_stx));
812 	estats->brb_drop_hi = pstats->brb_drop_hi;
813 	estats->brb_drop_lo = pstats->brb_drop_lo;
814 
815 	pstats->host_port_stats_counter++;
816 
817 	if (CHIP_IS_E3(sc)) {
818 		lpi_reg = (SC_PORT(sc)) ?
819 			MISC_REG_CPMU_LP_SM_ENT_CNT_P1 :
820 			MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
821 		estats->eee_tx_lpi += REG_RD(sc, lpi_reg);
822 	}
823 
824 	if (!BNX2X_NOMCP(sc)) {
825 		nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
826 		if (nig_timer_max != estats->nig_timer_max) {
827 			estats->nig_timer_max = nig_timer_max;
828 			PMD_DRV_LOG(ERR, sc, "invalid NIG timer max (%u)",
829 				    estats->nig_timer_max);
830 		}
831 	}
832 
833 	return 0;
834 }
835 
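/*
 * Every storm echoes the drv_stats_counter of the last statistics ramrod it
 * processed. The storm data is only coherent when all four storms report the
 * counter of the most recently sent ramrod; otherwise return -EAGAIN so the
 * caller can retry on the next cycle.
 */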
836 static int
837 bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
838 {
839 	struct stats_counter *counters = &sc->fw_stats_data->storm_counters;
840 	uint16_t cur_stats_counter;
841 
842 	/*
843 	 * Make sure we use the value of the counter
844 	 * used for sending the last stats ramrod.
845 	 */
846 	cur_stats_counter = sc->stats_counter - 1;
847 
848 	/* are storm stats valid? */
849 	if (le16toh(counters->xstats_counter) != cur_stats_counter) {
850 		PMD_DRV_LOG(DEBUG, sc,
851 			    "stats not updated by xstorm, "
852 			    "counter 0x%x != stats_counter 0x%x",
853 			    le16toh(counters->xstats_counter), sc->stats_counter);
854 		return -EAGAIN;
855 	}
856 
857 	if (le16toh(counters->ustats_counter) != cur_stats_counter) {
858 		PMD_DRV_LOG(DEBUG, sc,
859 			    "stats not updated by ustorm, "
860 			    "counter 0x%x != stats_counter 0x%x",
861 			    le16toh(counters->ustats_counter), sc->stats_counter);
862 		return -EAGAIN;
863 	}
864 
865 	if (le16toh(counters->cstats_counter) != cur_stats_counter) {
866 		PMD_DRV_LOG(DEBUG, sc,
867 			    "stats not updated by cstorm, "
868 			    "counter 0x%x != stats_counter 0x%x",
869 			    le16toh(counters->cstats_counter), sc->stats_counter);
870 		return -EAGAIN;
871 	}
872 
873 	if (le16toh(counters->tstats_counter) != cur_stats_counter) {
874 		PMD_DRV_LOG(DEBUG, sc,
875 			    "stats not updated by tstorm, "
876 			    "counter 0x%x != stats_counter 0x%x",
877 			    le16toh(counters->tstats_counter), sc->stats_counter);
878 		return -EAGAIN;
879 	}
880 
881 	return 0;
882 }
883 
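/*
 * Merge the per-queue tstorm/ustorm/xstorm counters from the firmware stats
 * buffer into each fastpath's eth_q_stats and accumulate them into the
 * function and global eth_stats, after validating the storm counters
 * (PF only).
 */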
884 static int
885 bnx2x_storm_stats_update(struct bnx2x_softc *sc)
886 {
887 	struct tstorm_per_port_stats *tport =
888 		&sc->fw_stats_data->port.tstorm_port_statistics;
889 	struct tstorm_per_pf_stats *tfunc =
890 		&sc->fw_stats_data->pf.tstorm_pf_statistics;
891 	struct host_func_stats *fstats = &sc->func_stats;
892 	struct bnx2x_eth_stats *estats = &sc->eth_stats;
893 	struct bnx2x_eth_stats_old *estats_old = &sc->eth_stats_old;
894 	int i;
895 
896 	/* the VF stats counter is managed by the PF */
897 	if (IS_PF(sc) && bnx2x_storm_stats_validate_counters(sc))
898 		return -EAGAIN;
899 
900 	estats->error_bytes_received_hi = 0;
901 	estats->error_bytes_received_lo = 0;
902 
903 	for (i = 0; i < sc->num_queues; i++) {
904 		struct bnx2x_fastpath *fp = &sc->fp[i];
905 		struct tstorm_per_queue_stats *tclient =
906 			&sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics;
907 		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
908 		struct ustorm_per_queue_stats *uclient =
909 			&sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics;
910 		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
911 		struct xstorm_per_queue_stats *xclient =
912 			&sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics;
913 		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
914 		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
915 		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
916 
917 		uint32_t diff;
918 
919 		/* PMD_DRV_LOG(DEBUG, sc,
920 				"queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
921 				i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
922 				xclient->mcast_pkts_sent);
923 
924 		PMD_DRV_LOG(DEBUG, sc, "---------------");
925 		 */
926 
927 		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
928 				total_broadcast_bytes_received);
929 		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
930 				total_multicast_bytes_received);
931 		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
932 				total_unicast_bytes_received);
933 
934 		/*
935 		 * sum to total_bytes_received all
936 		 * unicast/multicast/broadcast
937 		 */
938 		qstats->total_bytes_received_hi =
939 			qstats->total_broadcast_bytes_received_hi;
940 		qstats->total_bytes_received_lo =
941 			qstats->total_broadcast_bytes_received_lo;
942 
943 		ADD_64(qstats->total_bytes_received_hi,
944 				qstats->total_multicast_bytes_received_hi,
945 				qstats->total_bytes_received_lo,
946 				qstats->total_multicast_bytes_received_lo);
947 
948 		ADD_64(qstats->total_bytes_received_hi,
949 				qstats->total_unicast_bytes_received_hi,
950 				qstats->total_bytes_received_lo,
951 				qstats->total_unicast_bytes_received_lo);
952 
953 		qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi;
954 		qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo;
955 
956 		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received);
957 		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received);
958 		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received);
959 		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
960 				etherstatsoverrsizepkts, 32);
961 		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
962 
963 		SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received);
964 		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
965 				total_multicast_packets_received);
966 		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
967 				total_broadcast_packets_received);
968 		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
969 		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
970 		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
971 
972 		UPDATE_QSTAT(xclient->bcast_bytes_sent,
973 				total_broadcast_bytes_transmitted);
974 		UPDATE_QSTAT(xclient->mcast_bytes_sent,
975 				total_multicast_bytes_transmitted);
976 		UPDATE_QSTAT(xclient->ucast_bytes_sent,
977 				total_unicast_bytes_transmitted);
978 
979 		/*
980 		 * sum to total_bytes_transmitted all
981 		 * unicast/multicast/broadcast
982 		 */
983 		qstats->total_bytes_transmitted_hi =
984 			qstats->total_unicast_bytes_transmitted_hi;
985 		qstats->total_bytes_transmitted_lo =
986 			qstats->total_unicast_bytes_transmitted_lo;
987 
988 		ADD_64(qstats->total_bytes_transmitted_hi,
989 				qstats->total_broadcast_bytes_transmitted_hi,
990 				qstats->total_bytes_transmitted_lo,
991 				qstats->total_broadcast_bytes_transmitted_lo);
992 
993 		ADD_64(qstats->total_bytes_transmitted_hi,
994 				qstats->total_multicast_bytes_transmitted_hi,
995 				qstats->total_bytes_transmitted_lo,
996 				qstats->total_multicast_bytes_transmitted_lo);
997 
998 		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
999 				total_unicast_packets_transmitted);
1000 		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
1001 				total_multicast_packets_transmitted);
1002 		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1003 				total_broadcast_packets_transmitted);
1004 
1005 		UPDATE_EXTEND_TSTAT(checksum_discard,
1006 				total_packets_received_checksum_discarded);
1007 		UPDATE_EXTEND_TSTAT(ttl0_discard,
1008 				total_packets_received_ttl0_discarded);
1009 
1010 		UPDATE_EXTEND_XSTAT(error_drop_pkts,
1011 				total_transmitted_dropped_packets_error);
1012 
1013 		UPDATE_FSTAT_QSTAT(total_bytes_received);
1014 		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1015 		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1016 		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1017 		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1018 		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1019 		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1020 		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1021 		UPDATE_FSTAT_QSTAT(valid_bytes_received);
1022 	}
1023 
1024 	ADD_64(estats->total_bytes_received_hi,
1025 			estats->rx_stat_ifhcinbadoctets_hi,
1026 			estats->total_bytes_received_lo,
1027 			estats->rx_stat_ifhcinbadoctets_lo);
1028 
1029 	ADD_64_LE(estats->total_bytes_received_hi,
1030 			tfunc->rcv_error_bytes.hi,
1031 			estats->total_bytes_received_lo,
1032 			tfunc->rcv_error_bytes.lo);
1033 
1034 	ADD_64_LE(estats->error_bytes_received_hi,
1035 			tfunc->rcv_error_bytes.hi,
1036 			estats->error_bytes_received_lo,
1037 			tfunc->rcv_error_bytes.lo);
1038 
1039 	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1040 
1041 	ADD_64(estats->error_bytes_received_hi,
1042 			estats->rx_stat_ifhcinbadoctets_hi,
1043 			estats->error_bytes_received_lo,
1044 			estats->rx_stat_ifhcinbadoctets_lo);
1045 
1046 	if (sc->port.pmf) {
1047 		struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1048 		UPDATE_FW_STAT(mac_filter_discard);
1049 		UPDATE_FW_STAT(mf_tag_discard);
1050 		UPDATE_FW_STAT(brb_truncate_discard);
1051 		UPDATE_FW_STAT(mac_discard);
1052 	}
1053 
1054 	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1055 
1056 	sc->stats_pending = 0;
1057 
1058 	return 0;
1059 }
1060 
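/* Fold the PMD's software per-queue counters into the global eth_stats. */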
1061 static void
1062 bnx2x_drv_stats_update(struct bnx2x_softc *sc)
1063 {
1064 	struct bnx2x_eth_stats *estats = &sc->eth_stats;
1065 	int i;
1066 
1067 	for (i = 0; i < sc->num_queues; i++) {
1068 		struct bnx2x_eth_q_stats *qstats = &sc->fp[i].eth_q_stats;
1069 		struct bnx2x_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old;
1070 
1071 		UPDATE_ESTAT_QSTAT(rx_calls);
1072 		UPDATE_ESTAT_QSTAT(rx_pkts);
1073 		UPDATE_ESTAT_QSTAT(rx_soft_errors);
1074 		UPDATE_ESTAT_QSTAT(rx_hw_csum_errors);
1075 		UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip);
1076 		UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp);
1077 		UPDATE_ESTAT_QSTAT(rx_budget_reached);
1078 		UPDATE_ESTAT_QSTAT(tx_pkts);
1079 		UPDATE_ESTAT_QSTAT(tx_soft_errors);
1080 		UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip);
1081 		UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp);
1082 		UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp);
1083 		UPDATE_ESTAT_QSTAT(tx_encap_failures);
1084 		UPDATE_ESTAT_QSTAT(tx_hw_queue_full);
1085 		UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth);
1086 		UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure);
1087 		UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth);
1088 		UPDATE_ESTAT_QSTAT(tx_window_violation_std);
1089 		UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf);
1090 		UPDATE_ESTAT_QSTAT(tx_frames_deferred);
1091 		UPDATE_ESTAT_QSTAT(tx_queue_xoff);
1092 
1093 		/* mbuf driver statistics */
1094 		UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts);
1095 		UPDATE_ESTAT_QSTAT(mbuf_defrag_failures);
1096 		UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed);
1097 		UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed);
1098 
1099 		/* track the number of allocated mbufs */
1100 		UPDATE_ESTAT_QSTAT(mbuf_alloc_tx);
1101 		UPDATE_ESTAT_QSTAT(mbuf_alloc_rx);
1102 	}
1103 }
1104 
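/*
 * The management firmware can request that the driver stop updating
 * statistics via the shmem2 edebug driver interface; check for that here.
 */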
1105 static uint8_t
1106 bnx2x_edebug_stats_stopped(struct bnx2x_softc *sc)
1107 {
1108 	uint32_t val;
1109 
1110 	if (SHMEM2_HAS(sc, edebug_driver_if[1])) {
1111 		val = SHMEM2_RD(sc, edebug_driver_if[1]);
1112 
1113 		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
1114 			return TRUE;
1115 	}
1116 
1117 	return FALSE;
1118 }
1119 
1120 static void
1121 bnx2x_stats_update(struct bnx2x_softc *sc)
1122 {
1123 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1124 
1125 	if (bnx2x_edebug_stats_stopped(sc))
1126 		return;
1127 
1128 	if (IS_PF(sc)) {
1129 
1130 		bnx2x_storm_stats_update(sc);
1131 		bnx2x_hw_stats_post(sc);
1132 		bnx2x_storm_stats_post(sc);
1133 		DELAY_MS(5);
1134 
1135 		if (*stats_comp != DMAE_COMP_VAL)
1136 			return;
1137 
1138 		if (sc->port.pmf)
1139 			bnx2x_hw_stats_update(sc);
1140 
1141 		if (bnx2x_storm_stats_update(sc)) {
1142 			if (sc->stats_pending++ == 3)
1143 				rte_panic("storm stats not updated 3 times in a row");
1144 			return;
1145 		}
1146 	} else {
1147 		/*
1148 		 * A VF doesn't collect HW statistics and doesn't get completions;
1149 		 * it only performs the update.
1150 		 */
1151 		bnx2x_storm_stats_update(sc);
1152 	}
1153 
1154 	bnx2x_drv_stats_update(sc);
1155 }
1156 
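/*
 * Prepare the DMAE commands that push the final port and function statistics
 * snapshots to the MCP before statistics collection is stopped.
 */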
1157 static void
1158 bnx2x_port_stats_stop(struct bnx2x_softc *sc)
1159 {
1160 	struct dmae_command *dmae;
1161 	uint32_t opcode;
1162 	int loader_idx = PMF_DMAE_C(sc);
1163 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1164 
1165 	sc->executer_idx = 0;
1166 
1167 	opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0);
1168 
1169 	if (sc->port.port_stx) {
1170 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1171 
1172 		if (sc->func_stx)
1173 			dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
1174 		else
1175 			dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1176 
1177 		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1178 		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
1179 		dmae->dst_addr_lo = sc->port.port_stx >> 2;
1180 		dmae->dst_addr_hi = 0;
1181 		dmae->len = bnx2x_get_port_stats_dma_len(sc);
1182 		if (sc->func_stx) {
1183 			dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2);
1184 			dmae->comp_addr_hi = 0;
1185 			dmae->comp_val = 1;
1186 		} else {
1187 			dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1188 			dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1189 			dmae->comp_val = DMAE_COMP_VAL;
1190 
1191 			*stats_comp = 0;
1192 		}
1193 	}
1194 
1195 	if (sc->func_stx) {
1196 		dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1197 		dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1198 		dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, func_stats));
1199 		dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, func_stats));
1200 		dmae->dst_addr_lo = (sc->func_stx >> 2);
1201 		dmae->dst_addr_hi = 0;
1202 		dmae->len = (sizeof(struct host_func_stats) >> 2);
1203 		dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1204 		dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1205 		dmae->comp_val = DMAE_COMP_VAL;
1206 
1207 		*stats_comp = 0;
1208 	}
1209 }
1210 
1211 static void
1212 bnx2x_stats_stop(struct bnx2x_softc *sc)
1213 {
1214 	uint8_t update = FALSE;
1215 
1216 	bnx2x_stats_comp(sc);
1217 
1218 	if (sc->port.pmf)
1219 		update = bnx2x_hw_stats_update(sc) == 0;
1220 
1221 	update |= bnx2x_storm_stats_update(sc) == 0;
1222 
1223 	if (update) {
1224 		if (sc->port.pmf)
1225 			bnx2x_port_stats_stop(sc);
1226 
1227 		bnx2x_hw_stats_post(sc);
1228 		bnx2x_stats_comp(sc);
1229 	}
1230 }
1231 
1232 static void
1233 bnx2x_stats_do_nothing(__rte_unused struct bnx2x_softc *sc)
1234 {
1235 }
1236 
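/*
 * Statistics state machine: rows are the current state (DISABLED/ENABLED),
 * columns the incoming event (PMF, LINK_UP, UPDATE, STOP); each entry names
 * the action to execute and the next state, as consumed by
 * bnx2x_stats_handle().
 */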
1237 static const struct {
1238 	void (*action)(struct bnx2x_softc *sc);
1239 	enum bnx2x_stats_state next_state;
1240 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
1241 	{
1242 	/* DISABLED PMF */ { bnx2x_stats_pmf_update, STATS_STATE_DISABLED },
1243 	/*      LINK_UP */ { bnx2x_stats_start,      STATS_STATE_ENABLED },
1244 	/*      UPDATE  */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED },
1245 	/*      STOP    */ { bnx2x_stats_do_nothing, STATS_STATE_DISABLED }
1246 	},
1247 	{
1248 	/* ENABLED  PMF */ { bnx2x_stats_pmf_start,  STATS_STATE_ENABLED },
1249 	/*      LINK_UP */ { bnx2x_stats_restart,    STATS_STATE_ENABLED },
1250 	/*      UPDATE  */ { bnx2x_stats_update,     STATS_STATE_ENABLED },
1251 	/*      STOP    */ { bnx2x_stats_stop,       STATS_STATE_DISABLED }
1252 	}
1253 };
1254 
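/*
 * Drive the statistics state machine. The event sources live elsewhere in
 * the driver; an illustrative (not verbatim) call sequence would be:
 *
 *   bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);   link handling
 *   bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);    periodic stats poll
 *   bnx2x_stats_handle(sc, STATS_EVENT_STOP);      device stop
 */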
1255 void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
1256 {
1257 	enum bnx2x_stats_state state;
1258 
1259 	if (unlikely(sc->panic))
1260 		return;
1261 
1262 	state = sc->stats_state;
1263 	sc->stats_state = bnx2x_stats_stm[state][event].next_state;
1264 
1265 	bnx2x_stats_stm[state][event].action(sc);
1266 
1267 	if (event != STATS_EVENT_UPDATE) {
1268 		PMD_DRV_LOG(DEBUG, sc,
1269 				"state %d -> event %d -> state %d",
1270 				state, event, sc->stats_state);
1271 	}
1272 }
1273 
1274 static void
1275 bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
1276 {
1277 	struct dmae_command *dmae;
1278 	uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
1279 
1280 	/* sanity */
1281 	if (!sc->port.pmf || !sc->port.port_stx) {
1282 		PMD_DRV_LOG(ERR, sc, "BUG!");
1283 		return;
1284 	}
1285 
1286 	sc->executer_idx = 0;
1287 
1288 	dmae = BNX2X_SP(sc, dmae[sc->executer_idx++]);
1289 	dmae->opcode = bnx2x_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
1290 					 TRUE, DMAE_COMP_PCI);
1291 	dmae->src_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, port_stats));
1292 	dmae->src_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, port_stats));
1293 	dmae->dst_addr_lo = (sc->port.port_stx >> 2);
1294 	dmae->dst_addr_hi = 0;
1295 	dmae->len = bnx2x_get_port_stats_dma_len(sc);
1296 	dmae->comp_addr_lo = U64_LO(BNX2X_SP_MAPPING(sc, stats_comp));
1297 	dmae->comp_addr_hi = U64_HI(BNX2X_SP_MAPPING(sc, stats_comp));
1298 	dmae->comp_val = DMAE_COMP_VAL;
1299 
1300 	*stats_comp = 0;
1301 	bnx2x_hw_stats_post(sc);
1302 	bnx2x_stats_comp(sc);
1303 }
1304 
1305 /*
1306  * This function prepares the statistics ramrod data so that afterwards
1307  * we only have to increment the statistics counter and send the ramrod
1308  * each time statistics are needed.
1309  */
1310 static void
1311 bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
1312 {
1313 	int i;
1314 	int first_queue_query_index;
1315 	struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
1316 	rte_iova_t cur_data_offset;
1317 	struct stats_query_entry *cur_query_entry;
1318 
1319 	stats_hdr->cmd_num = sc->fw_stats_num;
1320 	stats_hdr->drv_stats_counter = 0;
1321 
1322 	/*
1323 	 * The storm_counters struct contains the counters of completed
1324 	 * statistics requests per storm, which are incremented by the FW
1325 	 * each time it completes handling a statistics ramrod. We will
1326 	 * check these counters in the timer handler and discard a
1327 	 * (statistics) ramrod completion.
1328 	 */
1329 	cur_data_offset = (sc->fw_stats_data_mapping +
1330 			   offsetof(struct bnx2x_fw_stats_data, storm_counters));
1331 
1332 	stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset));
1333 	stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset));
1334 
1335 	/*
1336 	 * Prepare the first stats ramrod (will be completed with
1337 	 * the counters equal to zero) - init counters to something different.
1338 	 */
1339 	memset(&sc->fw_stats_data->storm_counters, 0xff,
1340 	       sizeof(struct stats_counter));
1341 
1342 	/**** Port FW statistics data ****/
1343 	cur_data_offset = (sc->fw_stats_data_mapping +
1344 			   offsetof(struct bnx2x_fw_stats_data, port));
1345 
1346 	cur_query_entry = &sc->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
1347 
1348 	cur_query_entry->kind = STATS_TYPE_PORT;
1349 	/* For port query index is a DON'T CARE */
1350 	cur_query_entry->index = SC_PORT(sc);
1351 	/* For port query funcID is a DON'T CARE */
1352 	cur_query_entry->funcID = htole16(SC_FUNC(sc));
1353 	cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1354 	cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1355 
1356 	/**** PF FW statistics data ****/
1357 	cur_data_offset = (sc->fw_stats_data_mapping +
1358 			   offsetof(struct bnx2x_fw_stats_data, pf));
1359 
1360 	cur_query_entry = &sc->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
1361 
1362 	cur_query_entry->kind = STATS_TYPE_PF;
1363 	/* For PF query index is a DON'T CARE */
1364 	cur_query_entry->index = SC_PORT(sc);
1365 	cur_query_entry->funcID = htole16(SC_FUNC(sc));
1366 	cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1367 	cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1368 
1369 	/**** Clients' queries ****/
1370 	cur_data_offset = (sc->fw_stats_data_mapping +
1371 			   offsetof(struct bnx2x_fw_stats_data, queue_stats));
1372 
1373 	/*
1374 	 * The first queue query index depends on whether the FCoE offloaded
1375 	 * request will be included in the ramrod.
1376 	 */
1377 	first_queue_query_index = (BNX2X_FIRST_QUEUE_QUERY_IDX - 1);
1378 
1379 	for (i = 0; i < sc->num_queues; i++) {
1380 		cur_query_entry =
1381 			&sc->fw_stats_req->query[first_queue_query_index + i];
1382 
1383 		cur_query_entry->kind = STATS_TYPE_QUEUE;
1384 		cur_query_entry->index = bnx2x_stats_id(&sc->fp[i]);
1385 		cur_query_entry->funcID = htole16(SC_FUNC(sc));
1386 		cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset));
1387 		cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset));
1388 
1389 		cur_data_offset += sizeof(struct per_queue_stats);
1390 	}
1391 }
1392 
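/*
 * Reset the driver-side statistics bookkeeping: clear the per-queue storm
 * snapshots (and, on a fresh init, the accumulated driver stats), move the
 * state machine to DISABLED and re-seed the port stats base when acting as
 * the PMF.
 */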
1393 void bnx2x_memset_stats(struct bnx2x_softc *sc)
1394 {
1395 	int i;
1396 
1397 	/* function stats */
1398 	for (i = 0; i < sc->num_queues; i++) {
1399 		struct bnx2x_fastpath *fp = &sc->fp[i];
1400 
1401 		memset(&fp->old_tclient, 0,
1402 				sizeof(fp->old_tclient));
1403 		memset(&fp->old_uclient, 0,
1404 				sizeof(fp->old_uclient));
1405 		memset(&fp->old_xclient, 0,
1406 				sizeof(fp->old_xclient));
1407 		if (sc->stats_init) {
1408 			memset(&fp->eth_q_stats, 0,
1409 					sizeof(fp->eth_q_stats));
1410 			memset(&fp->eth_q_stats_old, 0,
1411 					sizeof(fp->eth_q_stats_old));
1412 		}
1413 	}
1414 
1415 	if (sc->stats_init) {
1416 		memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1417 		memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1418 		memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1419 		memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1420 		memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1421 	}
1422 
1423 	sc->stats_state = STATS_STATE_DISABLED;
1424 
1425 	if (sc->port.pmf && sc->port.port_stx)
1426 		bnx2x_port_stats_base_init(sc);
1427 
1428 	/* mark the end of statistics initialization */
1429 	sc->stats_init = false;
1430 }
1431 
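/*
 * Full statistics (re)initialization at device start: pick up the MCP port
 * and function stats addresses from shmem, snapshot the current NIG counters
 * as the "old" baseline, prepare the FW statistics ramrod request and, on
 * the first init, clear all accumulated counters.
 */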
1432 void
1433 bnx2x_stats_init(struct bnx2x_softc *sc)
1434 {
1435 	int /*abs*/port = SC_PORT(sc);
1436 	int mb_idx = SC_FW_MB_IDX(sc);
1437 	int i;
1438 
1439 	sc->stats_pending = 0;
1440 	sc->executer_idx = 0;
1441 	sc->stats_counter = 0;
1442 
1443 	sc->stats_init = TRUE;
1444 
1445 	/* port and func stats for management */
1446 	if (!BNX2X_NOMCP(sc)) {
1447 		sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
1448 		sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param);
1449 	} else {
1450 		sc->port.port_stx = 0;
1451 		sc->func_stx = 0;
1452 	}
1453 
1454 	PMD_DRV_LOG(DEBUG, sc, "port_stx 0x%x func_stx 0x%x",
1455 			sc->port.port_stx, sc->func_stx);
1456 
1457 	/* the PMF should retrieve port statistics from SP on a non-init run */
1458 	if (!sc->stats_init && sc->port.pmf && sc->port.port_stx)
1459 		bnx2x_stats_handle(sc, STATS_EVENT_PMF);
1460 
1461 	port = SC_PORT(sc);
1462 	/* port stats */
1463 	memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
1464 	sc->port.old_nig_stats.brb_discard =
1465 		REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
1466 	sc->port.old_nig_stats.brb_truncate =
1467 		REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
1468 	if (!CHIP_IS_E3(sc)) {
1469 		REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
1470 				RTE_PTR_ADD(&sc->port.old_nig_stats,
1471 				offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2);
1472 		REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
1473 				RTE_PTR_ADD(&sc->port.old_nig_stats,
1474 				offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2);
1475 	}
1476 
1477 	/* function stats */
1478 	for (i = 0; i < sc->num_queues; i++) {
1479 		memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient));
1480 		memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient));
1481 		memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient));
1482 		if (sc->stats_init) {
1483 			memset(&sc->fp[i].eth_q_stats, 0,
1484 					sizeof(sc->fp[i].eth_q_stats));
1485 			memset(&sc->fp[i].eth_q_stats_old, 0,
1486 					sizeof(sc->fp[i].eth_q_stats_old));
1487 		}
1488 	}
1489 
1490 	/* prepare statistics ramrod data */
1491 	bnx2x_prep_fw_stats_req(sc);
1492 
1493 	if (sc->stats_init) {
1494 		memset(&sc->net_stats_old, 0, sizeof(sc->net_stats_old));
1495 		memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old));
1496 		memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old));
1497 		memset(&sc->eth_stats, 0, sizeof(sc->eth_stats));
1498 		memset(&sc->func_stats, 0, sizeof(sc->func_stats));
1499 
1500 		/* Clean SP from previous statistics */
1501 		if (sc->func_stx) {
1502 			memset(BNX2X_SP(sc, func_stats), 0, sizeof(struct host_func_stats));
1503 			bnx2x_func_stats_init(sc);
1504 			bnx2x_hw_stats_post(sc);
1505 			bnx2x_stats_comp(sc);
1506 		}
1507 	}
1508 
1509 	sc->stats_state = STATS_STATE_DISABLED;
1510 
1511 	if (sc->port.pmf && sc->port.port_stx)
1512 		bnx2x_port_stats_base_init(sc);
1513 
1514 	/* mark the end of statistics initialization */
1515 	sc->stats_init = FALSE;
1516 }
1517 
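/*
 * Snapshot the current per-queue byte counters and, for the PMF, the PFC and
 * MF discard counters into the *_old structures so they can be carried over
 * across a statistics restart (callers are outside this file).
 */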
1518 void
1519 bnx2x_save_statistics(struct bnx2x_softc *sc)
1520 {
1521 	int i;
1522 
1523 	/* save queue statistics */
1524 	for (i = 0; i < sc->num_queues; i++) {
1525 		struct bnx2x_fastpath *fp = &sc->fp[i];
1526 		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1527 		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
1528 
1529 		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
1530 		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
1531 		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
1532 		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
1533 		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
1534 		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
1535 		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
1536 		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
1537 		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
1538 		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
1539 		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
1540 		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
1541 	}
1542 
1543 	/* store port firmware statistics */
1544 	if (sc->port.pmf) {
1545 		struct bnx2x_eth_stats *estats = &sc->eth_stats;
1546 		struct bnx2x_fw_port_stats_old *fwstats = &sc->fw_stats_old;
1547 		struct host_port_stats *pstats = BNX2X_SP(sc, port_stats);
1548 
1549 		fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi;
1550 		fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo;
1551 		fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi;
1552 		fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo;
1553 
1554 		if (IS_MF(sc)) {
1555 			UPDATE_FW_STAT_OLD(mac_filter_discard);
1556 			UPDATE_FW_STAT_OLD(mf_tag_discard);
1557 			UPDATE_FW_STAT_OLD(brb_truncate_discard);
1558 			UPDATE_FW_STAT_OLD(mac_discard);
1559 		}
1560 	}
1561 }
1562