xref: /dpdk/drivers/net/bnxt/bnxt_stats.c (revision 6cc5dfa69a0335849fc0903d3ada943acb33c7ce)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <inttypes.h>
7 
8 #include <rte_string_fns.h>
9 #include <rte_byteorder.h>
10 
11 #include "bnxt.h"
12 #include "bnxt_cpr.h"
13 #include "bnxt_filter.h"
14 #include "bnxt_hwrm.h"
15 #include "bnxt_rxq.h"
16 #include "bnxt_stats.h"
17 #include "bnxt_txq.h"
18 #include "bnxt_vnic.h"
19 #include "hsi_struct_def_dpdk.h"
20 
21 static const struct bnxt_xstats_name_off bnxt_rx_stats_strings[] = {
22 	{"rx_64b_frames", offsetof(struct rx_port_stats,
23 				rx_64b_frames)},
24 	{"rx_65b_127b_frames", offsetof(struct rx_port_stats,
25 				rx_65b_127b_frames)},
26 	{"rx_128b_255b_frames", offsetof(struct rx_port_stats,
27 				rx_128b_255b_frames)},
28 	{"rx_256b_511b_frames", offsetof(struct rx_port_stats,
29 				rx_256b_511b_frames)},
30 	{"rx_512b_1023b_frames", offsetof(struct rx_port_stats,
31 				rx_512b_1023b_frames)},
32 	{"rx_1024b_1518b_frames", offsetof(struct rx_port_stats,
33 				rx_1024b_1518b_frames)},
34 	{"rx_good_vlan_frames", offsetof(struct rx_port_stats,
35 				rx_good_vlan_frames)},
36 	{"rx_1519b_2047b_frames", offsetof(struct rx_port_stats,
37 				rx_1519b_2047b_frames)},
38 	{"rx_2048b_4095b_frames", offsetof(struct rx_port_stats,
39 				rx_2048b_4095b_frames)},
40 	{"rx_4096b_9216b_frames", offsetof(struct rx_port_stats,
41 				rx_4096b_9216b_frames)},
42 	{"rx_9217b_16383b_frames", offsetof(struct rx_port_stats,
43 				rx_9217b_16383b_frames)},
44 	{"rx_total_frames", offsetof(struct rx_port_stats,
45 				rx_total_frames)},
46 	{"rx_ucast_frames", offsetof(struct rx_port_stats,
47 				rx_ucast_frames)},
48 	{"rx_mcast_frames", offsetof(struct rx_port_stats,
49 				rx_mcast_frames)},
50 	{"rx_bcast_frames", offsetof(struct rx_port_stats,
51 				rx_bcast_frames)},
52 	{"rx_fcs_err_frames", offsetof(struct rx_port_stats,
53 				rx_fcs_err_frames)},
54 	{"rx_ctrl_frames", offsetof(struct rx_port_stats,
55 				rx_ctrl_frames)},
56 	{"rx_pause_frames", offsetof(struct rx_port_stats,
57 				rx_pause_frames)},
58 	{"rx_pfc_frames", offsetof(struct rx_port_stats,
59 				rx_pfc_frames)},
60 	{"rx_unsupported_opcode_frames", offsetof(struct rx_port_stats,
61 				rx_unsupported_opcode_frames)},
62 	{"rx_unsupported_da_pausepfc_frames", offsetof(struct rx_port_stats,
63 				rx_unsupported_da_pausepfc_frames)},
64 	{"rx_wrong_sa_frames", offsetof(struct rx_port_stats,
65 				rx_wrong_sa_frames)},
66 	{"rx_align_err_frames", offsetof(struct rx_port_stats,
67 				rx_align_err_frames)},
68 	{"rx_oor_len_frames", offsetof(struct rx_port_stats,
69 				rx_oor_len_frames)},
70 	{"rx_code_err_frames", offsetof(struct rx_port_stats,
71 				rx_code_err_frames)},
72 	{"rx_false_carrier_frames", offsetof(struct rx_port_stats,
73 				rx_false_carrier_frames)},
74 	{"rx_ovrsz_frames", offsetof(struct rx_port_stats,
75 				rx_ovrsz_frames)},
76 	{"rx_jbr_frames", offsetof(struct rx_port_stats,
77 				rx_jbr_frames)},
78 	{"rx_mtu_err_frames", offsetof(struct rx_port_stats,
79 				rx_mtu_err_frames)},
80 	{"rx_match_crc_frames", offsetof(struct rx_port_stats,
81 				rx_match_crc_frames)},
82 	{"rx_promiscuous_frames", offsetof(struct rx_port_stats,
83 				rx_promiscuous_frames)},
84 	{"rx_tagged_frames", offsetof(struct rx_port_stats,
85 				rx_tagged_frames)},
86 	{"rx_double_tagged_frames", offsetof(struct rx_port_stats,
87 				rx_double_tagged_frames)},
88 	{"rx_trunc_frames", offsetof(struct rx_port_stats,
89 				rx_trunc_frames)},
90 	{"rx_good_frames", offsetof(struct rx_port_stats,
91 				rx_good_frames)},
92 	{"rx_sch_crc_err_frames", offsetof(struct rx_port_stats,
93 				rx_sch_crc_err_frames)},
94 	{"rx_undrsz_frames", offsetof(struct rx_port_stats,
95 				rx_undrsz_frames)},
96 	{"rx_frag_frames", offsetof(struct rx_port_stats,
97 				rx_frag_frames)},
98 	{"rx_eee_lpi_events", offsetof(struct rx_port_stats,
99 				rx_eee_lpi_events)},
100 	{"rx_eee_lpi_duration", offsetof(struct rx_port_stats,
101 				rx_eee_lpi_duration)},
102 	{"rx_llfc_physical_msgs", offsetof(struct rx_port_stats,
103 				rx_llfc_physical_msgs)},
104 	{"rx_llfc_logical_msgs", offsetof(struct rx_port_stats,
105 				rx_llfc_logical_msgs)},
106 	{"rx_llfc_msgs_with_crc_err", offsetof(struct rx_port_stats,
107 				rx_llfc_msgs_with_crc_err)},
108 	{"rx_hcfc_msgs", offsetof(struct rx_port_stats,
109 				rx_hcfc_msgs)},
110 	{"rx_hcfc_msgs_with_crc_err", offsetof(struct rx_port_stats,
111 				rx_hcfc_msgs_with_crc_err)},
112 	{"rx_bytes", offsetof(struct rx_port_stats,
113 				rx_bytes)},
114 	{"rx_runt_bytes", offsetof(struct rx_port_stats,
115 				rx_runt_bytes)},
116 	{"rx_runt_frames", offsetof(struct rx_port_stats,
117 				rx_runt_frames)},
118 	{"rx_pfc_xon2xoff_frames_pri0", offsetof(struct rx_port_stats,
119 				rx_pfc_xon2xoff_frames_pri0)},
120 	{"rx_pfc_xon2xoff_frames_pri1", offsetof(struct rx_port_stats,
121 				rx_pfc_xon2xoff_frames_pri1)},
122 	{"rx_pfc_xon2xoff_frames_pri2", offsetof(struct rx_port_stats,
123 				rx_pfc_xon2xoff_frames_pri2)},
124 	{"rx_pfc_xon2xoff_frames_pri3", offsetof(struct rx_port_stats,
125 				rx_pfc_xon2xoff_frames_pri3)},
126 	{"rx_pfc_xon2xoff_frames_pri4", offsetof(struct rx_port_stats,
127 				rx_pfc_xon2xoff_frames_pri4)},
128 	{"rx_pfc_xon2xoff_frames_pri5", offsetof(struct rx_port_stats,
129 				rx_pfc_xon2xoff_frames_pri5)},
130 	{"rx_pfc_xon2xoff_frames_pri6", offsetof(struct rx_port_stats,
131 				rx_pfc_xon2xoff_frames_pri6)},
132 	{"rx_pfc_xon2xoff_frames_pri7", offsetof(struct rx_port_stats,
133 				rx_pfc_xon2xoff_frames_pri7)},
134 	{"rx_pfc_ena_frames_pri0", offsetof(struct rx_port_stats,
135 				rx_pfc_ena_frames_pri0)},
136 	{"rx_pfc_ena_frames_pri1", offsetof(struct rx_port_stats,
137 				rx_pfc_ena_frames_pri1)},
138 	{"rx_pfc_ena_frames_pri2", offsetof(struct rx_port_stats,
139 				rx_pfc_ena_frames_pri2)},
140 	{"rx_pfc_ena_frames_pri3", offsetof(struct rx_port_stats,
141 				rx_pfc_ena_frames_pri3)},
142 	{"rx_pfc_ena_frames_pri4", offsetof(struct rx_port_stats,
143 				rx_pfc_ena_frames_pri4)},
144 	{"rx_pfc_ena_frames_pri5", offsetof(struct rx_port_stats,
145 				rx_pfc_ena_frames_pri5)},
146 	{"rx_pfc_ena_frames_pri6", offsetof(struct rx_port_stats,
147 				rx_pfc_ena_frames_pri6)},
148 	{"rx_pfc_ena_frames_pri7", offsetof(struct rx_port_stats,
149 				rx_pfc_ena_frames_pri7)},
150 	{"rx_stat_discard", offsetof(struct rx_port_stats,
151 				rx_stat_discard)},
152 	{"rx_stat_err", offsetof(struct rx_port_stats,
153 				rx_stat_err)},
154 };
155 
156 static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
157 	{"tx_64b_frames", offsetof(struct tx_port_stats,
158 				tx_64b_frames)},
159 	{"tx_65b_127b_frames", offsetof(struct tx_port_stats,
160 				tx_65b_127b_frames)},
161 	{"tx_128b_255b_frames", offsetof(struct tx_port_stats,
162 				tx_128b_255b_frames)},
163 	{"tx_256b_511b_frames", offsetof(struct tx_port_stats,
164 				tx_256b_511b_frames)},
165 	{"tx_512b_1023b_frames", offsetof(struct tx_port_stats,
166 				tx_512b_1023b_frames)},
167 	{"tx_1024b_1518b_frames", offsetof(struct tx_port_stats,
168 				tx_1024b_1518b_frames)},
169 	{"tx_good_vlan_frames", offsetof(struct tx_port_stats,
170 				tx_good_vlan_frames)},
171 	{"tx_1519b_2047b_frames", offsetof(struct tx_port_stats,
172 				tx_1519b_2047b_frames)},
173 	{"tx_2048b_4095b_frames", offsetof(struct tx_port_stats,
174 				tx_2048b_4095b_frames)},
175 	{"tx_4096b_9216b_frames", offsetof(struct tx_port_stats,
176 				tx_4096b_9216b_frames)},
177 	{"tx_9217b_16383b_frames", offsetof(struct tx_port_stats,
178 				tx_9217b_16383b_frames)},
179 	{"tx_good_frames", offsetof(struct tx_port_stats,
180 				tx_good_frames)},
181 	{"tx_total_frames", offsetof(struct tx_port_stats,
182 				tx_total_frames)},
183 	{"tx_ucast_frames", offsetof(struct tx_port_stats,
184 				tx_ucast_frames)},
185 	{"tx_mcast_frames", offsetof(struct tx_port_stats,
186 				tx_mcast_frames)},
187 	{"tx_bcast_frames", offsetof(struct tx_port_stats,
188 				tx_bcast_frames)},
189 	{"tx_pause_frames", offsetof(struct tx_port_stats,
190 				tx_pause_frames)},
191 	{"tx_pfc_frames", offsetof(struct tx_port_stats,
192 				tx_pfc_frames)},
193 	{"tx_jabber_frames", offsetof(struct tx_port_stats,
194 				tx_jabber_frames)},
195 	{"tx_fcs_err_frames", offsetof(struct tx_port_stats,
196 				tx_fcs_err_frames)},
197 	{"tx_control_frames", offsetof(struct tx_port_stats,
198 				tx_control_frames)},
199 	{"tx_oversz_frames", offsetof(struct tx_port_stats,
200 				tx_oversz_frames)},
201 	{"tx_single_dfrl_frames", offsetof(struct tx_port_stats,
202 				tx_single_dfrl_frames)},
203 	{"tx_multi_dfrl_frames", offsetof(struct tx_port_stats,
204 				tx_multi_dfrl_frames)},
205 	{"tx_single_coll_frames", offsetof(struct tx_port_stats,
206 				tx_single_coll_frames)},
207 	{"tx_multi_coll_frames", offsetof(struct tx_port_stats,
208 				tx_multi_coll_frames)},
209 	{"tx_late_coll_frames", offsetof(struct tx_port_stats,
210 				tx_late_coll_frames)},
211 	{"tx_excessive_coll_frames", offsetof(struct tx_port_stats,
212 				tx_excessive_coll_frames)},
213 	{"tx_frag_frames", offsetof(struct tx_port_stats,
214 				tx_frag_frames)},
215 	{"tx_err", offsetof(struct tx_port_stats,
216 				tx_err)},
217 	{"tx_tagged_frames", offsetof(struct tx_port_stats,
218 				tx_tagged_frames)},
219 	{"tx_dbl_tagged_frames", offsetof(struct tx_port_stats,
220 				tx_dbl_tagged_frames)},
221 	{"tx_runt_frames", offsetof(struct tx_port_stats,
222 				tx_runt_frames)},
223 	{"tx_fifo_underruns", offsetof(struct tx_port_stats,
224 				tx_fifo_underruns)},
225 	{"tx_eee_lpi_events", offsetof(struct tx_port_stats,
226 				tx_eee_lpi_events)},
227 	{"tx_eee_lpi_duration", offsetof(struct tx_port_stats,
228 				tx_eee_lpi_duration)},
229 	{"tx_total_collisions", offsetof(struct tx_port_stats,
230 				tx_total_collisions)},
231 	{"tx_bytes", offsetof(struct tx_port_stats,
232 				tx_bytes)},
233 	{"tx_pfc_ena_frames_pri0", offsetof(struct tx_port_stats,
234 				tx_pfc_ena_frames_pri0)},
235 	{"tx_pfc_ena_frames_pri1", offsetof(struct tx_port_stats,
236 				tx_pfc_ena_frames_pri1)},
237 	{"tx_pfc_ena_frames_pri2", offsetof(struct tx_port_stats,
238 				tx_pfc_ena_frames_pri2)},
239 	{"tx_pfc_ena_frames_pri3", offsetof(struct tx_port_stats,
240 				tx_pfc_ena_frames_pri3)},
241 	{"tx_pfc_ena_frames_pri4", offsetof(struct tx_port_stats,
242 				tx_pfc_ena_frames_pri4)},
243 	{"tx_pfc_ena_frames_pri5", offsetof(struct tx_port_stats,
244 				tx_pfc_ena_frames_pri5)},
245 	{"tx_pfc_ena_frames_pri6", offsetof(struct tx_port_stats,
246 				tx_pfc_ena_frames_pri6)},
247 	{"tx_pfc_ena_frames_pri7", offsetof(struct tx_port_stats,
248 				tx_pfc_ena_frames_pri7)},
249 	{"tx_llfc_logical_msgs", offsetof(struct tx_port_stats,
250 				tx_llfc_logical_msgs)},
251 	{"tx_hcfc_msgs", offsetof(struct tx_port_stats,
252 				tx_hcfc_msgs)},
253 	{"tx_xthol_frames", offsetof(struct tx_port_stats,
254 				tx_xthol_frames)},
255 	{"tx_stat_discard", offsetof(struct tx_port_stats,
256 				tx_stat_discard)},
257 	{"tx_stat_error", offsetof(struct tx_port_stats,
258 				tx_stat_error)},
259 };
260 
261 static const struct bnxt_xstats_name_off bnxt_func_stats_ext_strings[] = {
262 	{"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
263 				tx_ucast_pkts)},
264 	{"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
265 				tx_mcast_pkts)},
266 	{"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
267 				tx_bcast_pkts)},
268 	{"tx_discard_pkts", offsetof(struct hwrm_func_qstats_ext_output,
269 				tx_discard_pkts)},
270 	{"tx_drop_pkts", offsetof(struct hwrm_func_qstats_ext_output,
271 				tx_error_pkts)},
272 	{"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
273 				tx_ucast_bytes)},
274 	{"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
275 				tx_mcast_bytes)},
276 	{"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
277 				tx_bcast_bytes)},
278 	{"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
279 				rx_ucast_pkts)},
280 	{"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
281 				rx_mcast_pkts)},
282 	{"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
283 				rx_bcast_pkts)},
284 	{"rx_discard_pkts", offsetof(struct hwrm_func_qstats_ext_output,
285 				rx_discard_pkts)},
286 	{"rx_drop_pkts", offsetof(struct hwrm_func_qstats_ext_output,
287 				rx_error_pkts)},
288 	{"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
289 				rx_ucast_bytes)},
290 	{"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
291 				rx_mcast_bytes)},
292 	{"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
293 				rx_bcast_bytes)},
294 	{"rx_tpa_eligible_pkt", offsetof(struct hwrm_func_qstats_ext_output,
295 				rx_tpa_eligible_pkt)},
296 	{"rx_tpa_eligible_bytes", offsetof(struct hwrm_func_qstats_ext_output,
297 				rx_tpa_eligible_bytes)},
298 	{"rx_tpa_pkt", offsetof(struct hwrm_func_qstats_ext_output,
299 				rx_tpa_pkt)},
300 	{"rx_tpa_bytes", offsetof(struct hwrm_func_qstats_ext_output,
301 				rx_tpa_bytes)},
302 	{"rx_tpa_errors", offsetof(struct hwrm_func_qstats_ext_output,
303 				rx_tpa_errors)},
304 	{"rx_tpa_events", offsetof(struct hwrm_func_qstats_ext_output,
305 				rx_tpa_events)},
306 };
307 
308 static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
309 	{"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
310 				tx_ucast_pkts)},
311 	{"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
312 				tx_mcast_pkts)},
313 	{"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
314 				tx_bcast_pkts)},
315 	{"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
316 				tx_discard_pkts)},
317 	{"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
318 				tx_drop_pkts)},
319 	{"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
320 				tx_ucast_bytes)},
321 	{"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
322 				tx_mcast_bytes)},
323 	{"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
324 				tx_bcast_bytes)},
325 	{"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
326 				rx_ucast_pkts)},
327 	{"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_output,
328 				rx_mcast_pkts)},
329 	{"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
330 				rx_bcast_pkts)},
331 	{"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
332 				rx_discard_pkts)},
333 	{"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
334 				rx_drop_pkts)},
335 	{"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
336 				rx_ucast_bytes)},
337 	{"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_output,
338 				rx_mcast_bytes)},
339 	{"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_output,
340 				rx_bcast_bytes)},
341 	{"rx_agg_pkts", offsetof(struct hwrm_func_qstats_output,
342 				rx_agg_pkts)},
343 	{"rx_agg_bytes", offsetof(struct hwrm_func_qstats_output,
344 				rx_agg_bytes)},
345 	{"rx_agg_events", offsetof(struct hwrm_func_qstats_output,
346 				rx_agg_events)},
347 	{"rx_agg_aborts", offsetof(struct hwrm_func_qstats_output,
348 				rx_agg_aborts)},
349 };
350 
351 
352 static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = {
353 	{"link_down_events", offsetof(struct rx_port_stats_ext,
354 				link_down_events)},
355 	{"continuous_pause_events", offsetof(struct rx_port_stats_ext,
356 				continuous_pause_events)},
357 	{"resume_pause_events", offsetof(struct rx_port_stats_ext,
358 				resume_pause_events)},
359 	{"continuous_roce_pause_events", offsetof(struct rx_port_stats_ext,
360 				continuous_roce_pause_events)},
361 	{"resume_roce_pause_events", offsetof(struct rx_port_stats_ext,
362 				resume_roce_pause_events)},
363 	{"rx_bytes_cos0", offsetof(struct rx_port_stats_ext,
364 				rx_bytes_cos0)},
365 	{"rx_bytes_cos1", offsetof(struct rx_port_stats_ext,
366 				rx_bytes_cos1)},
367 	{"rx_bytes_cos2", offsetof(struct rx_port_stats_ext,
368 				rx_bytes_cos2)},
369 	{"rx_bytes_cos3", offsetof(struct rx_port_stats_ext,
370 				rx_bytes_cos3)},
371 	{"rx_bytes_cos4", offsetof(struct rx_port_stats_ext,
372 				rx_bytes_cos4)},
373 	{"rx_bytes_cos5", offsetof(struct rx_port_stats_ext,
374 				rx_bytes_cos5)},
375 	{"rx_bytes_cos6", offsetof(struct rx_port_stats_ext,
376 				rx_bytes_cos6)},
377 	{"rx_bytes_cos7", offsetof(struct rx_port_stats_ext,
378 				rx_bytes_cos7)},
379 	{"rx_packets_cos0", offsetof(struct rx_port_stats_ext,
380 				rx_packets_cos0)},
381 	{"rx_packets_cos1", offsetof(struct rx_port_stats_ext,
382 				rx_packets_cos1)},
383 	{"rx_packets_cos2", offsetof(struct rx_port_stats_ext,
384 				rx_packets_cos2)},
385 	{"rx_packets_cos3", offsetof(struct rx_port_stats_ext,
386 				rx_packets_cos3)},
387 	{"rx_packets_cos4", offsetof(struct rx_port_stats_ext,
388 				rx_packets_cos4)},
389 	{"rx_packets_cos5", offsetof(struct rx_port_stats_ext,
390 				rx_packets_cos5)},
391 	{"rx_packets_cos6", offsetof(struct rx_port_stats_ext,
392 				rx_packets_cos6)},
393 	{"rx_packets_cos7", offsetof(struct rx_port_stats_ext,
394 				rx_packets_cos7)},
395 	{"pfc_pri0_rx_duration_us", offsetof(struct rx_port_stats_ext,
396 				pfc_pri0_rx_duration_us)},
397 	{"pfc_pri0_rx_transitions", offsetof(struct rx_port_stats_ext,
398 				pfc_pri0_rx_transitions)},
399 	{"pfc_pri1_rx_duration_us", offsetof(struct rx_port_stats_ext,
400 				pfc_pri1_rx_duration_us)},
401 	{"pfc_pri1_rx_transitions", offsetof(struct rx_port_stats_ext,
402 				pfc_pri1_rx_transitions)},
403 	{"pfc_pri2_rx_duration_us", offsetof(struct rx_port_stats_ext,
404 				pfc_pri2_rx_duration_us)},
405 	{"pfc_pri2_rx_transitions", offsetof(struct rx_port_stats_ext,
406 				pfc_pri2_rx_transitions)},
407 	{"pfc_pri3_rx_duration_us", offsetof(struct rx_port_stats_ext,
408 				pfc_pri3_rx_duration_us)},
409 	{"pfc_pri3_rx_transitions", offsetof(struct rx_port_stats_ext,
410 				pfc_pri3_rx_transitions)},
411 	{"pfc_pri4_rx_duration_us", offsetof(struct rx_port_stats_ext,
412 				pfc_pri4_rx_duration_us)},
413 	{"pfc_pri4_rx_transitions", offsetof(struct rx_port_stats_ext,
414 				pfc_pri4_rx_transitions)},
415 	{"pfc_pri5_rx_duration_us", offsetof(struct rx_port_stats_ext,
416 				pfc_pri5_rx_duration_us)},
417 	{"pfc_pri5_rx_transitions", offsetof(struct rx_port_stats_ext,
418 				pfc_pri5_rx_transitions)},
419 	{"pfc_pri6_rx_duration_us", offsetof(struct rx_port_stats_ext,
420 				pfc_pri6_rx_duration_us)},
421 	{"pfc_pri6_rx_transitions", offsetof(struct rx_port_stats_ext,
422 				pfc_pri6_rx_transitions)},
423 	{"pfc_pri7_rx_duration_us", offsetof(struct rx_port_stats_ext,
424 				pfc_pri7_rx_duration_us)},
425 	{"pfc_pri7_rx_transitions", offsetof(struct rx_port_stats_ext,
426 				pfc_pri7_rx_transitions)},
427 	{"rx_bits",		offsetof(struct rx_port_stats_ext,
428 				rx_bits)},
429 	{"rx_buffer_passed_threshold", offsetof(struct rx_port_stats_ext,
430 				rx_buffer_passed_threshold)},
431 	{"rx_pcs_symbol_err",	offsetof(struct rx_port_stats_ext,
432 				rx_pcs_symbol_err)},
433 	{"rx_corrected_bits",	offsetof(struct rx_port_stats_ext,
434 				rx_corrected_bits)},
435 	{"rx_discard_bytes_cos0", offsetof(struct rx_port_stats_ext,
436 				rx_discard_bytes_cos0)},
437 	{"rx_discard_bytes_cos1", offsetof(struct rx_port_stats_ext,
438 				rx_discard_bytes_cos1)},
439 	{"rx_discard_bytes_cos2", offsetof(struct rx_port_stats_ext,
440 				rx_discard_bytes_cos2)},
441 	{"rx_discard_bytes_cos3", offsetof(struct rx_port_stats_ext,
442 				rx_discard_bytes_cos3)},
443 	{"rx_discard_bytes_cos4", offsetof(struct rx_port_stats_ext,
444 				rx_discard_bytes_cos4)},
445 	{"rx_discard_bytes_cos5", offsetof(struct rx_port_stats_ext,
446 				rx_discard_bytes_cos5)},
447 	{"rx_discard_bytes_cos6", offsetof(struct rx_port_stats_ext,
448 				rx_discard_bytes_cos6)},
449 	{"rx_discard_bytes_cos7", offsetof(struct rx_port_stats_ext,
450 				rx_discard_bytes_cos7)},
451 	{"rx_discard_packets_cos0", offsetof(struct rx_port_stats_ext,
452 				rx_discard_packets_cos0)},
453 	{"rx_discard_packets_cos1", offsetof(struct rx_port_stats_ext,
454 				rx_discard_packets_cos1)},
455 	{"rx_discard_packets_cos2", offsetof(struct rx_port_stats_ext,
456 				rx_discard_packets_cos2)},
457 	{"rx_discard_packets_cos3", offsetof(struct rx_port_stats_ext,
458 				rx_discard_packets_cos3)},
459 	{"rx_discard_packets_cos4", offsetof(struct rx_port_stats_ext,
460 				rx_discard_packets_cos4)},
461 	{"rx_discard_packets_cos5", offsetof(struct rx_port_stats_ext,
462 				rx_discard_packets_cos5)},
463 	{"rx_discard_packets_cos6", offsetof(struct rx_port_stats_ext,
464 				rx_discard_packets_cos6)},
465 	{"rx_discard_packets_cos7", offsetof(struct rx_port_stats_ext,
466 				rx_discard_packets_cos7)},
467 	{"rx_fec_corrected_blocks", offsetof(struct rx_port_stats_ext,
468 				rx_fec_corrected_blocks)},
469 	{"rx_fec_uncorrectable_blocks", offsetof(struct rx_port_stats_ext,
470 				rx_fec_uncorrectable_blocks)},
471 	{"rx_filter_miss", offsetof(struct rx_port_stats_ext,
472 				rx_filter_miss)},
473 };
474 
475 static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = {
476 	{"tx_bytes_cos0", offsetof(struct tx_port_stats_ext,
477 				tx_bytes_cos0)},
478 	{"tx_bytes_cos1", offsetof(struct tx_port_stats_ext,
479 				tx_bytes_cos1)},
480 	{"tx_bytes_cos2", offsetof(struct tx_port_stats_ext,
481 				tx_bytes_cos2)},
482 	{"tx_bytes_cos3", offsetof(struct tx_port_stats_ext,
483 				tx_bytes_cos3)},
484 	{"tx_bytes_cos4", offsetof(struct tx_port_stats_ext,
485 				tx_bytes_cos4)},
486 	{"tx_bytes_cos5", offsetof(struct tx_port_stats_ext,
487 				tx_bytes_cos5)},
488 	{"tx_bytes_cos6", offsetof(struct tx_port_stats_ext,
489 				tx_bytes_cos6)},
490 	{"tx_bytes_cos7", offsetof(struct tx_port_stats_ext,
491 				tx_bytes_cos7)},
492 	{"tx_packets_cos0", offsetof(struct tx_port_stats_ext,
493 				tx_packets_cos0)},
494 	{"tx_packets_cos1", offsetof(struct tx_port_stats_ext,
495 				tx_packets_cos1)},
496 	{"tx_packets_cos2", offsetof(struct tx_port_stats_ext,
497 				tx_packets_cos2)},
498 	{"tx_packets_cos3", offsetof(struct tx_port_stats_ext,
499 				tx_packets_cos3)},
500 	{"tx_packets_cos4", offsetof(struct tx_port_stats_ext,
501 				tx_packets_cos4)},
502 	{"tx_packets_cos5", offsetof(struct tx_port_stats_ext,
503 				tx_packets_cos5)},
504 	{"tx_packets_cos6", offsetof(struct tx_port_stats_ext,
505 				tx_packets_cos6)},
506 	{"tx_packets_cos7", offsetof(struct tx_port_stats_ext,
507 				tx_packets_cos7)},
508 	{"pfc_pri0_tx_duration_us", offsetof(struct tx_port_stats_ext,
509 				pfc_pri0_tx_duration_us)},
510 	{"pfc_pri0_tx_transitions", offsetof(struct tx_port_stats_ext,
511 				pfc_pri0_tx_transitions)},
512 	{"pfc_pri1_tx_duration_us", offsetof(struct tx_port_stats_ext,
513 				pfc_pri1_tx_duration_us)},
514 	{"pfc_pri1_tx_transitions", offsetof(struct tx_port_stats_ext,
515 				pfc_pri1_tx_transitions)},
516 	{"pfc_pri2_tx_duration_us", offsetof(struct tx_port_stats_ext,
517 				pfc_pri2_tx_duration_us)},
518 	{"pfc_pri2_tx_transitions", offsetof(struct tx_port_stats_ext,
519 				pfc_pri2_tx_transitions)},
520 	{"pfc_pri3_tx_duration_us", offsetof(struct tx_port_stats_ext,
521 				pfc_pri3_tx_duration_us)},
522 	{"pfc_pri3_tx_transitions", offsetof(struct tx_port_stats_ext,
523 				pfc_pri3_tx_transitions)},
524 	{"pfc_pri4_tx_duration_us", offsetof(struct tx_port_stats_ext,
525 				pfc_pri4_tx_duration_us)},
526 	{"pfc_pri4_tx_transitions", offsetof(struct tx_port_stats_ext,
527 				pfc_pri4_tx_transitions)},
528 	{"pfc_pri5_tx_duration_us", offsetof(struct tx_port_stats_ext,
529 				pfc_pri5_tx_duration_us)},
530 	{"pfc_pri5_tx_transitions", offsetof(struct tx_port_stats_ext,
531 				pfc_pri5_tx_transitions)},
532 	{"pfc_pri6_tx_duration_us", offsetof(struct tx_port_stats_ext,
533 				pfc_pri6_tx_duration_us)},
534 	{"pfc_pri6_tx_transitions", offsetof(struct tx_port_stats_ext,
535 				pfc_pri6_tx_transitions)},
536 	{"pfc_pri7_tx_duration_us", offsetof(struct tx_port_stats_ext,
537 				pfc_pri7_tx_duration_us)},
538 	{"pfc_pri7_tx_transitions", offsetof(struct tx_port_stats_ext,
539 				pfc_pri7_tx_transitions)},
540 };
541 
542 /*
543  * Statistics functions
544  */
545 
546 void bnxt_free_stats(struct bnxt *bp)
547 {
548 	int i;
549 
550 	for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) {
551 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
552 
553 		bnxt_free_txq_stats(txq);
554 	}
555 	for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) {
556 		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
557 
558 		bnxt_free_rxq_stats(rxq);
559 	}
560 }
561 
562 static void bnxt_fill_rte_eth_stats_ext(struct rte_eth_stats *stats,
563 					struct bnxt_ring_stats_ext *ring_stats,
564 					unsigned int i, bool rx)
565 {
566 	if (rx) {
567 		stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
568 		stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
569 		stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
570 
571 		stats->ipackets += stats->q_ipackets[i];
572 
573 		stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
574 		stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
575 		stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
576 
577 		stats->ibytes += stats->q_ibytes[i];
578 
579 		stats->q_errors[i] = ring_stats->rx_discard_pkts;
580 		stats->q_errors[i] += ring_stats->rx_error_pkts;
581 
582 		stats->imissed += ring_stats->rx_discard_pkts;
583 		stats->ierrors += ring_stats->rx_error_pkts;
584 	} else {
585 		stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
586 		stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
587 		stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
588 
589 		stats->opackets += stats->q_opackets[i];
590 
591 		stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
592 		stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
593 		stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
594 
595 		stats->obytes += stats->q_obytes[i];
596 
597 		stats->oerrors += ring_stats->tx_discard_pkts;
598 	}
599 }
600 
601 static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
602 				    struct bnxt_ring_stats *ring_stats,
603 				    unsigned int i, bool rx)
604 {
605 	if (rx) {
606 		stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
607 		stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
608 		stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
609 
610 		stats->ipackets += stats->q_ipackets[i];
611 
612 		stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
613 		stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
614 		stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
615 
616 		stats->ibytes += stats->q_ibytes[i];
617 
618 		stats->q_errors[i] = ring_stats->rx_discard_pkts;
619 		stats->q_errors[i] += ring_stats->rx_error_pkts;
620 
621 		stats->imissed += ring_stats->rx_discard_pkts;
622 		stats->ierrors += ring_stats->rx_error_pkts;
623 	} else {
624 		stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
625 		stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
626 		stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
627 
628 		stats->opackets += stats->q_opackets[i];
629 
630 		stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
631 		stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
632 		stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
633 
634 		stats->obytes += stats->q_obytes[i];
635 
636 		stats->oerrors += ring_stats->tx_discard_pkts;
637 	}
638 }
639 
640 static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
641 				 struct rte_eth_stats *bnxt_stats)
642 {
643 	int rc = 0;
644 	unsigned int i;
645 	struct bnxt *bp = eth_dev->data->dev_private;
646 	unsigned int num_q_stats;
647 
648 	num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
649 			      (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
650 
651 	for (i = 0; i < num_q_stats; i++) {
652 		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
653 		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
654 		struct bnxt_ring_stats_ext ring_stats = {0};
655 
656 		if (!rxq->rx_started)
657 			continue;
658 
659 		rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
660 					      &ring_stats, true);
661 		if (unlikely(rc))
662 			return rc;
663 
664 		bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
665 		bnxt_stats->rx_nombuf +=
666 				rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail,
667 							 rte_memory_order_relaxed);
668 	}
669 
670 	num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
671 			      (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
672 
673 	for (i = 0; i < num_q_stats; i++) {
674 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
675 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
676 		struct bnxt_ring_stats_ext ring_stats = {0};
677 
678 		if (!txq->tx_started)
679 			continue;
680 
681 		rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
682 					      &ring_stats, false);
683 		if (unlikely(rc))
684 			return rc;
685 
686 		bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, false);
687 	}
688 
689 	return rc;
690 }
691 
/*
 * rte_eth_dev stats_get callback.
 *
 * Queries the firmware ring statistics for every started Rx and Tx queue
 * and accumulates them into 'bnxt_stats', adding the PMD-level software
 * counters (rx_mbuf_alloc_fail, tx_mbuf_drop).
 *
 * Returns 0 on success, -EIO if the device is not started, or the
 * negative error code from the HWRM query.
 */
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
		      struct rte_eth_stats *bnxt_stats)
{
	int rc = 0;
	unsigned int i;
	struct bnxt *bp = eth_dev->data->dev_private;
	unsigned int num_q_stats;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (!eth_dev->data->dev_started)
		return -EIO;

	/* Devices using the extended stats format take a separate path. */
	if (BNXT_TPA_V2_P7(bp))
		return bnxt_stats_get_ext(eth_dev, bnxt_stats);

	/* Only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues get q_* slots. */
	num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
			      (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (i = 0; i < num_q_stats; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		struct bnxt_ring_stats ring_stats = {0};

		/* Stopped queues have no valid HW stats context. */
		if (!rxq->rx_started)
			continue;

		rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
					  &ring_stats, true);
		if (unlikely(rc))
			return rc;

		bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
		/* Account mbuf allocation failures seen by the PMD itself. */
		bnxt_stats->rx_nombuf +=
				rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail,
							 rte_memory_order_relaxed);
	}

	num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
			      (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (i = 0; i < num_q_stats; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		struct bnxt_ring_stats ring_stats = {0};

		if (!txq->tx_started)
			continue;

		rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
					  &ring_stats, false);
		if (unlikely(rc))
			return rc;

		bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, false);
		/* Mbufs dropped by the PMD on Tx also count as output errors. */
		bnxt_stats->oerrors +=
				rte_atomic_load_explicit(&txq->tx_mbuf_drop,
							 rte_memory_order_relaxed);
	}

	return rc;
}
756 
757 static void bnxt_clear_prev_stat(struct bnxt *bp)
758 {
759 	/*
760 	 * Clear the cached values of stats returned by HW in the previous
761 	 * get operation.
762 	 */
763 	if (BNXT_TPA_V2_P7(bp)) {
764 		memset(bp->prev_rx_ring_stats_ext, 0,
765 		       sizeof(struct bnxt_ring_stats_ext) * bp->rx_cp_nr_rings);
766 		memset(bp->prev_tx_ring_stats_ext, 0,
767 		       sizeof(struct bnxt_ring_stats_ext) * bp->tx_cp_nr_rings);
768 	} else {
769 		memset(bp->prev_rx_ring_stats, 0,
770 		       sizeof(struct bnxt_ring_stats) * bp->rx_cp_nr_rings);
771 		memset(bp->prev_tx_ring_stats, 0,
772 		       sizeof(struct bnxt_ring_stats) * bp->tx_cp_nr_rings);
773 	}
774 }
775 
/*
 * rte_eth_dev stats_reset callback: clear the firmware stat contexts,
 * the PMD-level software counters, and the cached previous-stats
 * snapshot.
 *
 * Returns 0 on success, -EINVAL if the device is not started, or the
 * error code from clearing the HWRM stat contexts.
 */
int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	unsigned int i;
	int ret;

	ret = is_bnxt_in_error(bp);
	if (ret)
		return ret;

	if (!eth_dev->data->dev_started) {
		PMD_DRV_LOG_LINE(ERR, "Device Initialization not complete!");
		return -EINVAL;
	}

	/* SW counters are cleared even if the HWRM request fails; the
	 * failure is still propagated through 'ret' at the end.
	 */
	ret = bnxt_clear_all_hwrm_stat_ctxs(bp);
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];

		/* NOTE(review): plain store to a counter read elsewhere with
		 * rte_atomic_load_explicit — presumably tolerated for a
		 * reset; confirm against the datapath usage.
		 */
		rxq->rx_mbuf_alloc_fail = 0;
	}

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		txq->tx_mbuf_drop = 0;
	}

	bnxt_clear_prev_stat(bp);

	return ret;
}
808 
809 static void bnxt_fill_func_qstats_ext(struct hwrm_func_qstats_ext_output *func_qstats,
810 				      struct bnxt_ring_stats_ext *ring_stats,
811 				      bool rx)
812 {
813 	if (rx) {
814 		func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
815 		func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
816 		func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
817 
818 		func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
819 		func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
820 		func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
821 
822 		func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
823 		func_qstats->rx_error_pkts += ring_stats->rx_error_pkts;
824 
825 		func_qstats->rx_tpa_eligible_pkt += ring_stats->rx_tpa_eligible_pkt;
826 		func_qstats->rx_tpa_eligible_bytes += ring_stats->rx_tpa_eligible_bytes;
827 		func_qstats->rx_tpa_pkt += ring_stats->rx_tpa_pkt;
828 		func_qstats->rx_tpa_bytes += ring_stats->rx_tpa_bytes;
829 		func_qstats->rx_tpa_errors += ring_stats->rx_tpa_errors;
830 		func_qstats->rx_tpa_events += ring_stats->rx_tpa_events;
831 	} else {
832 		func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
833 		func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
834 		func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
835 
836 		func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
837 		func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
838 		func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
839 
840 		func_qstats->tx_error_pkts += ring_stats->tx_error_pkts;
841 		func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
842 	}
843 }
844 
845 static void bnxt_fill_func_qstats(struct hwrm_func_qstats_output *func_qstats,
846 				  struct bnxt_ring_stats *ring_stats,
847 				  bool rx)
848 {
849 	if (rx) {
850 		func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
851 		func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
852 		func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
853 
854 		func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
855 		func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
856 		func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
857 
858 		func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
859 		func_qstats->rx_drop_pkts += ring_stats->rx_error_pkts;
860 
861 		func_qstats->rx_agg_pkts += ring_stats->rx_agg_pkts;
862 		func_qstats->rx_agg_bytes += ring_stats->rx_agg_bytes;
863 		func_qstats->rx_agg_events += ring_stats->rx_agg_events;
864 		func_qstats->rx_agg_aborts += ring_stats->rx_agg_aborts;
865 	} else {
866 		func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
867 		func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
868 		func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
869 
870 		func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
871 		func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
872 		func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
873 
874 		func_qstats->tx_drop_pkts += ring_stats->tx_error_pkts;
875 		func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
876 	}
877 }
878 
879 int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
880 			   struct rte_eth_xstat *xstats, unsigned int n)
881 {
882 	struct bnxt *bp = eth_dev->data->dev_private;
883 	unsigned int count, i;
884 	unsigned int rx_port_stats_ext_cnt;
885 	unsigned int tx_port_stats_ext_cnt;
886 	unsigned int stat_size = sizeof(uint64_t);
887 	struct hwrm_func_qstats_output func_qstats = {0};
888 	struct hwrm_func_qstats_ext_output func_qstats_ext = {0};
889 	unsigned int stat_count, sz;
890 	int rc;
891 
892 	rc = is_bnxt_in_error(bp);
893 	if (rc)
894 		return rc;
895 
896 	if (BNXT_TPA_V2_P7(bp))
897 		sz = RTE_DIM(bnxt_func_stats_ext_strings);
898 	else
899 		sz = RTE_DIM(bnxt_func_stats_strings);
900 
901 	stat_count = RTE_DIM(bnxt_rx_stats_strings) +
902 		RTE_DIM(bnxt_tx_stats_strings) + sz +
903 		RTE_DIM(bnxt_rx_ext_stats_strings) +
904 		RTE_DIM(bnxt_tx_ext_stats_strings) +
905 		bnxt_flow_stats_cnt(bp);
906 
907 	if (n < stat_count || xstats == NULL)
908 		return stat_count;
909 
910 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
911 		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
912 		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
913 		struct bnxt_ring_stats ring_stats = {0};
914 		struct bnxt_ring_stats_ext ring_stats_ext = {0};
915 
916 		if (!rxq->rx_started)
917 			continue;
918 
919 		if (BNXT_TPA_V2_P7(bp))
920 			rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
921 						      &ring_stats_ext, true);
922 		else
923 			rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
924 						  &ring_stats, true);
925 
926 		if (unlikely(rc))
927 			return rc;
928 
929 		if (BNXT_TPA_V2_P7(bp))
930 			bnxt_fill_func_qstats_ext(&func_qstats_ext,
931 						  &ring_stats_ext, true);
932 		else
933 			bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
934 	}
935 
936 	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
937 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
938 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
939 		struct bnxt_ring_stats ring_stats = {0};
940 		struct bnxt_ring_stats_ext ring_stats_ext = {0};
941 
942 		if (!txq->tx_started)
943 			continue;
944 
945 		if (BNXT_TPA_V2_P7(bp))
946 			rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
947 						      &ring_stats_ext, false);
948 		else
949 			rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
950 						  &ring_stats, false);
951 		if (unlikely(rc))
952 			return rc;
953 
954 		if (BNXT_TPA_V2_P7(bp))
955 			bnxt_fill_func_qstats_ext(&func_qstats_ext,
956 						  &ring_stats_ext, false);
957 		else
958 			bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
959 	}
960 
961 	bnxt_hwrm_port_qstats(bp);
962 	bnxt_hwrm_ext_port_qstats(bp);
963 	rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings),
964 					(bp->fw_rx_port_stats_ext_size /
965 					 stat_size));
966 	tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings),
967 					(bp->fw_tx_port_stats_ext_size /
968 					 stat_size));
969 
970 	memset(xstats, 0, sizeof(*xstats) * n);
971 
972 	count = 0;
973 	for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
974 		uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
975 		xstats[count].id = count;
976 		xstats[count].value = rte_le_to_cpu_64(
977 				*(uint64_t *)((char *)rx_stats +
978 				bnxt_rx_stats_strings[i].offset));
979 		count++;
980 	}
981 
982 	for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
983 		uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
984 		xstats[count].id = count;
985 		xstats[count].value = rte_le_to_cpu_64(
986 				 *(uint64_t *)((char *)tx_stats +
987 				bnxt_tx_stats_strings[i].offset));
988 		count++;
989 	}
990 
991 	if (BNXT_TPA_V2_P7(bp)) {
992 		for (i = 0; i < RTE_DIM(bnxt_func_stats_ext_strings); i++) {
993 			xstats[count].id = count;
994 			xstats[count].value = *(uint64_t *)((char *)&func_qstats_ext +
995 							    bnxt_func_stats_ext_strings[i].offset);
996 			count++;
997 		}
998 		goto skip_func_stats;
999 	}
1000 	for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
1001 		xstats[count].id = count;
1002 		xstats[count].value = *(uint64_t *)((char *)&func_qstats +
1003 					 bnxt_func_stats_strings[i].offset);
1004 		count++;
1005 	}
1006 
1007 skip_func_stats:
1008 	for (i = 0; i < rx_port_stats_ext_cnt; i++) {
1009 		uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
1010 
1011 		xstats[count].value = rte_le_to_cpu_64
1012 					(*(uint64_t *)((char *)rx_stats_ext +
1013 					 bnxt_rx_ext_stats_strings[i].offset));
1014 
1015 		count++;
1016 	}
1017 
1018 	for (i = 0; i < tx_port_stats_ext_cnt; i++) {
1019 		uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext;
1020 
1021 		xstats[count].value = rte_le_to_cpu_64
1022 					(*(uint64_t *)((char *)tx_stats_ext +
1023 					 bnxt_tx_ext_stats_strings[i].offset));
1024 		count++;
1025 	}
1026 
1027 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
1028 	    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
1029 	    BNXT_FLOW_XSTATS_EN(bp)) {
1030 		int j;
1031 
1032 		i = 0;
1033 		for (j = 0; j < bp->max_vnics; j++) {
1034 			struct bnxt_filter_info *filter;
1035 			struct bnxt_vnic_info *vnic;
1036 			struct rte_flow *flow;
1037 
1038 			vnic = &bp->vnic_info[j];
1039 			if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
1040 				continue;
1041 
1042 			if (STAILQ_EMPTY(&vnic->flow_list))
1043 				continue;
1044 
1045 			STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1046 				if (!flow || !flow->filter)
1047 					continue;
1048 
1049 				filter = flow->filter;
1050 				xstats[count].id = count;
1051 				xstats[count].value =
1052 					filter->hw_stats.bytes;
1053 				count++;
1054 				xstats[count].id = count;
1055 				xstats[count].value =
1056 					filter->hw_stats.packets;
1057 				count++;
1058 				if (++i > bp->max_l2_ctx)
1059 					break;
1060 			}
1061 			if (i > bp->max_l2_ctx)
1062 				break;
1063 		}
1064 	}
1065 
1066 	return stat_count;
1067 }
1068 
1069 int bnxt_flow_stats_cnt(struct bnxt *bp)
1070 {
1071 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
1072 	    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
1073 	    BNXT_FLOW_XSTATS_EN(bp)) {
1074 		struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx];
1075 		struct bnxt_xstats_name_off flow_pkts[bp->max_l2_ctx];
1076 
1077 		return RTE_DIM(flow_bytes) + RTE_DIM(flow_pkts);
1078 	}
1079 
1080 	return 0;
1081 }
1082 
1083 int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
1084 		struct rte_eth_xstat_name *xstats_names,
1085 		unsigned int size)
1086 {
1087 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1088 	unsigned int stat_cnt;
1089 	unsigned int i, count = 0, sz;
1090 	int rc;
1091 
1092 	rc = is_bnxt_in_error(bp);
1093 	if (rc)
1094 		return rc;
1095 
1096 	if (BNXT_TPA_V2_P7(bp))
1097 		sz = RTE_DIM(bnxt_func_stats_ext_strings);
1098 	else
1099 		sz = RTE_DIM(bnxt_func_stats_strings);
1100 
1101 	stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
1102 				RTE_DIM(bnxt_tx_stats_strings) +
1103 				sz +
1104 				RTE_DIM(bnxt_rx_ext_stats_strings) +
1105 				RTE_DIM(bnxt_tx_ext_stats_strings) +
1106 				bnxt_flow_stats_cnt(bp);
1107 
1108 	if (xstats_names == NULL || size < stat_cnt)
1109 		return stat_cnt;
1110 
1111 	for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
1112 		strlcpy(xstats_names[count].name,
1113 			bnxt_rx_stats_strings[i].name,
1114 			sizeof(xstats_names[count].name));
1115 		count++;
1116 	}
1117 
1118 	for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
1119 		strlcpy(xstats_names[count].name,
1120 			bnxt_tx_stats_strings[i].name,
1121 			sizeof(xstats_names[count].name));
1122 		count++;
1123 	}
1124 
1125 	if (BNXT_TPA_V2_P7(bp)) {
1126 		for (i = 0; i < RTE_DIM(bnxt_func_stats_ext_strings); i++) {
1127 			strlcpy(xstats_names[count].name,
1128 				bnxt_func_stats_ext_strings[i].name,
1129 				sizeof(xstats_names[count].name));
1130 			count++;
1131 		}
1132 		goto skip_func_stats;
1133 	}
1134 
1135 	for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
1136 		strlcpy(xstats_names[count].name,
1137 			bnxt_func_stats_strings[i].name,
1138 			sizeof(xstats_names[count].name));
1139 		count++;
1140 	}
1141 
1142 skip_func_stats:
1143 	for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
1144 		strlcpy(xstats_names[count].name,
1145 			bnxt_rx_ext_stats_strings[i].name,
1146 			sizeof(xstats_names[count].name));
1147 
1148 		count++;
1149 	}
1150 
1151 	for (i = 0; i < RTE_DIM(bnxt_tx_ext_stats_strings); i++) {
1152 		strlcpy(xstats_names[count].name,
1153 			bnxt_tx_ext_stats_strings[i].name,
1154 			sizeof(xstats_names[count].name));
1155 
1156 		count++;
1157 	}
1158 
1159 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
1160 	    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
1161 	    BNXT_FLOW_XSTATS_EN(bp)) {
1162 		for (i = 0; i < bp->max_l2_ctx; i++) {
1163 			char buf[RTE_ETH_XSTATS_NAME_SIZE];
1164 
1165 			sprintf(buf, "flow_%d_bytes", i);
1166 			strlcpy(xstats_names[count].name, buf,
1167 				sizeof(xstats_names[count].name));
1168 			count++;
1169 
1170 			sprintf(buf, "flow_%d_packets", i);
1171 			strlcpy(xstats_names[count].name, buf,
1172 				sizeof(xstats_names[count].name));
1173 
1174 			count++;
1175 		}
1176 	}
1177 
1178 	return stat_cnt;
1179 }
1180 
1181 int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
1182 {
1183 	struct bnxt *bp = eth_dev->data->dev_private;
1184 	int ret;
1185 
1186 	ret = is_bnxt_in_error(bp);
1187 	if (ret)
1188 		return ret;
1189 
1190 	if (BNXT_VF(bp) || !BNXT_SINGLE_PF(bp) ||
1191 	    !(bp->flags & BNXT_FLAG_PORT_STATS)) {
1192 		PMD_DRV_LOG_LINE(ERR, "Operation not supported");
1193 		return -ENOTSUP;
1194 	}
1195 
1196 	ret = bnxt_hwrm_port_clr_stats(bp);
1197 	if (ret != 0)
1198 		PMD_DRV_LOG_LINE(ERR, "Failed to reset xstats: %s",
1199 			    strerror(-ret));
1200 
1201 	bnxt_clear_prev_stat(bp);
1202 
1203 	return ret;
1204 }
1205 
/* Update the input context memory with the flow counter IDs
 * of the flows that we are interested in.
 * Also seed the output tables with the driver's current local values,
 * since those are the values FW will accumulate onto.
 */
1211 static void bnxt_update_fc_pre_qstat(uint32_t *in_tbl,
1212 				     uint64_t *out_tbl,
1213 				     struct bnxt_filter_info *filter,
1214 				     uint32_t *ptbl_cnt)
1215 {
1216 	uint32_t in_tbl_cnt = *ptbl_cnt;
1217 
1218 	in_tbl[in_tbl_cnt] = filter->flow_id;
1219 	out_tbl[2 * in_tbl_cnt] = filter->hw_stats.packets;
1220 	out_tbl[2 * in_tbl_cnt + 1] = filter->hw_stats.bytes;
1221 	in_tbl_cnt++;
1222 	*ptbl_cnt = in_tbl_cnt;
1223 }
1224 
1225 /* Post issuing counter_qstats cmd, update the driver's local stat
1226  * entries with the values DMA-ed by FW in the output table
1227  */
1228 static void bnxt_update_fc_post_qstat(struct bnxt_filter_info *filter,
1229 				      uint64_t *out_tbl,
1230 				      uint32_t out_tbl_idx)
1231 {
1232 	filter->hw_stats.packets = out_tbl[2 * out_tbl_idx];
1233 	filter->hw_stats.bytes = out_tbl[(2 * out_tbl_idx) + 1];
1234 }
1235 
/*
 * Query FW for the counters of up to @in_flow_cnt flows (NULL entries in
 * @en_tbl are skipped) and refresh each filter's local hw_stats.
 * Returns 0 on success or the HWRM error code.
 */
static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr,
			      struct bnxt_filter_info *en_tbl[],
			      uint16_t in_flow_cnt)
{
	uint32_t *in_rx_tbl;
	uint64_t *out_rx_tbl;
	uint32_t in_rx_tbl_cnt = 0;
	uint32_t out_rx_tbl_cnt = 0;
	int i, rc = 0;

	/* DMA-able tables shared with FW: counter IDs in, values out */
	in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va;
	out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va;

	/* Stage each flow's counter ID and current local totals */
	for (i = 0; i < in_flow_cnt; i++) {
		if (!en_tbl[i])
			continue;

		/* Currently only ingress/Rx flows are supported anyway. */
		bnxt_update_fc_pre_qstat(in_rx_tbl, out_rx_tbl,
					 en_tbl[i], &in_rx_tbl_cnt);
	}

	/* Currently only ingress/Rx flows are supported */
	if (in_rx_tbl_cnt) {
		rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, ctr,
						  in_rx_tbl_cnt);
		if (rc)
			return rc;
	}

	/*
	 * Copy the FW-updated values back. out_rx_tbl_cnt advances only
	 * for non-NULL entries, mirroring the packing done above.
	 */
	for (i = 0; i < in_flow_cnt; i++) {
		if (!en_tbl[i])
			continue;

		/* Currently only ingress/Rx flows are supported */
		bnxt_update_fc_post_qstat(en_tbl[i], out_rx_tbl,
					  out_rx_tbl_cnt);
		out_rx_tbl_cnt++;
	}

	return rc;
}
1278 
1279 /* Walks through the list which has all the flows
1280  * requesting for explicit flow counters.
1281  */
int bnxt_flow_stats_req(struct bnxt *bp)
{
	int i;
	int rc = 0;
	struct rte_flow *flow;
	uint16_t in_flow_tbl_cnt = 0;
	struct bnxt_vnic_info *vnic = NULL;
	/* VLA sized by the FW-advertised flow-counter limit; flows are
	 * batched into chunks of at most this many per HWRM query.
	 */
	struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc];
	uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC;

	/* Hold the flow lock across the walk so the flow lists are stable */
	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		if (STAILQ_EMPTY(&vnic->flow_list))
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			if (!flow || !flow->filter)
				continue;

			valid_en_tbl[in_flow_tbl_cnt++] = flow->filter;
			/* Batch full: flush it to FW and start a new one */
			if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) {
				rc = bnxt_update_fc_tbl(bp, counter_type,
							valid_en_tbl,
							in_flow_tbl_cnt);
				if (rc)
					goto err;
				in_flow_tbl_cnt = 0;
				continue;
			}
		}
	}

	/* Nothing left over after the last full batch */
	if (!in_flow_tbl_cnt) {
		bnxt_release_flow_lock(bp);
		goto out;
	}

	/* Flush the final, partially filled batch */
	rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl,
				in_flow_tbl_cnt);
	if (!rc) {
		bnxt_release_flow_lock(bp);
		return 0;
	}

err:
	/* If cmd fails once, no need of
	 * invoking again every second
	 */
	bnxt_release_flow_lock(bp);
	bnxt_cancel_fc_thread(bp);
out:
	return rc;
}
1339