1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2019 Hisilicon Limited.
3  */
4 
5 #include <rte_ethdev.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8 
9 #include "hns3_ethdev.h"
10 #include "hns3_rxtx.h"
11 #include "hns3_logs.h"
12 #include "hns3_regs.h"
13 
14 /* The statistics of the per-rxq basic stats */
15 static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
16 	{"packets",
17 		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
18 	{"bytes",
19 		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
20 	{"errors",
21 		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
22 };
23 
24 /* The statistics of the per-txq basic stats */
25 static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
26 	{"packets",
27 		HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
28 	{"bytes",
29 		HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
30 };
31 
32 /* MAC statistics */
33 static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
34 	{"mac_tx_mac_pause_num",
35 		HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
36 	{"mac_rx_mac_pause_num",
37 		HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
38 	{"mac_tx_control_pkt_num",
39 		HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
40 	{"mac_rx_control_pkt_num",
41 		HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
42 	{"mac_tx_pfc_pkt_num",
43 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
44 	{"mac_tx_pfc_pri0_pkt_num",
45 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
46 	{"mac_tx_pfc_pri1_pkt_num",
47 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
48 	{"mac_tx_pfc_pri2_pkt_num",
49 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
50 	{"mac_tx_pfc_pri3_pkt_num",
51 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
52 	{"mac_tx_pfc_pri4_pkt_num",
53 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
54 	{"mac_tx_pfc_pri5_pkt_num",
55 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
56 	{"mac_tx_pfc_pri6_pkt_num",
57 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
58 	{"mac_tx_pfc_pri7_pkt_num",
59 		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
60 	{"mac_rx_pfc_pkt_num",
61 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
62 	{"mac_rx_pfc_pri0_pkt_num",
63 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
64 	{"mac_rx_pfc_pri1_pkt_num",
65 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
66 	{"mac_rx_pfc_pri2_pkt_num",
67 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
68 	{"mac_rx_pfc_pri3_pkt_num",
69 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
70 	{"mac_rx_pfc_pri4_pkt_num",
71 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
72 	{"mac_rx_pfc_pri5_pkt_num",
73 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
74 	{"mac_rx_pfc_pri6_pkt_num",
75 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
76 	{"mac_rx_pfc_pri7_pkt_num",
77 		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
78 	{"mac_tx_total_pkt_num",
79 		HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
80 	{"mac_tx_total_oct_num",
81 		HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
82 	{"mac_tx_good_pkt_num",
83 		HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
84 	{"mac_tx_bad_pkt_num",
85 		HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
86 	{"mac_tx_good_oct_num",
87 		HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
88 	{"mac_tx_bad_oct_num",
89 		HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
90 	{"mac_tx_uni_pkt_num",
91 		HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
92 	{"mac_tx_multi_pkt_num",
93 		HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
94 	{"mac_tx_broad_pkt_num",
95 		HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
96 	{"mac_tx_undersize_pkt_num",
97 		HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
98 	{"mac_tx_oversize_pkt_num",
99 		HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
100 	{"mac_tx_64_oct_pkt_num",
101 		HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
102 	{"mac_tx_65_127_oct_pkt_num",
103 		HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
104 	{"mac_tx_128_255_oct_pkt_num",
105 		HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
106 	{"mac_tx_256_511_oct_pkt_num",
107 		HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
108 	{"mac_tx_512_1023_oct_pkt_num",
109 		HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
110 	{"mac_tx_1024_1518_oct_pkt_num",
111 		HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
112 	{"mac_tx_1519_2047_oct_pkt_num",
113 		HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
114 	{"mac_tx_2048_4095_oct_pkt_num",
115 		HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
116 	{"mac_tx_4096_8191_oct_pkt_num",
117 		HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
118 	{"mac_tx_8192_9216_oct_pkt_num",
119 		HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
120 	{"mac_tx_9217_12287_oct_pkt_num",
121 		HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
122 	{"mac_tx_12288_16383_oct_pkt_num",
123 		HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
124 	{"mac_tx_1519_max_good_pkt_num",
125 		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
126 	{"mac_tx_1519_max_bad_pkt_num",
127 		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
128 	{"mac_rx_total_pkt_num",
129 		HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
130 	{"mac_rx_total_oct_num",
131 		HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
132 	{"mac_rx_good_pkt_num",
133 		HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
134 	{"mac_rx_bad_pkt_num",
135 		HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
136 	{"mac_rx_good_oct_num",
137 		HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
138 	{"mac_rx_bad_oct_num",
139 		HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
140 	{"mac_rx_uni_pkt_num",
141 		HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
142 	{"mac_rx_multi_pkt_num",
143 		HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
144 	{"mac_rx_broad_pkt_num",
145 		HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
146 	{"mac_rx_undersize_pkt_num",
147 		HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
148 	{"mac_rx_oversize_pkt_num",
149 		HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
150 	{"mac_rx_64_oct_pkt_num",
151 		HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
152 	{"mac_rx_65_127_oct_pkt_num",
153 		HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
154 	{"mac_rx_128_255_oct_pkt_num",
155 		HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
156 	{"mac_rx_256_511_oct_pkt_num",
157 		HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
158 	{"mac_rx_512_1023_oct_pkt_num",
159 		HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
160 	{"mac_rx_1024_1518_oct_pkt_num",
161 		HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
162 	{"mac_rx_1519_2047_oct_pkt_num",
163 		HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
164 	{"mac_rx_2048_4095_oct_pkt_num",
165 		HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
166 	{"mac_rx_4096_8191_oct_pkt_num",
167 		HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
168 	{"mac_rx_8192_9216_oct_pkt_num",
169 		HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
170 	{"mac_rx_9217_12287_oct_pkt_num",
171 		HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
172 	{"mac_rx_12288_16383_oct_pkt_num",
173 		HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
174 	{"mac_rx_1519_max_good_pkt_num",
175 		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
176 	{"mac_rx_1519_max_bad_pkt_num",
177 		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
178 	{"mac_tx_fragment_pkt_num",
179 		HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
180 	{"mac_tx_undermin_pkt_num",
181 		HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
182 	{"mac_tx_jabber_pkt_num",
183 		HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
184 	{"mac_tx_err_all_pkt_num",
185 		HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
186 	{"mac_tx_from_app_good_pkt_num",
187 		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
188 	{"mac_tx_from_app_bad_pkt_num",
189 		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
190 	{"mac_rx_fragment_pkt_num",
191 		HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
192 	{"mac_rx_undermin_pkt_num",
193 		HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
194 	{"mac_rx_jabber_pkt_num",
195 		HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
196 	{"mac_rx_fcs_err_pkt_num",
197 		HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
198 	{"mac_rx_send_app_good_pkt_num",
199 		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
200 	{"mac_rx_send_app_bad_pkt_num",
201 		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
202 };
203 
204 /* The reset statistics */
205 static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
206 	{"REQ_RESET_CNT",
207 		HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
208 	{"GLOBAL_RESET_CNT",
209 		HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
210 	{"IMP_RESET_CNT",
211 		HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
212 	{"RESET_EXEC_CNT",
213 		HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
214 	{"RESET_SUCCESS_CNT",
215 		HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
216 	{"RESET_FAIL_CNT",
217 		HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
218 	{"RESET_MERGE_CNT",
219 		HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
220 };
221 
222 /* The statistics of errors in Rx BDs */
223 static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
224 	{"PKT_LEN_ERRORS",
225 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
226 	{"L2_ERRORS",
227 		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
228 };
229 
230 /* The dfx statistics in the Rx datapath */
231 static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
232 	{"L3_CHECKSUM_ERRORS",
233 		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
234 	{"L4_CHECKSUM_ERRORS",
235 		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
236 	{"OL3_CHECKSUM_ERRORS",
237 		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
238 	{"OL4_CHECKSUM_ERRORS",
239 		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
240 };
241 
242 /* The dfx statistics in the Tx datapath */
243 static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
244 	{"OVER_LENGTH_PKT_CNT",
245 		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
246 	{"EXCEED_LIMITED_BD_PKT_CNT",
247 		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
248 	{"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
249 		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
250 	{"UNSUPPORTED_TUNNEL_PKT_CNT",
251 		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
252 	{"QUEUE_FULL_CNT",
253 		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
254 	{"SHORT_PKT_PAD_FAIL_CNT",
255 		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
256 };
257 
258 /* The statistics of the Rx queue */
259 static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
260 	{"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
261 };
262 
263 /* The statistics of the Tx queue */
264 static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
265 	{"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
266 };
267 
268 /* The statistics of imissed packets */
269 static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
270 	{"RPU_DROP_CNT",
271 		HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
272 	{"SSU_DROP_CNT",
273 		HNS3_IMISSED_STATS_FIELD_OFFSET(ssu_rx_drop_cnt)},
274 };
275 
276 #define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
277 	sizeof(hns3_mac_strings[0]))
278 
279 #define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
280 	sizeof(hns3_reset_stats_strings[0]))
281 
282 #define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
283 	sizeof(hns3_rx_bd_error_strings[0]))
284 
285 #define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
286 	sizeof(hns3_rxq_dfx_stats_strings[0]))
287 
288 #define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
289 	sizeof(hns3_txq_dfx_stats_strings[0]))
290 
291 #define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
292 	sizeof(hns3_rx_queue_strings[0]))
293 
294 #define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
295 	sizeof(hns3_tx_queue_strings[0]))
296 
297 #define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
298 	sizeof(hns3_rxq_basic_stats_strings[0]))
299 
300 #define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
301 	sizeof(hns3_txq_basic_stats_strings[0]))
302 
303 #define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
304 	sizeof(hns3_imissed_stats_strings[0]))
305 
306 #define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_RESET_XSTATS)
307 
308 static void hns3_tqp_stats_clear(struct hns3_hw *hw);
309 
310 /*
311  * Query all the MAC statistics data (Network ICL command), opcode id: 0x0034.
312  * The descriptor number passed to this command must match the reg_num
313  * obtained beforehand with the 'query MAC reg num' command (opcode 0x0033).
314  * @param hw
315  *   Pointer to structure hns3_hw.
316  * @return
317  *   0 on success.
318  */
319 static int
320 hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num)
321 {
322 	uint64_t *data = (uint64_t *)(&hw->mac_stats);
323 	struct hns3_cmd_desc *desc;
324 	uint64_t *desc_data;
325 	uint16_t i, k, n;
326 	int ret;
327 
328 	desc = rte_malloc("hns3_mac_desc",
329 			  desc_num * sizeof(struct hns3_cmd_desc), 0);
330 	if (desc == NULL) {
331 		hns3_err(hw, "failed to allocate MAC stats command descriptors");
332 		return -ENOMEM;
333 	}
334 
335 	hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
336 	ret = hns3_cmd_send(hw, desc, desc_num);
337 	if (ret) {
338 		hns3_err(hw, "failed to update all MAC statistics, ret = %d", ret);
339 		rte_free(desc);
340 		return ret;
341 	}
342 
343 	for (i = 0; i < desc_num; i++) {
344 		/* For special opcode 0x0034, only the first desc carries the header */
345 		if (i == 0) {
346 			desc_data = (uint64_t *)(&desc[i].data[0]);
347 			n = HNS3_RD_FIRST_STATS_NUM;
348 		} else {
349 			desc_data = (uint64_t *)(&desc[i]);
350 			n = HNS3_RD_OTHER_STATS_NUM;
351 		}
352 
353 		for (k = 0; k < n; k++) {
354 			*data += rte_le_to_cpu_64(*desc_data);
355 			data++;
356 			desc_data++;
357 		}
358 	}
359 	rte_free(desc);
360 
361 	return 0;
362 }
363 
364 /*
365  * Query the number of MAC statistics registers, opcode id: 0x0033.
366  * This command is sent before the 'query_mac_stat' command; the descriptor
367  * number of the 'query_mac_stat' command must match the reg_num it returns.
368  * @param desc_num
369  *   Pointer used to return the required descriptor number.
370  * @return
371  *   0 on success.
372  */
373 static int
374 hns3_mac_query_reg_num(struct rte_eth_dev *dev, uint32_t *desc_num)
375 {
376 	struct hns3_adapter *hns = dev->data->dev_private;
377 	struct hns3_hw *hw = &hns->hw;
378 	struct hns3_cmd_desc desc;
379 	uint32_t *desc_data;
380 	uint32_t reg_num;
381 	int ret;
382 
383 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
384 	ret = hns3_cmd_send(hw, &desc, 1);
385 	if (ret)
386 		return ret;
387 
388 	/*
389 	 * The number of MAC statistics registers that are provided by IMP in
390 	 * this version.
391 	 */
392 	desc_data = (uint32_t *)(&desc.data[0]);
393 	reg_num = rte_le_to_cpu_32(*desc_data);
394 
395 	/*
396 	 * The descriptor number of the 'query_additional_mac_stat' command is
397 	 * '1 + (reg_num - 3) / 4 + ((reg_num - 3) % 4 != 0)';
398 	 * this value is 83 in the current version.
399 	 */
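	/*
	 * Worked example (the reg_num value is illustrative only): with
	 * reg_num = 331, desc_num = 1 + (331 - 3) / 4 + 0 = 83, matching the
	 * value quoted above.
	 */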
400 	*desc_num = 1 + ((reg_num - 3) >> 2) +
401 		    (uint32_t)(((reg_num - 3) & 0x3) ? 1 : 0);
402 
403 	return 0;
404 }
405 
406 static int
407 hns3_query_update_mac_stats(struct rte_eth_dev *dev)
408 {
409 	struct hns3_adapter *hns = dev->data->dev_private;
410 	struct hns3_hw *hw = &hns->hw;
411 	uint32_t desc_num;
412 	int ret;
413 
414 	ret = hns3_mac_query_reg_num(dev, &desc_num);
415 	if (ret == 0)
416 		ret = hns3_update_mac_stats(hw, desc_num);
417 	else
418 		hns3_err(hw, "failed to query MAC stats reg num, ret = %d", ret);
419 	return ret;
420 }
421 
422 static int
423 hns3_update_port_rpu_drop_stats(struct hns3_hw *hw)
424 {
425 	struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
426 	struct hns3_query_rpu_cmd *req;
427 	struct hns3_cmd_desc desc;
428 	uint64_t cnt;
429 	uint32_t tc_num;
430 	int ret;
431 
432 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
433 	req = (struct hns3_query_rpu_cmd *)desc.data;
434 
435 	/*
436 	 * When tc_num is 0, the RPU stats of all TC channels are
437 	 * fetched from the firmware.
438 	 */
439 	tc_num = 0;
440 	req->tc_queue_num = rte_cpu_to_le_32(tc_num);
441 	ret = hns3_cmd_send(hw, &desc, 1);
442 	if (ret) {
443 		hns3_err(hw, "failed to query RPU stats: %d", ret);
444 		return ret;
445 	}
446 
447 	cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
448 	stats->rpu_rx_drop_cnt += cnt;
449 
450 	return 0;
451 }
452 
453 static void
454 hns3_update_function_rpu_drop_stats(struct hns3_hw *hw)
455 {
456 	struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
457 
458 	stats->rpu_rx_drop_cnt += hns3_read_dev(hw, HNS3_RPU_DROP_CNT_REG);
459 }
460 
461 static int
462 hns3_update_rpu_drop_stats(struct hns3_hw *hw)
463 {
464 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
465 	int ret = 0;
466 
467 	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && !hns->is_vf)
468 		ret = hns3_update_port_rpu_drop_stats(hw);
469 	else if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2)
470 		hns3_update_function_rpu_drop_stats(hw);
471 
472 	return ret;
473 }
474 
475 static int
476 hns3_get_ssu_drop_stats(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
477 			int bd_num, bool is_rx)
478 {
479 	struct hns3_query_ssu_cmd *req;
480 	int ret;
481 	int i;
482 
483 	for (i = 0; i < bd_num - 1; i++) {
484 		hns3_cmd_setup_basic_desc(&desc[i],
485 					  HNS3_OPC_SSU_DROP_REG, true);
486 		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
487 	}
488 	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_SSU_DROP_REG, true);
489 	req = (struct hns3_query_ssu_cmd *)desc[0].data;
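	/* rxtx == 0 selects the Rx SSU drop counters, rxtx == 1 the Tx ones */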
490 	req->rxtx = is_rx ? 0 : 1;
491 	ret = hns3_cmd_send(hw, desc, bd_num);
492 
493 	return ret;
494 }
495 
496 static int
497 hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
498 {
499 	struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
500 	struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
501 	struct hns3_query_ssu_cmd *req;
502 	uint64_t cnt;
503 	int ret;
504 
505 	ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
506 				      true);
507 	if (ret) {
508 		hns3_err(hw, "failed to get Rx SSU drop stats, ret = %d", ret);
509 		return ret;
510 	}
511 
512 	req = (struct hns3_query_ssu_cmd *)desc[0].data;
513 	cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
514 		rte_le_to_cpu_32(req->full_drop_cnt) +
515 		rte_le_to_cpu_32(req->part_drop_cnt);
516 
517 	stats->ssu_rx_drop_cnt += cnt;
518 
519 	return 0;
520 }
521 
522 static int
523 hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
524 {
525 	struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
526 	struct hns3_query_ssu_cmd *req;
527 	uint64_t cnt;
528 	int ret;
529 
530 	ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
531 				      false);
532 	if (ret) {
533 		hns3_err(hw, "failed to get Tx SSU drop stats, ret = %d", ret);
534 		return ret;
535 	}
536 
537 	req = (struct hns3_query_ssu_cmd *)desc[0].data;
538 	cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
539 		rte_le_to_cpu_32(req->full_drop_cnt) +
540 		rte_le_to_cpu_32(req->part_drop_cnt);
541 
542 	hw->oerror_stats += cnt;
543 
544 	return 0;
545 }
546 
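/*
 * Informal summary of the imissed accounting (derived from the helpers
 * above):
 * - HNS3_PKTS_DROP_STATS_MODE1: only the PF reads the port-level RPU drop
 *   counter through a firmware command; VFs expose no imissed counters.
 * - HNS3_PKTS_DROP_STATS_MODE2: each function reads its own RPU drop
 *   register, and the PF additionally accumulates the SSU Rx drop counters.
 */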
547 int
548 hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
549 {
550 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
551 	int ret;
552 
553 	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
554 		return 0;
555 
556 	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf) {
557 		ret = hns3_update_port_rx_ssu_drop_stats(hw);
558 		if (ret)
559 			return ret;
560 	}
561 
562 	ret = hns3_update_rpu_drop_stats(hw);
563 	if (ret)
564 		return ret;
565 
566 	if (is_clear)
567 		memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));
568 
569 	return 0;
570 }
571 
572 static int
573 hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
574 {
575 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
576 	int ret;
577 
578 	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 || hns->is_vf)
579 		return 0;
580 
581 	ret = hns3_update_port_tx_ssu_drop_stats(hw);
582 	if (ret)
583 		return ret;
584 
585 	if (is_clear)
586 		hw->oerror_stats = 0;
587 
588 	return 0;
589 }
590 
591 /*
592  * Query tqp Tx queue statistics, opcode id: 0x0B03.
593  * Query tqp Rx queue statistics, opcode id: 0x0B13.
594  * Get all statistics of a port.
595  * @param eth_dev
596  *   Pointer to Ethernet device.
597  * @param rte_stats
598  *   Pointer to structure rte_eth_stats.
599  * @return
600  *   0 on success.
601  */
602 int
603 hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
604 {
605 	struct hns3_adapter *hns = eth_dev->data->dev_private;
606 	struct hns3_hw *hw = &hns->hw;
607 	struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
608 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
609 	struct hns3_rx_queue *rxq;
610 	struct hns3_tx_queue *txq;
611 	uint64_t cnt;
612 	uint16_t i;
613 	int ret;
614 
615 	/* Update imissed stats */
616 	ret = hns3_update_imissed_stats(hw, false);
617 	if (ret) {
618 		hns3_err(hw, "update imissed stats failed, ret = %d",
619 			 ret);
620 		return ret;
621 	}
622 	rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
623 				imissed_stats->ssu_rx_drop_cnt;
624 
625 	/* Get the error stats and bytes of received packets */
626 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
627 		rxq = eth_dev->data->rx_queues[i];
628 		if (rxq == NULL)
629 			continue;
630 
631 		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
632 		/*
633 		 * Read hardware and software in adjacent positions to minimize
634 		 * the timing variance.
635 		 */
636 		rte_stats->ierrors += rxq->err_stats.l2_errors +
637 				      rxq->err_stats.pkt_len_errors;
638 		stats->rcb_rx_ring_pktnum_rcd += cnt;
639 		stats->rcb_rx_ring_pktnum[i] += cnt;
640 		rte_stats->ibytes += rxq->basic_stats.bytes;
641 	}
642 
643 	/* Reads all the stats of a txq in a loop to keep them synchronized */
644 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
645 		txq = eth_dev->data->tx_queues[i];
646 		if (txq == NULL)
647 			continue;
648 
649 		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
650 		stats->rcb_tx_ring_pktnum_rcd += cnt;
651 		stats->rcb_tx_ring_pktnum[i] += cnt;
652 		rte_stats->obytes += txq->basic_stats.bytes;
653 	}
654 
655 	ret = hns3_update_oerror_stats(hw, false);
656 	if (ret) {
657 		hns3_err(hw, "update oerror stats failed, ret = %d",
658 			 ret);
659 		return ret;
660 	}
661 	rte_stats->oerrors = hw->oerror_stats;
662 
663 	/*
664 	 * If HW statistics are reset by stats_reset, but a lot of residual
665 	 * packets exist in the hardware queue and these packets are error
666 	 * packets, the subtraction below may underflow. So return 0 in this case.
667 	 */
668 	rte_stats->ipackets =
669 		stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
670 		stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
671 	rte_stats->opackets  = stats->rcb_tx_ring_pktnum_rcd -
672 		rte_stats->oerrors;
673 	rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
674 
675 	return 0;
676 }
677 
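/*
 * Illustrative usage sketch (application side, not part of this driver;
 * port_id is assumed to be a valid, started port): the counters filled in
 * by hns3_stats_get() above are normally read through the generic ethdev
 * API, e.g.:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %" PRIu64 " pkts, imissed %" PRIu64 "\n",
 *		       stats.ipackets, stats.imissed);
 */
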
678 int
679 hns3_stats_reset(struct rte_eth_dev *eth_dev)
680 {
681 	struct hns3_adapter *hns = eth_dev->data->dev_private;
682 	struct hns3_hw *hw = &hns->hw;
683 	struct hns3_rx_queue *rxq;
684 	struct hns3_tx_queue *txq;
685 	uint16_t i;
686 	int ret;
687 
688 	/*
689 	 * Note: Reading hardware statistics of imissed registers will
690 	 * clear them.
691 	 */
692 	ret = hns3_update_imissed_stats(hw, true);
693 	if (ret) {
694 		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
695 		return ret;
696 	}
697 
698 	/*
699 	 * Note: Reading hardware statistics of oerror registers will
700 	 * clear them.
701 	 */
702 	ret = hns3_update_oerror_stats(hw, true);
703 	if (ret) {
704 		hns3_err(hw, "clear oerror stats failed, ret = %d",
705 			 ret);
706 		return ret;
707 	}
708 
709 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
710 		rxq = eth_dev->data->rx_queues[i];
711 		if (rxq == NULL)
712 			continue;
713 
714 		rxq->err_stats.pkt_len_errors = 0;
715 		rxq->err_stats.l2_errors = 0;
716 	}
717 
718 	/* Clear all the stats of a rxq in a loop to keep them synchronized */
719 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
720 		rxq = eth_dev->data->rx_queues[i];
721 		if (rxq == NULL)
722 			continue;
723 
724 		memset(&rxq->basic_stats, 0,
725 				sizeof(struct hns3_rx_basic_stats));
726 
727 		/* This register is read-clear */
728 		(void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
729 		rxq->err_stats.pkt_len_errors = 0;
730 		rxq->err_stats.l2_errors = 0;
731 	}
732 
733 	/* Clear all the stats of a txq in a loop to keep them synchronized */
734 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
735 		txq = eth_dev->data->tx_queues[i];
736 		if (txq == NULL)
737 			continue;
738 
739 		memset(&txq->basic_stats, 0,
740 				sizeof(struct hns3_tx_basic_stats));
741 
742 		/* This register is read-clear */
743 		(void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
744 	}
745 
746 	hns3_tqp_stats_clear(hw);
747 
748 	return 0;
749 }
750 
751 static int
752 hns3_mac_stats_reset(struct rte_eth_dev *dev)
753 {
754 	struct hns3_adapter *hns = dev->data->dev_private;
755 	struct hns3_hw *hw = &hns->hw;
756 	struct hns3_mac_stats *mac_stats = &hw->mac_stats;
757 	int ret;
758 
759 	ret = hns3_query_update_mac_stats(dev);
760 	if (ret) {
761 		hns3_err(hw, "failed to clear MAC stats, ret = %d", ret);
762 		return ret;
763 	}
764 
765 	memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
766 
767 	return 0;
768 }
769 
770 static int
771 hns3_get_imissed_stats_num(struct hns3_adapter *hns)
772 {
773 #define NO_IMISSED_STATS_NUM   0
774 #define RPU_STATS_ITEM_NUM     1
775 	struct hns3_hw *hw = &hns->hw;
776 
777 	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
778 		return NO_IMISSED_STATS_NUM;
779 
780 	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf)
781 		return HNS3_NUM_IMISSED_XSTATS;
782 
783 	return RPU_STATS_ITEM_NUM;
784 }
785 
786 /* This function calculates the number of xstats based on the current config */
787 static int
788 hns3_xstats_calc_num(struct rte_eth_dev *dev)
789 {
790 #define HNS3_PF_VF_RX_COMM_STATS_NUM	(HNS3_NUM_RX_BD_ERROR_XSTATS + \
791 					 HNS3_NUM_RXQ_DFX_XSTATS + \
792 					 HNS3_NUM_RX_QUEUE_STATS + \
793 					 HNS3_NUM_RXQ_BASIC_STATS)
794 #define HNS3_PF_VF_TX_COMM_STATS_NUM	(HNS3_NUM_TXQ_DFX_XSTATS + \
795 					 HNS3_NUM_TX_QUEUE_STATS + \
796 					 HNS3_NUM_TXQ_BASIC_STATS)
797 
798 	struct hns3_adapter *hns = dev->data->dev_private;
799 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
800 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
801 	int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
802 	int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
803 	int stats_num;
804 
805 	stats_num = rx_comm_stats_num + tx_comm_stats_num;
806 	stats_num += hns3_get_imissed_stats_num(hns);
807 
808 	if (hns->is_vf)
809 		stats_num += HNS3_NUM_RESET_XSTATS;
810 	else
811 		stats_num += HNS3_FIX_NUM_STATS;
812 
813 	return stats_num;
814 }
815 
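/*
 * Worked example for hns3_xstats_calc_num() above (illustrative, assuming a
 * PF with 4 Rx and 4 Tx queues): each Rx queue contributes 2 + 4 + 1 + 3 = 10
 * entries and each Tx queue 6 + 1 + 2 = 9 entries, so the per-queue part is
 * 4 * 10 + 4 * 9 = 76; the imissed entries and HNS3_FIX_NUM_STATS are then
 * added on top.
 */
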
816 static void
817 hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
818 		     int *count)
819 {
820 	struct hns3_adapter *hns = dev->data->dev_private;
821 	struct hns3_hw *hw = &hns->hw;
822 	uint32_t reg_offset;
823 	uint16_t i, j;
824 
825 	/* Get rx queue stats */
826 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
827 		for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
828 			reg_offset = hns3_get_tqp_reg_offset(j);
829 			xstats[*count].value = hns3_read_dev(hw,
830 				reg_offset + hns3_rx_queue_strings[i].offset);
831 			xstats[*count].id = *count;
832 			(*count)++;
833 		}
834 	}
835 
836 	/* Get tx queue stats */
837 	for (j = 0; j < dev->data->nb_tx_queues; j++) {
838 		for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
839 			reg_offset = hns3_get_tqp_reg_offset(j);
840 			xstats[*count].value = hns3_read_dev(hw,
841 				reg_offset + hns3_tx_queue_strings[i].offset);
842 			xstats[*count].id = *count;
843 			(*count)++;
844 		}
845 	}
846 }
847 
848 static void
849 hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
850 		       int *count)
851 {
852 	struct hns3_rx_dfx_stats *dfx_stats;
853 	struct hns3_rx_queue *rxq;
854 	uint16_t i, j;
855 	char *val;
856 
857 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
858 		rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
859 		if (rxq == NULL)
860 			continue;
861 
862 		dfx_stats = &rxq->dfx_stats;
863 		for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
864 			val = (char *)dfx_stats +
865 				hns3_rxq_dfx_stats_strings[j].offset;
866 			xstats[*count].value = *(uint64_t *)val;
867 			xstats[*count].id = *count;
868 			(*count)++;
869 		}
870 	}
871 }
872 
873 static void
874 hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
875 		       int *count)
876 {
877 	struct hns3_tx_dfx_stats *dfx_stats;
878 	struct hns3_tx_queue *txq;
879 	uint16_t i, j;
880 	char *val;
881 
882 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
883 		txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
884 		if (txq == NULL)
885 			continue;
886 
887 		dfx_stats = &txq->dfx_stats;
888 		for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
889 			val = (char *)dfx_stats +
890 				hns3_txq_dfx_stats_strings[j].offset;
891 			xstats[*count].value = *(uint64_t *)val;
892 			xstats[*count].id = *count;
893 			(*count)++;
894 		}
895 	}
896 }
897 
898 static void
899 hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
900 		       int *count)
901 {
902 	hns3_rxq_dfx_stats_get(dev, xstats, count);
903 	hns3_txq_dfx_stats_get(dev, xstats, count);
904 }
905 
906 static void
907 hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
908 			 int *count)
909 {
910 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
911 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
912 	struct hns3_rx_basic_stats *rxq_stats;
913 	struct hns3_rx_queue *rxq;
914 	uint16_t i, j;
915 	uint32_t cnt;
916 	char *val;
917 
918 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
919 		rxq = dev->data->rx_queues[i];
920 		if (rxq == NULL)
921 			continue;
922 
923 		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
924 		/*
925 		 * Read hardware and software in adjacent positions to minimize
926 		 * the time difference.
927 		 */
928 		rxq_stats = &rxq->basic_stats;
929 		rxq_stats->errors = rxq->err_stats.l2_errors +
930 					rxq->err_stats.pkt_len_errors;
931 		stats->rcb_rx_ring_pktnum_rcd += cnt;
932 		stats->rcb_rx_ring_pktnum[i] += cnt;
933 
934 		/*
935 		 * If HW statistics are reset by stats_reset, but a lot of
936 		 * residual packets exist in the hardware queue and these
937 		 * packets are error packets, the subtraction may underflow.
938 		 * So return 0 in this case.
939 		 */
940 		rxq_stats->packets =
941 			stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
942 			stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
943 		for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
944 			val = (char *)rxq_stats +
945 				hns3_rxq_basic_stats_strings[j].offset;
946 			xstats[*count].value = *(uint64_t *)val;
947 			xstats[*count].id = *count;
948 			(*count)++;
949 		}
950 	}
951 }
952 
953 static void
954 hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
955 			 int *count)
956 {
957 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
958 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
959 	struct hns3_tx_basic_stats *txq_stats;
960 	struct hns3_tx_queue *txq;
961 	uint16_t i, j;
962 	uint32_t cnt;
963 	char *val;
964 
965 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
966 		txq = dev->data->tx_queues[i];
967 		if (txq == NULL)
968 			continue;
969 
970 		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
971 		stats->rcb_tx_ring_pktnum_rcd += cnt;
972 		stats->rcb_tx_ring_pktnum[i] += cnt;
973 
974 		txq_stats = &txq->basic_stats;
975 		txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
976 
977 		for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
978 			val = (char *)txq_stats +
979 				hns3_txq_basic_stats_strings[j].offset;
980 			xstats[*count].value = *(uint64_t *)val;
981 			xstats[*count].id = *count;
982 			(*count)++;
983 		}
984 	}
985 }
986 
987 static void
988 hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
989 			 int *count)
990 {
991 	hns3_rxq_basic_stats_get(dev, xstats, count);
992 	hns3_txq_basic_stats_get(dev, xstats, count);
993 }
994 
995 static void
996 hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
997 			  int *count)
998 {
999 	struct hns3_adapter *hns = dev->data->dev_private;
1000 	struct hns3_hw *hw = &hns->hw;
1001 	struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
1002 	int imissed_stats_num;
1003 	int cnt = *count;
1004 	char *addr;
1005 	uint16_t i;
1006 
1007 	imissed_stats_num = hns3_get_imissed_stats_num(hns);
1008 
1009 	for (i = 0; i < imissed_stats_num; i++) {
1010 		addr = (char *)imissed_stats +
1011 			hns3_imissed_stats_strings[i].offset;
1012 		xstats[cnt].value = *(uint64_t *)addr;
1013 		xstats[cnt].id = cnt;
1014 		cnt++;
1015 	}
1016 
1017 	*count = cnt;
1018 }
1019 
1020 /*
1021  * Retrieve extended (tqp | MAC) statistics of an Ethernet device.
1022  * @param dev
1023  *   Pointer to Ethernet device.
1024  * @param xstats
1025  *   A pointer to a table of structure of type *rte_eth_xstat*
1026  *   to be filled with device statistics ids and values.
1027  *   This parameter can be set to NULL if n is 0.
1028  * @param n
1029  *   The size of the xstats array (number of elements).
1030  * @return
1031  *   The number of statistics elements on success, a negative value on failure.
1032  */
1033 int
1034 hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1035 		    unsigned int n)
1036 {
1037 	struct hns3_adapter *hns = dev->data->dev_private;
1038 	struct hns3_hw *hw = &hns->hw;
1039 	struct hns3_mac_stats *mac_stats = &hw->mac_stats;
1040 	struct hns3_reset_stats *reset_stats = &hw->reset.stats;
1041 	struct hns3_rx_bd_errors_stats *rx_err_stats;
1042 	struct hns3_rx_queue *rxq;
1043 	uint16_t i, j;
1044 	char *addr;
1045 	int count;
1046 	int ret;
1047 
1048 	if (xstats == NULL)
1049 		return 0;
1050 
1051 	count = hns3_xstats_calc_num(dev);
1052 	if ((int)n < count)
1053 		return count;
1054 
1055 	count = 0;
1056 
1057 	hns3_tqp_basic_stats_get(dev, xstats, &count);
1058 
1059 	if (!hns->is_vf) {
1060 		/* Update Mac stats */
1061 		ret = hns3_query_update_mac_stats(dev);
1062 		if (ret < 0) {
1063 			hns3_err(hw, "failed to update MAC stats, ret = %d", ret);
1064 			return ret;
1065 		}
1066 
1067 		/* Get MAC stats from the hw->mac_stats struct */
1068 		for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1069 			addr = (char *)mac_stats + hns3_mac_strings[i].offset;
1070 			xstats[count].value = *(uint64_t *)addr;
1071 			xstats[count].id = count;
1072 			count++;
1073 		}
1074 	}
1075 
1076 	ret = hns3_update_imissed_stats(hw, false);
1077 	if (ret) {
1078 		hns3_err(hw, "update imissed stats failed, ret = %d",
1079 			 ret);
1080 		return ret;
1081 	}
1082 
1083 	hns3_imissed_stats_get(dev, xstats, &count);
1084 
1085 	/* Get the reset stats */
1086 	for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1087 		addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
1088 		xstats[count].value = *(uint64_t *)addr;
1089 		xstats[count].id = count;
1090 		count++;
1091 	}
1092 
1093 	/* Get the Rx BD errors stats */
1094 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
1095 		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1096 			rxq = dev->data->rx_queues[j];
1097 			if (rxq) {
1098 				rx_err_stats = &rxq->err_stats;
1099 				addr = (char *)rx_err_stats +
1100 					hns3_rx_bd_error_strings[i].offset;
1101 				xstats[count].value = *(uint64_t *)addr;
1102 				xstats[count].id = count;
1103 				count++;
1104 			}
1105 		}
1106 	}
1107 
1108 	hns3_tqp_dfx_stats_get(dev, xstats, &count);
1109 	hns3_queue_stats_get(dev, xstats, &count);
1110 
1111 	return count;
1112 }
1113 
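/*
 * Illustrative usage sketch (application side, not part of this driver;
 * port_id is assumed valid): the count/fill contract implemented by
 * hns3_dev_xstats_get() above is normally exercised through the generic
 * ethdev API by first querying the required array size and then fetching
 * the entries:
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xstats = nb > 0 ?
 *			malloc(nb * sizeof(*xstats)) : NULL;
 *
 *	if (xstats != NULL && rte_eth_xstats_get(port_id, xstats, nb) == nb) {
 *		... use xstats[i].id and xstats[i].value; the names for the
 *		    same indexes come from rte_eth_xstats_get_names() ...
 *	}
 *	free(xstats);
 */
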
1114 static void
1115 hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
1116 			      struct rte_eth_xstat_name *xstats_names,
1117 			      uint32_t *count)
1118 {
1119 	uint16_t i, j;
1120 
1121 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1122 		for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
1123 			snprintf(xstats_names[*count].name,
1124 				 sizeof(xstats_names[*count].name),
1125 				 "rx_q%u_%s", i,
1126 				 hns3_rxq_basic_stats_strings[j].name);
1127 			(*count)++;
1128 		}
1129 	}
1130 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1131 		for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
1132 			snprintf(xstats_names[*count].name,
1133 				 sizeof(xstats_names[*count].name),
1134 				 "tx_q%u_%s", i,
1135 				 hns3_txq_basic_stats_strings[j].name);
1136 			(*count)++;
1137 		}
1138 	}
1139 }
1140 
1141 static void
1142 hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
1143 			    struct rte_eth_xstat_name *xstats_names,
1144 			    uint32_t *count)
1145 {
1146 	uint16_t i, j;
1147 
1148 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1149 		for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
1150 			snprintf(xstats_names[*count].name,
1151 				 sizeof(xstats_names[*count].name),
1152 				 "rx_q%u_%s", i,
1153 				 hns3_rxq_dfx_stats_strings[j].name);
1154 			(*count)++;
1155 		}
1156 	}
1157 
1158 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1159 		for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
1160 			snprintf(xstats_names[*count].name,
1161 				 sizeof(xstats_names[*count].name),
1162 				 "tx_q%u_%s", i,
1163 				 hns3_txq_dfx_stats_strings[j].name);
1164 			(*count)++;
1165 		}
1166 	}
1167 }
1168 
1169 static void
1170 hns3_imissed_stats_name_get(struct rte_eth_dev *dev,
1171 			    struct rte_eth_xstat_name *xstats_names,
1172 			    uint32_t *count)
1173 {
1174 	struct hns3_adapter *hns = dev->data->dev_private;
1175 	uint32_t cnt = *count;
1176 	int imissed_stats_num;
1177 	uint16_t i;
1178 
1179 	imissed_stats_num = hns3_get_imissed_stats_num(hns);
1180 
1181 	for (i = 0; i < imissed_stats_num; i++) {
1182 		snprintf(xstats_names[cnt].name,
1183 			 sizeof(xstats_names[cnt].name),
1184 			 "%s", hns3_imissed_stats_strings[i].name);
1185 		cnt++;
1186 	}
1187 
1188 	*count = cnt;
1189 }
1190 
1191 /*
1192  * Retrieve names of extended statistics of an Ethernet device.
1193  *
1194  * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
1195  * by array index:
1196  *  xstats_names[i].name => xstats[i].value
1197  *
1198  * And the array index is same with id field of 'struct rte_eth_xstat':
1199  *  xstats[i].id == i
1200  *
1201  * This assumption makes key-value pair matching less flexible but simpler.
1202  *
1203  * @param dev
1204  *   Pointer to Ethernet device.
1205  * @param xstats_names
1206  *   An rte_eth_xstat_name array of at least *size* elements to
1207  *   be filled. If set to NULL, the function returns the required number
1208  *   of elements.
1209  * @param size
1210  *   The size of the xstats_names array (number of elements).
1211  * @return
1212  *   - A positive value lower or equal to size: success. The return value
1213  *     is the number of entries filled in the stats table.
1214  */
1215 int
1216 hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
1217 			  struct rte_eth_xstat_name *xstats_names,
1218 			  __rte_unused unsigned int size)
1219 {
1220 	struct hns3_adapter *hns = dev->data->dev_private;
1221 	int cnt_stats = hns3_xstats_calc_num(dev);
1222 	uint32_t count = 0;
1223 	uint16_t i, j;
1224 
1225 	if (xstats_names == NULL)
1226 		return cnt_stats;
1227 
1228 	hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
1229 
1230 	/* Note: the size limit is checked in rte_eth_xstats_get_names() */
1231 	if (!hns->is_vf) {
1232 		/* Get the MAC stats names from the hns3_mac_strings table */
1233 		for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1234 			snprintf(xstats_names[count].name,
1235 				 sizeof(xstats_names[count].name),
1236 				 "%s", hns3_mac_strings[i].name);
1237 			count++;
1238 		}
1239 	}
1240 
1241 	hns3_imissed_stats_name_get(dev, xstats_names, &count);
1242 
1243 	for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1244 		snprintf(xstats_names[count].name,
1245 			 sizeof(xstats_names[count].name),
1246 			 "%s", hns3_reset_stats_strings[i].name);
1247 		count++;
1248 	}
1249 
1250 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
1251 		for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1252 			snprintf(xstats_names[count].name,
1253 				 sizeof(xstats_names[count].name),
1254 				 "rx_q%u_%s", j,
1255 				 hns3_rx_bd_error_strings[i].name);
1256 			count++;
1257 		}
1258 	}
1259 
1260 	hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
1261 
1262 	for (j = 0; j < dev->data->nb_rx_queues; j++) {
1263 		for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
1264 			snprintf(xstats_names[count].name,
1265 				 sizeof(xstats_names[count].name),
1266 				 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
1267 			count++;
1268 		}
1269 	}
1270 
1271 	for (j = 0; j < dev->data->nb_tx_queues; j++) {
1272 		for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
1273 			snprintf(xstats_names[count].name,
1274 				 sizeof(xstats_names[count].name),
1275 				 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
1276 			count++;
1277 		}
1278 	}
1279 
1280 	return count;
1281 }
1282 
1283 /*
1284  * Retrieve extended statistics of an Ethernet device.
1285  *
1286  * @param dev
1287  *   Pointer to Ethernet device.
1288  * @param ids
1289  *   A pointer to an ids array passed by the application. This tells which
1290  *   statistics values the function should retrieve. This parameter
1291  *   can be set to NULL if size is 0; in this case the function will
1292  *   retrieve all available statistics.
1293  * @param values
1294  *   A pointer to a table to be filled with device statistics values.
1295  * @param size
1296  *   The size of the ids array (number of elements).
1297  * @return
1298  *   - A positive value lower or equal to size: success. The return value
1299  *     is the number of entries filled in the stats table.
1300  *   - A positive value higher than size: error, the given statistics table
1301  *     is too small. The return value corresponds to the size that should
1302  *     be given to succeed. The entries in the table are not valid and
1303  *     shall not be used by the caller.
1304  *   - 0 on no ids.
1305  */
1306 int
1307 hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1308 			  uint64_t *values, uint32_t size)
1309 {
1310 	const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1311 	struct hns3_adapter *hns = dev->data->dev_private;
1312 	struct rte_eth_xstat *values_copy;
1313 	struct hns3_hw *hw = &hns->hw;
1314 	uint32_t count_value;
1315 	uint64_t len;
1316 	uint32_t i;
1317 
1318 	if (ids == NULL && values == NULL)
1319 		return cnt_stats;
1320 
1321 	if (ids == NULL)
1322 		if (size < cnt_stats)
1323 			return cnt_stats;
1324 
1325 	len = cnt_stats * sizeof(struct rte_eth_xstat);
1326 	values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
1327 	if (values_copy == NULL) {
1328 		hns3_err(hw, "Failed to allocate %" PRIu64 " bytes needed "
1329 			     "to store statistics values", len);
1330 		return -ENOMEM;
1331 	}
1332 
1333 	count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
1334 	if (count_value != cnt_stats) {
1335 		rte_free(values_copy);
1336 		return -EINVAL;
1337 	}
1338 
1339 	if (ids == NULL && values != NULL) {
1340 		for (i = 0; i < cnt_stats; i++)
1341 			memcpy(&values[i], &values_copy[i].value,
1342 			       sizeof(values[i]));
1343 
1344 		rte_free(values_copy);
1345 		return cnt_stats;
1346 	}
1347 
1348 	for (i = 0; i < size; i++) {
1349 		if (ids[i] >= cnt_stats) {
1350 			hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1351 				     "should < %u", i, ids[i], cnt_stats);
1352 			rte_free(values_copy);
1353 			return -EINVAL;
1354 		}
1355 		memcpy(&values[i], &values_copy[ids[i]].value,
1356 			sizeof(values[i]));
1357 	}
1358 
1359 	rte_free(values_copy);
1360 	return size;
1361 }
1362 
1363 /*
1364  * Retrieve names of extended statistics of an Ethernet device.
1365  *
1366  * @param dev
1367  *   Pointer to Ethernet device.
1368  * @param xstats_names
1369  *   An rte_eth_xstat_name array of at least *size* elements to
1370  *   be filled. If set to NULL, the function returns the required number
1371  *   of elements.
1372  * @param ids
1373  *   IDs array given by app to retrieve specific statistics
1374  * @param size
1375  *   The size of the xstats_names array (number of elements).
1376  * @return
1377  *   - A positive value lower or equal to size: success. The return value
1378  *     is the number of entries filled in the stats table.
1379  *   - A positive value higher than size: error, the given statistics table
1380  *     is too small. The return value corresponds to the size that should
1381  *     be given to succeed. The entries in the table are not valid and
1382  *     shall not be used by the caller.
1383  */
1384 int
1385 hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1386 				struct rte_eth_xstat_name *xstats_names,
1387 				const uint64_t *ids, uint32_t size)
1388 {
1389 	const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1390 	struct hns3_adapter *hns = dev->data->dev_private;
1391 	struct rte_eth_xstat_name *names_copy;
1392 	struct hns3_hw *hw = &hns->hw;
1393 	uint64_t len;
1394 	uint32_t i;
1395 
1396 	if (xstats_names == NULL)
1397 		return cnt_stats;
1398 
1399 	if (ids == NULL) {
1400 		if (size < cnt_stats)
1401 			return cnt_stats;
1402 
1403 		return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
1404 	}
1405 
1406 	len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1407 	names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1408 	if (names_copy == NULL) {
1409 		hns3_err(hw, "Failed to allocate %" PRIu64 " bytes needed "
1410 			     "to store statistics names", len);
1411 		return -ENOMEM;
1412 	}
1413 
1414 	(void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1415 
1416 	for (i = 0; i < size; i++) {
1417 		if (ids[i] >= cnt_stats) {
1418 			hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
1419 				     "should < %u", i, ids[i], cnt_stats);
1420 			rte_free(names_copy);
1421 			return -EINVAL;
1422 		}
1423 		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1424 			 "%s", names_copy[ids[i]].name);
1425 	}
1426 
1427 	rte_free(names_copy);
1428 	return size;
1429 }
1430 
1431 static void
1432 hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
1433 {
1434 	struct hns3_rx_queue *rxq;
1435 	struct hns3_tx_queue *txq;
1436 	uint16_t i;
1437 
1438 	/* Clear Rx dfx stats */
1439 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1440 		rxq = dev->data->rx_queues[i];
1441 		if (rxq)
1442 			memset(&rxq->dfx_stats, 0,
1443 			       sizeof(struct hns3_rx_dfx_stats));
1444 	}
1445 
1446 	/* Clear Tx dfx stats */
1447 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1448 		txq = dev->data->tx_queues[i];
1449 		if (txq)
1450 			memset(&txq->dfx_stats, 0,
1451 			       sizeof(struct hns3_tx_dfx_stats));
1452 	}
1453 }
1454 
1455 int
1456 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1457 {
1458 	struct hns3_adapter *hns = dev->data->dev_private;
1459 	int ret;
1460 
1461 	/* Clear tqp stats */
1462 	ret = hns3_stats_reset(dev);
1463 	if (ret)
1464 		return ret;
1465 
1466 	hns3_tqp_dfx_stats_clear(dev);
1467 
1468 	/* Clear reset stats */
1469 	memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1470 
1471 	if (hns->is_vf)
1472 		return 0;
1473 
1474 	/* HW registers are cleared on read */
1475 	ret = hns3_mac_stats_reset(dev);
1476 	if (ret)
1477 		return ret;
1478 
1479 	return 0;
1480 }
1481 
1482 int
1483 hns3_tqp_stats_init(struct hns3_hw *hw)
1484 {
1485 	struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1486 
1487 	tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1488 					 sizeof(uint64_t) * hw->tqps_num, 0);
1489 	if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1490 		hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1491 		return -ENOMEM;
1492 	}
1493 
1494 	tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1495 					 sizeof(uint64_t) * hw->tqps_num, 0);
1496 	if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1497 		hns3_err(hw, "failed to allocate tx_ring pkt_num.");
1498 		rte_free(tqp_stats->rcb_rx_ring_pktnum);
1499 		tqp_stats->rcb_rx_ring_pktnum = NULL;
1500 		return -ENOMEM;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 void
1507 hns3_tqp_stats_uninit(struct hns3_hw *hw)
1508 {
1509 	struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1510 
1511 	rte_free(tqp_stats->rcb_rx_ring_pktnum);
1512 	tqp_stats->rcb_rx_ring_pktnum = NULL;
1513 	rte_free(tqp_stats->rcb_tx_ring_pktnum);
1514 	tqp_stats->rcb_tx_ring_pktnum = NULL;
1515 }
1516 
1517 static void
1518 hns3_tqp_stats_clear(struct hns3_hw *hw)
1519 {
1520 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
1521 
1522 	stats->rcb_rx_ring_pktnum_rcd = 0;
1523 	stats->rcb_tx_ring_pktnum_rcd = 0;
1524 	memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1525 	memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1526 }
1527