1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
3 */
4
5 #include <rte_ethdev.h>
6 #include <rte_io.h>
7 #include <rte_malloc.h>
8
9 #include "hns3_ethdev.h"
10 #include "hns3_rxtx.h"
11 #include "hns3_logs.h"
12 #include "hns3_regs.h"
13
/*
 * Per-rxq basic stats: xstats name -> byte offset into
 * struct hns3_rx_basic_stats (rxq->basic_stats).
 */
static const struct hns3_xstats_name_offset hns3_rxq_basic_stats_strings[] = {
	{"packets",
		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(packets)},
	{"bytes",
		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(bytes)},
	{"errors",
		HNS3_RXQ_BASIC_STATS_FIELD_OFFSET(errors)}
};
23
/*
 * Per-txq basic stats: xstats name -> byte offset into
 * struct hns3_tx_basic_stats (txq->basic_stats).
 */
static const struct hns3_xstats_name_offset hns3_txq_basic_stats_strings[] = {
	{"packets",
		HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(packets)},
	{"bytes",
		HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(bytes)}
};
31
/*
 * MAC statistics: xstats name -> byte offset into struct hns3_mac_stats
 * (hw->mac_stats). A few names deliberately differ from the field they
 * read, e.g. "mac_tx_pfc_pkt_num" maps to mac_tx_pfc_pause_pkt_num.
 */
static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
	{"mac_tx_mac_pause_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_1519_max_bad_oct_pkt_num)},
	{"mac_tx_fragment_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HNS3_MAC_STATS_OFFSET(mac_rx_send_app_bad_pkt_num)}
};
203
/*
 * Reset statistics: xstats name -> byte offset produced by
 * HNS3_RESET_STATS_FIELD_OFFSET (reset-counter fields).
 */
static const struct hns3_xstats_name_offset hns3_reset_stats_strings[] = {
	{"REQ_RESET_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(request_cnt)},
	{"GLOBAL_RESET_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(global_cnt)},
	{"IMP_RESET_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(imp_cnt)},
	{"RESET_EXEC_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(exec_cnt)},
	{"RESET_SUCCESS_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(success_cnt)},
	{"RESET_FAIL_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(fail_cnt)},
	{"RESET_MERGE_CNT",
		HNS3_RESET_STATS_FIELD_OFFSET(merge_cnt)}
};
221
/*
 * Rx BD error statistics: xstats name -> byte offset into the per-rxq
 * error counters (rxq->err_stats).
 */
static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
	{"PKT_LEN_ERRORS",
		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
	{"L2_ERRORS",
		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)}
};
229
/*
 * Rx datapath dfx (diagnostic) statistics: xstats name -> byte offset
 * into struct hns3_rx_dfx_stats (rxq->dfx_stats).
 */
static const struct hns3_xstats_name_offset hns3_rxq_dfx_stats_strings[] = {
	{"L3_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l3_csum_errors)},
	{"L4_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(l4_csum_errors)},
	{"OL3_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol3_csum_errors)},
	{"OL4_CHECKSUM_ERRORS",
		HNS3_RXQ_DFX_STATS_FIELD_OFFSET(ol4_csum_errors)}
};
241
/*
 * Tx datapath dfx (diagnostic) statistics: xstats name -> byte offset
 * into struct hns3_tx_dfx_stats (txq->dfx_stats).
 */
static const struct hns3_xstats_name_offset hns3_txq_dfx_stats_strings[] = {
	{"OVER_LENGTH_PKT_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(over_length_pkt_cnt)},
	{"EXCEED_LIMITED_BD_PKT_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_pkt_cnt)},
	{"EXCEED_LIMITED_BD_PKT_REASSEMBLE_FAIL_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(exceed_limit_bd_reassem_fail)},
	{"UNSUPPORTED_TUNNEL_PKT_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(unsupported_tunnel_pkt_cnt)},
	{"QUEUE_FULL_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(queue_full_cnt)},
	{"SHORT_PKT_PAD_FAIL_CNT",
		HNS3_TXQ_DFX_STATS_FIELD_OFFSET(pkt_padding_fail_cnt)}
};
257
/*
 * Rx queue register statistics: the "offset" here is a hardware register
 * offset (added to the per-queue TQP base in hns3_queue_stats_get),
 * not a struct field offset.
 */
static const struct hns3_xstats_name_offset hns3_rx_queue_strings[] = {
	{"RX_QUEUE_FBD", HNS3_RING_RX_FBDNUM_REG}
};
262
/*
 * Tx queue register statistics: the "offset" is a hardware register
 * offset relative to the per-queue TQP base (see hns3_queue_stats_get).
 */
static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = {
	{"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG}
};
267
/*
 * imissed statistics: xstats name -> byte offset into
 * struct hns3_rx_missed_stats (hw->imissed_stats). Order matters:
 * hns3_imissed_stats_get exposes only the first N entries depending on
 * drop-stats mode, so RPU must stay before SSU.
 */
static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = {
	{"RPU_DROP_CNT",
		HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)},
	{"SSU_DROP_CNT",
		HNS3_IMISSED_STATS_FIELD_OFFSET(ssu_rx_drop_cnt)},
};
275
/* Element counts of the name/offset tables above (ARRAY_SIZE idiom). */
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
	sizeof(hns3_mac_strings[0]))

#define HNS3_NUM_RESET_XSTATS (sizeof(hns3_reset_stats_strings) / \
	sizeof(hns3_reset_stats_strings[0]))

#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
	sizeof(hns3_rx_bd_error_strings[0]))

#define HNS3_NUM_RXQ_DFX_XSTATS (sizeof(hns3_rxq_dfx_stats_strings) / \
	sizeof(hns3_rxq_dfx_stats_strings[0]))

#define HNS3_NUM_TXQ_DFX_XSTATS (sizeof(hns3_txq_dfx_stats_strings) / \
	sizeof(hns3_txq_dfx_stats_strings[0]))

#define HNS3_NUM_RX_QUEUE_STATS (sizeof(hns3_rx_queue_strings) / \
	sizeof(hns3_rx_queue_strings[0]))

#define HNS3_NUM_TX_QUEUE_STATS (sizeof(hns3_tx_queue_strings) / \
	sizeof(hns3_tx_queue_strings[0]))

#define HNS3_NUM_RXQ_BASIC_STATS (sizeof(hns3_rxq_basic_stats_strings) / \
	sizeof(hns3_rxq_basic_stats_strings[0]))

#define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \
	sizeof(hns3_txq_basic_stats_strings[0]))

#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \
	sizeof(hns3_imissed_stats_strings[0]))

/* Fixed (non-queue) stats added for non-VF devices in hns3_xstats_calc_num. */
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_RESET_XSTATS)

/* Defined later in this file; used by hns3_stats_reset(). */
static void hns3_tqp_stats_clear(struct hns3_hw *hw);
309
310 static int
hns3_update_mac_stats(struct hns3_hw * hw)311 hns3_update_mac_stats(struct hns3_hw *hw)
312 {
313 #define HNS3_MAC_STATS_REG_NUM_PER_DESC 4
314
315 uint64_t *data = (uint64_t *)(&hw->mac_stats);
316 struct hns3_cmd_desc *desc;
317 uint32_t stats_iterms;
318 uint64_t *desc_data;
319 uint32_t desc_num;
320 uint32_t i;
321 int ret;
322
323 /* The first desc has a 64-bit header, so need to consider it. */
324 desc_num = hw->mac_stats_reg_num / HNS3_MAC_STATS_REG_NUM_PER_DESC + 1;
325 desc = rte_malloc("hns3_mac_desc",
326 desc_num * sizeof(struct hns3_cmd_desc), 0);
327 if (desc == NULL) {
328 hns3_err(hw, "Mac_update_stats alloced desc malloc fail");
329 return -ENOMEM;
330 }
331
332 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_STATS_MAC_ALL, true);
333 ret = hns3_cmd_send(hw, desc, desc_num);
334 if (ret) {
335 hns3_err(hw, "Update complete MAC pkt stats fail : %d", ret);
336 rte_free(desc);
337 return ret;
338 }
339
340 stats_iterms = RTE_MIN(sizeof(hw->mac_stats) / sizeof(uint64_t),
341 hw->mac_stats_reg_num);
342 desc_data = (uint64_t *)(&desc[0].data[0]);
343 for (i = 0; i < stats_iterms; i++) {
344 /*
345 * Data memory is continuous and only the first descriptor has a
346 * header in this command.
347 */
348 *data += rte_le_to_cpu_64(*desc_data);
349 data++;
350 desc_data++;
351 }
352 rte_free(desc);
353
354 return 0;
355 }
356
/*
 * Ask firmware how many MAC statistics registers it reports.
 *
 * @param hw       hardware descriptor
 * @param reg_num  out: total MAC statistics register count (valid count
 *                 from firmware plus the reserved registers, see below)
 * @return 0 on success, command error code, or -ENODATA if firmware
 *         reports zero registers.
 */
static int
hns3_mac_query_reg_num(struct hns3_hw *hw, uint32_t *reg_num)
{
#define HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B 3
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_MAC_REG_NUM, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query MAC statistic reg number, ret = %d",
			 ret);
		return ret;
	}

	/* The number of MAC statistics registers are provided by firmware. */
	*reg_num = rte_le_to_cpu_32(desc.data[0]);
	if (*reg_num == 0) {
		hns3_err(hw, "MAC statistic reg number is invalid!");
		return -ENODATA;
	}

	/*
	 * If driver doesn't request the firmware to report more MAC statistics
	 * items and the total number of MAC statistics registers by using new
	 * method, firmware will only report the number of valid statistics
	 * registers. However, structure hns3_mac_stats in driver contains valid
	 * and reserved statistics items. In this case, the total register
	 * number must be added to three reserved statistics registers.
	 */
	*reg_num += HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B;

	return 0;
}
391
392 int
hns3_query_mac_stats_reg_num(struct hns3_hw * hw)393 hns3_query_mac_stats_reg_num(struct hns3_hw *hw)
394 {
395 uint32_t mac_stats_reg_num = 0;
396 int ret;
397
398 ret = hns3_mac_query_reg_num(hw, &mac_stats_reg_num);
399 if (ret)
400 return ret;
401
402 hw->mac_stats_reg_num = mac_stats_reg_num;
403 if (hw->mac_stats_reg_num > sizeof(hw->mac_stats) / sizeof(uint64_t))
404 hns3_warn(hw, "MAC stats reg number from firmware is greater than stats iterms in driver.");
405
406 return 0;
407 }
408
409 static int
hns3_update_port_rpu_drop_stats(struct hns3_hw * hw)410 hns3_update_port_rpu_drop_stats(struct hns3_hw *hw)
411 {
412 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
413 struct hns3_query_rpu_cmd *req;
414 struct hns3_cmd_desc desc;
415 uint64_t cnt;
416 uint32_t tc_num;
417 int ret;
418
419 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true);
420 req = (struct hns3_query_rpu_cmd *)desc.data;
421
422 /*
423 * tc_num is 0, means rpu stats of all TC channels will be
424 * get from firmware
425 */
426 tc_num = 0;
427 req->tc_queue_num = rte_cpu_to_le_32(tc_num);
428 ret = hns3_cmd_send(hw, &desc, 1);
429 if (ret) {
430 hns3_err(hw, "failed to query RPU stats: %d", ret);
431 return ret;
432 }
433
434 cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt);
435 stats->rpu_rx_drop_cnt += cnt;
436
437 return 0;
438 }
439
440 static void
hns3_update_function_rpu_drop_stats(struct hns3_hw * hw)441 hns3_update_function_rpu_drop_stats(struct hns3_hw *hw)
442 {
443 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
444
445 stats->rpu_rx_drop_cnt += hns3_read_dev(hw, HNS3_RPU_DROP_CNT_REG);
446 }
447
448 static int
hns3_update_rpu_drop_stats(struct hns3_hw * hw)449 hns3_update_rpu_drop_stats(struct hns3_hw *hw)
450 {
451 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
452 int ret = 0;
453
454 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && !hns->is_vf)
455 ret = hns3_update_port_rpu_drop_stats(hw);
456 else if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2)
457 hns3_update_function_rpu_drop_stats(hw);
458
459 return ret;
460 }
461
462 static int
hns3_get_ssu_drop_stats(struct hns3_hw * hw,struct hns3_cmd_desc * desc,int bd_num,bool is_rx)463 hns3_get_ssu_drop_stats(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
464 int bd_num, bool is_rx)
465 {
466 struct hns3_query_ssu_cmd *req;
467 int ret;
468 int i;
469
470 for (i = 0; i < bd_num - 1; i++) {
471 hns3_cmd_setup_basic_desc(&desc[i],
472 HNS3_OPC_SSU_DROP_REG, true);
473 desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
474 }
475 hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_SSU_DROP_REG, true);
476 req = (struct hns3_query_ssu_cmd *)desc[0].data;
477 req->rxtx = is_rx ? 0 : 1;
478 ret = hns3_cmd_send(hw, desc, bd_num);
479
480 return ret;
481 }
482
483 static int
hns3_update_port_rx_ssu_drop_stats(struct hns3_hw * hw)484 hns3_update_port_rx_ssu_drop_stats(struct hns3_hw *hw)
485 {
486 struct hns3_rx_missed_stats *stats = &hw->imissed_stats;
487 struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
488 struct hns3_query_ssu_cmd *req;
489 uint64_t cnt;
490 int ret;
491
492 ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
493 true);
494 if (ret) {
495 hns3_err(hw, "failed to get Rx SSU drop stats, ret = %d", ret);
496 return ret;
497 }
498
499 req = (struct hns3_query_ssu_cmd *)desc[0].data;
500 cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
501 rte_le_to_cpu_32(req->full_drop_cnt) +
502 rte_le_to_cpu_32(req->part_drop_cnt);
503
504 stats->ssu_rx_drop_cnt += cnt;
505
506 return 0;
507 }
508
509 static int
hns3_update_port_tx_ssu_drop_stats(struct hns3_hw * hw)510 hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw)
511 {
512 struct hns3_cmd_desc desc[HNS3_OPC_SSU_DROP_REG_NUM];
513 struct hns3_query_ssu_cmd *req;
514 uint64_t cnt;
515 int ret;
516
517 ret = hns3_get_ssu_drop_stats(hw, desc, HNS3_OPC_SSU_DROP_REG_NUM,
518 false);
519 if (ret) {
520 hns3_err(hw, "failed to get Tx SSU drop stats, ret = %d", ret);
521 return ret;
522 }
523
524 req = (struct hns3_query_ssu_cmd *)desc[0].data;
525 cnt = rte_le_to_cpu_32(req->oq_drop_cnt) +
526 rte_le_to_cpu_32(req->full_drop_cnt) +
527 rte_le_to_cpu_32(req->part_drop_cnt);
528
529 hw->oerror_stats += cnt;
530
531 return 0;
532 }
533
/*
 * Refresh hw->imissed_stats (SSU and/or RPU drop counters, depending on
 * drop-stats mode and PF/VF role), optionally clearing the accumulated
 * software copy afterwards.
 *
 * @param hw        hardware descriptor
 * @param is_clear  when true, zero hw->imissed_stats after the hardware
 *                  read (the read itself consumes the hardware counters,
 *                  see the callers' "read-clear" notes)
 * @return 0 on success, a firmware query error otherwise.
 */
static int
hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	int ret;

	/* Mode1 VFs expose no imissed stats (see hns3_get_imissed_stats_num). */
	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
		return 0;

	/* SSU Rx drops are only queryable by mode2 PFs. */
	if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf) {
		ret = hns3_update_port_rx_ssu_drop_stats(hw);
		if (ret)
			return ret;
	}

	ret = hns3_update_rpu_drop_stats(hw);
	if (ret)
		return ret;

	if (is_clear)
		memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats));

	return 0;
}
558
559 static int
hns3_update_oerror_stats(struct hns3_hw * hw,bool is_clear)560 hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
561 {
562 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
563 int ret;
564
565 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 || hns->is_vf)
566 return 0;
567
568 ret = hns3_update_port_tx_ssu_drop_stats(hw);
569 if (ret)
570 return ret;
571
572 if (is_clear)
573 hw->oerror_stats = 0;
574
575 return 0;
576 }
577
578 static void
hns3_rcb_rx_ring_stats_get(struct hns3_rx_queue * rxq,struct hns3_tqp_stats * stats)579 hns3_rcb_rx_ring_stats_get(struct hns3_rx_queue *rxq,
580 struct hns3_tqp_stats *stats)
581 {
582 uint32_t cnt;
583
584 cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
585 stats->rcb_rx_ring_pktnum_rcd += cnt;
586 stats->rcb_rx_ring_pktnum[rxq->queue_id] += cnt;
587 }
588
589 static void
hns3_rcb_tx_ring_stats_get(struct hns3_tx_queue * txq,struct hns3_tqp_stats * stats)590 hns3_rcb_tx_ring_stats_get(struct hns3_tx_queue *txq,
591 struct hns3_tqp_stats *stats)
592 {
593 uint32_t cnt;
594
595 cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
596 stats->rcb_tx_ring_pktnum_rcd += cnt;
597 stats->rcb_tx_ring_pktnum[txq->queue_id] += cnt;
598 }
599
600 /*
601 * Query tqp tx queue statistics ,opcode id: 0x0B03.
602 * Query tqp rx queue statistics ,opcode id: 0x0B13.
603 * Get all statistics of a port.
604 * @param eth_dev
605 * Pointer to Ethernet device.
606 * @praram rte_stats
607 * Pointer to structure rte_eth_stats.
608 * @return
609 * 0 on success.
610 */
611 int
hns3_stats_get(struct rte_eth_dev * eth_dev,struct rte_eth_stats * rte_stats)612 hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
613 {
614 struct hns3_adapter *hns = eth_dev->data->dev_private;
615 struct hns3_hw *hw = &hns->hw;
616 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
617 struct hns3_tqp_stats *stats = &hw->tqp_stats;
618 struct hns3_rx_queue *rxq;
619 struct hns3_tx_queue *txq;
620 uint16_t i;
621 int ret;
622
623 rte_spinlock_lock(&hw->stats_lock);
624 /* Update imissed stats */
625 ret = hns3_update_imissed_stats(hw, false);
626 if (ret) {
627 hns3_err(hw, "update imissed stats failed, ret = %d", ret);
628 goto out;
629 }
630 rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
631 imissed_stats->ssu_rx_drop_cnt;
632
633 /* Get the error stats and bytes of received packets */
634 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
635 rxq = eth_dev->data->rx_queues[i];
636 if (rxq == NULL)
637 continue;
638
639 hns3_rcb_rx_ring_stats_get(rxq, stats);
640 rte_stats->ierrors += rxq->err_stats.l2_errors +
641 rxq->err_stats.pkt_len_errors;
642 rte_stats->ibytes += rxq->basic_stats.bytes;
643 }
644
645 /* Reads all the stats of a txq in a loop to keep them synchronized */
646 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
647 txq = eth_dev->data->tx_queues[i];
648 if (txq == NULL)
649 continue;
650
651 hns3_rcb_tx_ring_stats_get(txq, stats);
652 rte_stats->obytes += txq->basic_stats.bytes;
653 }
654
655 ret = hns3_update_oerror_stats(hw, false);
656 if (ret) {
657 hns3_err(hw, "update oerror stats failed, ret = %d", ret);
658 goto out;
659 }
660 rte_stats->oerrors = hw->oerror_stats;
661
662 /*
663 * If HW statistics are reset by stats_reset, but a lot of residual
664 * packets exist in the hardware queue and these packets are error
665 * packets, flip overflow may occurred. So return 0 in this case.
666 */
667 rte_stats->ipackets =
668 stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ?
669 stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0;
670 rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
671 rte_stats->oerrors;
672 rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
673
674 out:
675 rte_spinlock_unlock(&hw->stats_lock);
676
677 return ret;
678 }
679
680 int
hns3_stats_reset(struct rte_eth_dev * eth_dev)681 hns3_stats_reset(struct rte_eth_dev *eth_dev)
682 {
683 struct hns3_adapter *hns = eth_dev->data->dev_private;
684 struct hns3_hw *hw = &hns->hw;
685 struct hns3_rx_queue *rxq;
686 struct hns3_tx_queue *txq;
687 uint16_t i;
688 int ret;
689
690 rte_spinlock_lock(&hw->stats_lock);
691 /*
692 * Note: Reading hardware statistics of imissed registers will
693 * clear them.
694 */
695 ret = hns3_update_imissed_stats(hw, true);
696 if (ret) {
697 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
698 goto out;
699 }
700
701 /*
702 * Note: Reading hardware statistics of oerror registers will
703 * clear them.
704 */
705 ret = hns3_update_oerror_stats(hw, true);
706 if (ret) {
707 hns3_err(hw, "clear oerror stats failed, ret = %d", ret);
708 goto out;
709 }
710
711 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
712 rxq = eth_dev->data->rx_queues[i];
713 if (rxq == NULL)
714 continue;
715
716 rxq->err_stats.pkt_len_errors = 0;
717 rxq->err_stats.l2_errors = 0;
718 }
719
720 /* Clear all the stats of a rxq in a loop to keep them synchronized */
721 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
722 rxq = eth_dev->data->rx_queues[i];
723 if (rxq == NULL)
724 continue;
725
726 memset(&rxq->basic_stats, 0,
727 sizeof(struct hns3_rx_basic_stats));
728
729 /* This register is read-clear */
730 (void)hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
731 rxq->err_stats.pkt_len_errors = 0;
732 rxq->err_stats.l2_errors = 0;
733 }
734
735 /* Clear all the stats of a txq in a loop to keep them synchronized */
736 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
737 txq = eth_dev->data->tx_queues[i];
738 if (txq == NULL)
739 continue;
740
741 memset(&txq->basic_stats, 0,
742 sizeof(struct hns3_tx_basic_stats));
743
744 /* This register is read-clear */
745 (void)hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
746 }
747
748 hns3_tqp_stats_clear(hw);
749
750 out:
751 rte_spinlock_unlock(&hw->stats_lock);
752
753 return ret;
754 }
755
756 static int
hns3_mac_stats_reset(struct hns3_hw * hw)757 hns3_mac_stats_reset(struct hns3_hw *hw)
758 {
759 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
760 int ret;
761
762 /* Clear hardware MAC statistics by reading it. */
763 ret = hns3_update_mac_stats(hw);
764 if (ret) {
765 hns3_err(hw, "Clear Mac stats fail : %d", ret);
766 return ret;
767 }
768
769 memset(mac_stats, 0, sizeof(struct hns3_mac_stats));
770
771 return 0;
772 }
773
774 static uint16_t
hns3_get_imissed_stats_num(struct hns3_adapter * hns)775 hns3_get_imissed_stats_num(struct hns3_adapter *hns)
776 {
777 #define NO_IMISSED_STATS_NUM 0
778 #define RPU_STATS_ITEM_NUM 1
779 struct hns3_hw *hw = &hns->hw;
780
781 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE1 && hns->is_vf)
782 return NO_IMISSED_STATS_NUM;
783
784 if (hw->drop_stats_mode == HNS3_PKTS_DROP_STATS_MODE2 && !hns->is_vf)
785 return HNS3_NUM_IMISSED_XSTATS;
786
787 return RPU_STATS_ITEM_NUM;
788 }
789
790 /* This function calculates the number of xstats based on the current config */
791 static int
hns3_xstats_calc_num(struct rte_eth_dev * dev)792 hns3_xstats_calc_num(struct rte_eth_dev *dev)
793 {
794 #define HNS3_PF_VF_RX_COMM_STATS_NUM (HNS3_NUM_RX_BD_ERROR_XSTATS + \
795 HNS3_NUM_RXQ_DFX_XSTATS + \
796 HNS3_NUM_RX_QUEUE_STATS + \
797 HNS3_NUM_RXQ_BASIC_STATS)
798 #define HNS3_PF_VF_TX_COMM_STATS_NUM (HNS3_NUM_TXQ_DFX_XSTATS + \
799 HNS3_NUM_TX_QUEUE_STATS + \
800 HNS3_NUM_TXQ_BASIC_STATS)
801
802 struct hns3_adapter *hns = dev->data->dev_private;
803 uint16_t nb_rx_q = dev->data->nb_rx_queues;
804 uint16_t nb_tx_q = dev->data->nb_tx_queues;
805 int rx_comm_stats_num = nb_rx_q * HNS3_PF_VF_RX_COMM_STATS_NUM;
806 int tx_comm_stats_num = nb_tx_q * HNS3_PF_VF_TX_COMM_STATS_NUM;
807 int stats_num;
808
809 stats_num = rx_comm_stats_num + tx_comm_stats_num;
810 stats_num += hns3_get_imissed_stats_num(hns);
811
812 if (hns->is_vf)
813 stats_num += HNS3_NUM_RESET_XSTATS;
814 else
815 stats_num += HNS3_FIX_NUM_STATS;
816
817 return stats_num;
818 }
819
820 static void
hns3_queue_stats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,int * count)821 hns3_queue_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
822 int *count)
823 {
824 struct hns3_adapter *hns = dev->data->dev_private;
825 struct hns3_hw *hw = &hns->hw;
826 uint32_t reg_offset;
827 uint16_t i, j;
828
829 /* Get rx queue stats */
830 for (j = 0; j < dev->data->nb_rx_queues; j++) {
831 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
832 reg_offset = hns3_get_tqp_reg_offset(j);
833 xstats[*count].value = hns3_read_dev(hw,
834 reg_offset + hns3_rx_queue_strings[i].offset);
835 xstats[*count].id = *count;
836 (*count)++;
837 }
838 }
839
840 /* Get tx queue stats */
841 for (j = 0; j < dev->data->nb_tx_queues; j++) {
842 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
843 reg_offset = hns3_get_tqp_reg_offset(j);
844 xstats[*count].value = hns3_read_dev(hw,
845 reg_offset + hns3_tx_queue_strings[i].offset);
846 xstats[*count].id = *count;
847 (*count)++;
848 }
849 }
850 }
851
852 static void
hns3_rxq_dfx_stats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,int * count)853 hns3_rxq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
854 int *count)
855 {
856 struct hns3_rx_dfx_stats *dfx_stats;
857 struct hns3_rx_queue *rxq;
858 uint16_t i, j;
859 char *val;
860
861 for (i = 0; i < dev->data->nb_rx_queues; i++) {
862 rxq = (struct hns3_rx_queue *)dev->data->rx_queues[i];
863 if (rxq == NULL)
864 continue;
865
866 dfx_stats = &rxq->dfx_stats;
867 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
868 val = (char *)dfx_stats +
869 hns3_rxq_dfx_stats_strings[j].offset;
870 xstats[*count].value = *(uint64_t *)val;
871 xstats[*count].id = *count;
872 (*count)++;
873 }
874 }
875 }
876
877 static void
hns3_txq_dfx_stats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,int * count)878 hns3_txq_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
879 int *count)
880 {
881 struct hns3_tx_dfx_stats *dfx_stats;
882 struct hns3_tx_queue *txq;
883 uint16_t i, j;
884 char *val;
885
886 for (i = 0; i < dev->data->nb_tx_queues; i++) {
887 txq = (struct hns3_tx_queue *)dev->data->tx_queues[i];
888 if (txq == NULL)
889 continue;
890
891 dfx_stats = &txq->dfx_stats;
892 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
893 val = (char *)dfx_stats +
894 hns3_txq_dfx_stats_strings[j].offset;
895 xstats[*count].value = *(uint64_t *)val;
896 xstats[*count].id = *count;
897 (*count)++;
898 }
899 }
900 }
901
/* Append all per-queue dfx counters: Rx queues first, then Tx queues. */
static void
hns3_tqp_dfx_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       int *count)
{
	hns3_rxq_dfx_stats_get(dev, xstats, count);
	hns3_txq_dfx_stats_get(dev, xstats, count);
}
909
/*
 * Refresh each rxq's basic stats from the ring counters and append them
 * to xstats[], advancing *count per entry.
 *
 * Side effects: updates hw->tqp_stats (via hns3_rcb_rx_ring_stats_get)
 * and rewrites rxq->basic_stats.errors/packets before exporting.
 */
static void
hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
			 int *count)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_tqp_stats *stats = &hw->tqp_stats;
	struct hns3_rx_basic_stats *rxq_stats;
	struct hns3_rx_queue *rxq;
	uint16_t i, j;
	char *val;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq == NULL)
			continue;

		/* Pull the latest read-clear ring counter into the totals. */
		hns3_rcb_rx_ring_stats_get(rxq, stats);
		rxq_stats = &rxq->basic_stats;
		rxq_stats->errors = rxq->err_stats.l2_errors +
				    rxq->err_stats.pkt_len_errors;

		/*
		 * If HW statistics are reset by stats_reset, but a lot of
		 * residual packets exist in the hardware queue and these
		 * packets are error packets, flip overflow may occurred.
		 * So return 0 in this case.
		 */
		rxq_stats->packets =
			stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ?
			stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0;
		/* Export each basic-stat field via its table offset. */
		for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
			val = (char *)rxq_stats +
				hns3_rxq_basic_stats_strings[j].offset;
			xstats[*count].value = *(uint64_t *)val;
			xstats[*count].id = *count;
			(*count)++;
		}
	}
}
949
950 static void
hns3_txq_basic_stats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,int * count)951 hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
952 int *count)
953 {
954 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
955 struct hns3_tqp_stats *stats = &hw->tqp_stats;
956 struct hns3_tx_basic_stats *txq_stats;
957 struct hns3_tx_queue *txq;
958 uint16_t i, j;
959 char *val;
960
961 for (i = 0; i < dev->data->nb_tx_queues; i++) {
962 txq = dev->data->tx_queues[i];
963 if (txq == NULL)
964 continue;
965
966 hns3_rcb_tx_ring_stats_get(txq, stats);
967
968 txq_stats = &txq->basic_stats;
969 txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
970
971 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
972 val = (char *)txq_stats +
973 hns3_txq_basic_stats_strings[j].offset;
974 xstats[*count].value = *(uint64_t *)val;
975 xstats[*count].id = *count;
976 (*count)++;
977 }
978 }
979 }
980
/*
 * Fill 'xstats' with the per-queue basic statistics, Rx queues first and then
 * Tx queues, advancing '*count' past each entry filled.
 */
static void
hns3_tqp_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
			 int *count)
{
	hns3_rxq_basic_stats_get(dev, xstats, count);
	hns3_txq_basic_stats_get(dev, xstats, count);
}
988
989 static void
hns3_imissed_stats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,int * count)990 hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
991 int *count)
992 {
993 struct hns3_adapter *hns = dev->data->dev_private;
994 struct hns3_hw *hw = &hns->hw;
995 struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats;
996 uint16_t imissed_stats_num;
997 int cnt = *count;
998 char *addr;
999 uint16_t i;
1000
1001 imissed_stats_num = hns3_get_imissed_stats_num(hns);
1002
1003 for (i = 0; i < imissed_stats_num; i++) {
1004 addr = (char *)imissed_stats +
1005 hns3_imissed_stats_strings[i].offset;
1006 xstats[cnt].value = *(uint64_t *)addr;
1007 xstats[cnt].id = cnt;
1008 cnt++;
1009 }
1010
1011 *count = cnt;
1012 }
1013
1014 /*
1015 * Retrieve extended(tqp | Mac) statistics of an Ethernet device.
1016 * @param dev
1017 * Pointer to Ethernet device.
 * @param xstats
1019 * A pointer to a table of structure of type *rte_eth_xstat*
1020 * to be filled with device statistics ids and values.
1021 * This parameter can be set to NULL if and only if n is 0.
1022 * @param n
1023 * The size of the xstats array (number of elements).
1024 * If lower than the required number of elements, the function returns the
1025 * required number of elements.
1026 * If equal to zero, the xstats parameter must be NULL, the function returns
1027 * the required number of elements.
1028 * @return
 *   A negative errno value on failure, count (the number of statistics
 *   elements) on success.
1030 */
1031 int
hns3_dev_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,unsigned int n)1032 hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1033 unsigned int n)
1034 {
1035 struct hns3_adapter *hns = dev->data->dev_private;
1036 struct hns3_hw *hw = &hns->hw;
1037 struct hns3_mac_stats *mac_stats = &hw->mac_stats;
1038 struct hns3_reset_stats *reset_stats = &hw->reset.stats;
1039 struct hns3_rx_bd_errors_stats *rx_err_stats;
1040 struct hns3_rx_queue *rxq;
1041 uint16_t i, j;
1042 char *addr;
1043 int count;
1044 int ret;
1045
1046 count = hns3_xstats_calc_num(dev);
1047 if ((int)n < count)
1048 return count;
1049
1050 count = 0;
1051
1052 rte_spinlock_lock(&hw->stats_lock);
1053 hns3_tqp_basic_stats_get(dev, xstats, &count);
1054
1055 if (!hns->is_vf) {
1056 ret = hns3_update_mac_stats(hw);
1057 if (ret < 0) {
1058 hns3_err(hw, "Update Mac stats fail : %d", ret);
1059 rte_spinlock_unlock(&hw->stats_lock);
1060 return ret;
1061 }
1062
1063 /* Get MAC stats from hw->hw_xstats.mac_stats struct */
1064 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1065 addr = (char *)mac_stats + hns3_mac_strings[i].offset;
1066 xstats[count].value = *(uint64_t *)addr;
1067 xstats[count].id = count;
1068 count++;
1069 }
1070 }
1071
1072 ret = hns3_update_imissed_stats(hw, false);
1073 if (ret) {
1074 hns3_err(hw, "update imissed stats failed, ret = %d", ret);
1075 rte_spinlock_unlock(&hw->stats_lock);
1076 return ret;
1077 }
1078
1079 hns3_imissed_stats_get(dev, xstats, &count);
1080
1081 /* Get the reset stat */
1082 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1083 addr = (char *)reset_stats + hns3_reset_stats_strings[i].offset;
1084 xstats[count].value = *(uint64_t *)addr;
1085 xstats[count].id = count;
1086 count++;
1087 }
1088
1089 /* Get the Rx BD errors stats */
1090 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1091 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1092 rxq = dev->data->rx_queues[j];
1093 if (rxq) {
1094 rx_err_stats = &rxq->err_stats;
1095 addr = (char *)rx_err_stats +
1096 hns3_rx_bd_error_strings[i].offset;
1097 xstats[count].value = *(uint64_t *)addr;
1098 xstats[count].id = count;
1099 count++;
1100 }
1101 }
1102 }
1103
1104 hns3_tqp_dfx_stats_get(dev, xstats, &count);
1105 hns3_queue_stats_get(dev, xstats, &count);
1106 rte_spinlock_unlock(&hw->stats_lock);
1107
1108 return count;
1109 }
1110
1111 static void
hns3_tqp_basic_stats_name_get(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,uint32_t * count)1112 hns3_tqp_basic_stats_name_get(struct rte_eth_dev *dev,
1113 struct rte_eth_xstat_name *xstats_names,
1114 uint32_t *count)
1115 {
1116 uint16_t i, j;
1117
1118 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1119 for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) {
1120 snprintf(xstats_names[*count].name,
1121 sizeof(xstats_names[*count].name),
1122 "rx_q%u_%s", i,
1123 hns3_rxq_basic_stats_strings[j].name);
1124 (*count)++;
1125 }
1126 }
1127 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1128 for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) {
1129 snprintf(xstats_names[*count].name,
1130 sizeof(xstats_names[*count].name),
1131 "tx_q%u_%s", i,
1132 hns3_txq_basic_stats_strings[j].name);
1133 (*count)++;
1134 }
1135 }
1136 }
1137
1138 static void
hns3_tqp_dfx_stats_name_get(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,uint32_t * count)1139 hns3_tqp_dfx_stats_name_get(struct rte_eth_dev *dev,
1140 struct rte_eth_xstat_name *xstats_names,
1141 uint32_t *count)
1142 {
1143 uint16_t i, j;
1144
1145 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1146 for (j = 0; j < HNS3_NUM_RXQ_DFX_XSTATS; j++) {
1147 snprintf(xstats_names[*count].name,
1148 sizeof(xstats_names[*count].name),
1149 "rx_q%u_%s", i,
1150 hns3_rxq_dfx_stats_strings[j].name);
1151 (*count)++;
1152 }
1153 }
1154
1155 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1156 for (j = 0; j < HNS3_NUM_TXQ_DFX_XSTATS; j++) {
1157 snprintf(xstats_names[*count].name,
1158 sizeof(xstats_names[*count].name),
1159 "tx_q%u_%s", i,
1160 hns3_txq_dfx_stats_strings[j].name);
1161 (*count)++;
1162 }
1163 }
1164 }
1165
1166 static void
hns3_imissed_stats_name_get(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,uint32_t * count)1167 hns3_imissed_stats_name_get(struct rte_eth_dev *dev,
1168 struct rte_eth_xstat_name *xstats_names,
1169 uint32_t *count)
1170 {
1171 struct hns3_adapter *hns = dev->data->dev_private;
1172 uint32_t cnt = *count;
1173 uint16_t imissed_stats_num;
1174 uint16_t i;
1175
1176 imissed_stats_num = hns3_get_imissed_stats_num(hns);
1177
1178 for (i = 0; i < imissed_stats_num; i++) {
1179 snprintf(xstats_names[cnt].name,
1180 sizeof(xstats_names[cnt].name),
1181 "%s", hns3_imissed_stats_strings[i].name);
1182 cnt++;
1183 }
1184
1185 *count = cnt;
1186 }
1187
1188 /*
1189 * Retrieve names of extended statistics of an Ethernet device.
1190 *
1191 * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
1192 * by array index:
1193 * xstats_names[i].name => xstats[i].value
1194 *
1195 * And the array index is same with id field of 'struct rte_eth_xstat':
1196 * xstats[i].id == i
1197 *
1198 * This assumption makes key-value pair matching less flexible but simpler.
1199 *
1200 * @param dev
1201 * Pointer to Ethernet device.
1202 * @param xstats_names
1203 * An rte_eth_xstat_name array of at least *size* elements to
1204 * be filled. If set to NULL, the function returns the required number
1205 * of elements.
1206 * @param size
1207 * The size of the xstats_names array (number of elements).
1208 * @return
1209 * - A positive value lower or equal to size: success. The return value
1210 * is the number of entries filled in the stats table.
1211 */
1212 int
hns3_dev_xstats_get_names(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,__rte_unused unsigned int size)1213 hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
1214 struct rte_eth_xstat_name *xstats_names,
1215 __rte_unused unsigned int size)
1216 {
1217 struct hns3_adapter *hns = dev->data->dev_private;
1218 int cnt_stats = hns3_xstats_calc_num(dev);
1219 uint32_t count = 0;
1220 uint16_t i, j;
1221
1222 if (xstats_names == NULL)
1223 return cnt_stats;
1224
1225 hns3_tqp_basic_stats_name_get(dev, xstats_names, &count);
1226
1227 /* Note: size limited checked in rte_eth_xstats_get_names() */
1228 if (!hns->is_vf) {
1229 /* Get MAC name from hw->hw_xstats.mac_stats struct */
1230 for (i = 0; i < HNS3_NUM_MAC_STATS; i++) {
1231 snprintf(xstats_names[count].name,
1232 sizeof(xstats_names[count].name),
1233 "%s", hns3_mac_strings[i].name);
1234 count++;
1235 }
1236 }
1237
1238 hns3_imissed_stats_name_get(dev, xstats_names, &count);
1239
1240 for (i = 0; i < HNS3_NUM_RESET_XSTATS; i++) {
1241 snprintf(xstats_names[count].name,
1242 sizeof(xstats_names[count].name),
1243 "%s", hns3_reset_stats_strings[i].name);
1244 count++;
1245 }
1246
1247 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1248 for (i = 0; i < HNS3_NUM_RX_BD_ERROR_XSTATS; i++) {
1249 snprintf(xstats_names[count].name,
1250 sizeof(xstats_names[count].name),
1251 "rx_q%u_%s", j,
1252 hns3_rx_bd_error_strings[i].name);
1253 count++;
1254 }
1255 }
1256
1257 hns3_tqp_dfx_stats_name_get(dev, xstats_names, &count);
1258
1259 for (j = 0; j < dev->data->nb_rx_queues; j++) {
1260 for (i = 0; i < HNS3_NUM_RX_QUEUE_STATS; i++) {
1261 snprintf(xstats_names[count].name,
1262 sizeof(xstats_names[count].name),
1263 "rx_q%u_%s", j, hns3_rx_queue_strings[i].name);
1264 count++;
1265 }
1266 }
1267
1268 for (j = 0; j < dev->data->nb_tx_queues; j++) {
1269 for (i = 0; i < HNS3_NUM_TX_QUEUE_STATS; i++) {
1270 snprintf(xstats_names[count].name,
1271 sizeof(xstats_names[count].name),
1272 "tx_q%u_%s", j, hns3_tx_queue_strings[i].name);
1273 count++;
1274 }
1275 }
1276
1277 return count;
1278 }
1279
1280 /*
1281 * Retrieve extended statistics of an Ethernet device.
1282 *
1283 * @param dev
1284 * Pointer to Ethernet device.
1285 * @param ids
1286 * A pointer to an ids array passed by application. This tells which
1287 * statistics values function should retrieve. This parameter
1288 * can be set to NULL if size is 0. In this case function will retrieve
1289 * all available statistics.
1290 * @param values
1291 * A pointer to a table to be filled with device statistics values.
1292 * @param size
1293 * The size of the ids array (number of elements).
1294 * @return
1295 * - A positive value lower or equal to size: success. The return value
1296 * is the number of entries filled in the stats table.
1297 * - A positive value higher than size: error, the given statistics table
1298 * is too small. The return value corresponds to the size that should
1299 * be given to succeed. The entries in the table are not valid and
1300 * shall not be used by the caller.
 * - 0 when size is 0 (no ids requested).
1302 */
1303 int
hns3_dev_xstats_get_by_id(struct rte_eth_dev * dev,const uint64_t * ids,uint64_t * values,uint32_t size)1304 hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1305 uint64_t *values, uint32_t size)
1306 {
1307 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1308 struct hns3_adapter *hns = dev->data->dev_private;
1309 struct rte_eth_xstat *values_copy;
1310 struct hns3_hw *hw = &hns->hw;
1311 uint32_t count_value;
1312 uint64_t len;
1313 uint32_t i;
1314
1315 if (ids == NULL && values == NULL)
1316 return cnt_stats;
1317
1318 if (ids == NULL)
1319 if (size < cnt_stats)
1320 return cnt_stats;
1321
1322 len = cnt_stats * sizeof(struct rte_eth_xstat);
1323 values_copy = rte_zmalloc("hns3_xstats_values", len, 0);
1324 if (values_copy == NULL) {
1325 hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed to store statistics values",
1326 len);
1327 return -ENOMEM;
1328 }
1329
1330 count_value = hns3_dev_xstats_get(dev, values_copy, cnt_stats);
1331 if (count_value != cnt_stats) {
1332 rte_free(values_copy);
1333 return -EINVAL;
1334 }
1335
1336 if (ids == NULL && values != NULL) {
1337 for (i = 0; i < cnt_stats; i++)
1338 memcpy(&values[i], &values_copy[i].value,
1339 sizeof(values[i]));
1340
1341 rte_free(values_copy);
1342 return cnt_stats;
1343 }
1344
1345 for (i = 0; i < size; i++) {
1346 if (ids[i] >= cnt_stats) {
1347 hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, should < %u",
1348 i, ids[i], cnt_stats);
1349 rte_free(values_copy);
1350 return -EINVAL;
1351 }
1352 memcpy(&values[i], &values_copy[ids[i]].value,
1353 sizeof(values[i]));
1354 }
1355
1356 rte_free(values_copy);
1357 return size;
1358 }
1359
1360 /*
1361 * Retrieve names of extended statistics of an Ethernet device.
1362 *
1363 * @param dev
1364 * Pointer to Ethernet device.
1365 * @param ids
1366 * IDs array given by app to retrieve specific statistics
1367 * @param xstats_names
1368 * An rte_eth_xstat_name array of at least *size* elements to
1369 * be filled. If set to NULL, the function returns the required number
1370 * of elements.
1371 * @param size
1372 * The size of the xstats_names array (number of elements).
1373 * @return
1374 * - A positive value lower or equal to size: success. The return value
1375 * is the number of entries filled in the stats table.
1376 * - A positive value higher than size: error, the given statistics table
1377 * is too small. The return value corresponds to the size that should
1378 * be given to succeed. The entries in the table are not valid and
1379 * shall not be used by the caller.
1380 */
1381 int
hns3_dev_xstats_get_names_by_id(struct rte_eth_dev * dev,const uint64_t * ids,struct rte_eth_xstat_name * xstats_names,uint32_t size)1382 hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1383 const uint64_t *ids,
1384 struct rte_eth_xstat_name *xstats_names,
1385 uint32_t size)
1386 {
1387 const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
1388 struct hns3_adapter *hns = dev->data->dev_private;
1389 struct rte_eth_xstat_name *names_copy;
1390 struct hns3_hw *hw = &hns->hw;
1391 uint64_t len;
1392 uint32_t i;
1393
1394 if (xstats_names == NULL)
1395 return cnt_stats;
1396
1397 if (ids == NULL) {
1398 if (size < cnt_stats)
1399 return cnt_stats;
1400
1401 return hns3_dev_xstats_get_names(dev, xstats_names, cnt_stats);
1402 }
1403
1404 len = cnt_stats * sizeof(struct rte_eth_xstat_name);
1405 names_copy = rte_zmalloc("hns3_xstats_names", len, 0);
1406 if (names_copy == NULL) {
1407 hns3_err(hw, "Failed to allocate 0x%" PRIx64 " bytes needed to store statistics names",
1408 len);
1409 return -ENOMEM;
1410 }
1411
1412 (void)hns3_dev_xstats_get_names(dev, names_copy, cnt_stats);
1413
1414 for (i = 0; i < size; i++) {
1415 if (ids[i] >= cnt_stats) {
1416 hns3_err(hw, "ids[%u] (%" PRIu64 ") is invalid, should < %u",
1417 i, ids[i], cnt_stats);
1418 rte_free(names_copy);
1419 return -EINVAL;
1420 }
1421 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1422 "%s", names_copy[ids[i]].name);
1423 }
1424
1425 rte_free(names_copy);
1426 return size;
1427 }
1428
1429 static void
hns3_tqp_dfx_stats_clear(struct rte_eth_dev * dev)1430 hns3_tqp_dfx_stats_clear(struct rte_eth_dev *dev)
1431 {
1432 struct hns3_rx_queue *rxq;
1433 struct hns3_tx_queue *txq;
1434 uint16_t i;
1435
1436 /* Clear Rx dfx stats */
1437 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1438 rxq = dev->data->rx_queues[i];
1439 if (rxq)
1440 memset(&rxq->dfx_stats, 0,
1441 sizeof(struct hns3_rx_dfx_stats));
1442 }
1443
1444 /* Clear Tx dfx stats */
1445 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1446 txq = dev->data->tx_queues[i];
1447 if (txq)
1448 memset(&txq->dfx_stats, 0,
1449 sizeof(struct hns3_tx_dfx_stats));
1450 }
1451 }
1452
1453 int
hns3_dev_xstats_reset(struct rte_eth_dev * dev)1454 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
1455 {
1456 struct hns3_adapter *hns = dev->data->dev_private;
1457 struct hns3_hw *hw = &hns->hw;
1458 int ret;
1459
1460 /* Clear tqp stats */
1461 ret = hns3_stats_reset(dev);
1462 if (ret)
1463 return ret;
1464
1465 rte_spinlock_lock(&hw->stats_lock);
1466 hns3_tqp_dfx_stats_clear(dev);
1467
1468 /* Clear reset stats */
1469 memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
1470
1471 if (hns->is_vf)
1472 goto out;
1473
1474 ret = hns3_mac_stats_reset(hw);
1475
1476 out:
1477 rte_spinlock_unlock(&hw->stats_lock);
1478
1479 return ret;
1480 }
1481
1482 static int
hns3_tqp_stats_init(struct hns3_hw * hw)1483 hns3_tqp_stats_init(struct hns3_hw *hw)
1484 {
1485 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1486
1487 tqp_stats->rcb_rx_ring_pktnum = rte_zmalloc("hns3_rx_ring_pkt_num",
1488 sizeof(uint64_t) * hw->tqps_num, 0);
1489 if (tqp_stats->rcb_rx_ring_pktnum == NULL) {
1490 hns3_err(hw, "failed to allocate rx_ring pkt_num.");
1491 return -ENOMEM;
1492 }
1493
1494 tqp_stats->rcb_tx_ring_pktnum = rte_zmalloc("hns3_tx_ring_pkt_num",
1495 sizeof(uint64_t) * hw->tqps_num, 0);
1496 if (tqp_stats->rcb_tx_ring_pktnum == NULL) {
1497 hns3_err(hw, "failed to allocate tx_ring pkt_num.");
1498 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1499 tqp_stats->rcb_rx_ring_pktnum = NULL;
1500 return -ENOMEM;
1501 }
1502
1503 return 0;
1504 }
1505
1506 static void
hns3_tqp_stats_uninit(struct hns3_hw * hw)1507 hns3_tqp_stats_uninit(struct hns3_hw *hw)
1508 {
1509 struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats;
1510
1511 rte_free(tqp_stats->rcb_rx_ring_pktnum);
1512 tqp_stats->rcb_rx_ring_pktnum = NULL;
1513 rte_free(tqp_stats->rcb_tx_ring_pktnum);
1514 tqp_stats->rcb_tx_ring_pktnum = NULL;
1515 }
1516
1517 static void
hns3_tqp_stats_clear(struct hns3_hw * hw)1518 hns3_tqp_stats_clear(struct hns3_hw *hw)
1519 {
1520 struct hns3_tqp_stats *stats = &hw->tqp_stats;
1521
1522 stats->rcb_rx_ring_pktnum_rcd = 0;
1523 stats->rcb_tx_ring_pktnum_rcd = 0;
1524 memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1525 memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num);
1526 }
1527
1528 int
hns3_stats_init(struct hns3_hw * hw)1529 hns3_stats_init(struct hns3_hw *hw)
1530 {
1531 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1532 int ret;
1533
1534 rte_spinlock_init(&hw->stats_lock);
1535 /* Hardware statistics of imissed registers cleared. */
1536 ret = hns3_update_imissed_stats(hw, true);
1537 if (ret) {
1538 hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
1539 return ret;
1540 }
1541
1542 if (!hns->is_vf) {
1543 ret = hns3_mac_stats_reset(hw);
1544 if (ret) {
1545 hns3_err(hw, "reset mac stats failed, ret = %d", ret);
1546 return ret;
1547 }
1548 }
1549
1550 return hns3_tqp_stats_init(hw);
1551 }
1552
/* Tear down statistics resources; currently only the tqp counter arrays. */
void
hns3_stats_uninit(struct hns3_hw *hw)
{
	hns3_tqp_stats_uninit(hw);
}
1558
1559 static void
hns3_update_queues_stats(struct hns3_hw * hw)1560 hns3_update_queues_stats(struct hns3_hw *hw)
1561 {
1562 struct rte_eth_dev_data *data = hw->data;
1563 struct hns3_rx_queue *rxq;
1564 struct hns3_tx_queue *txq;
1565 uint16_t i;
1566
1567 for (i = 0; i < data->nb_rx_queues; i++) {
1568 rxq = data->rx_queues[i];
1569 if (rxq != NULL)
1570 hns3_rcb_rx_ring_stats_get(rxq, &hw->tqp_stats);
1571 }
1572
1573 for (i = 0; i < data->nb_tx_queues; i++) {
1574 txq = data->tx_queues[i];
1575 if (txq != NULL)
1576 hns3_rcb_tx_ring_stats_get(txq, &hw->tqp_stats);
1577 }
1578 }
1579
1580 /*
1581 * Some hardware statistics registers are not 64-bit. If hardware statistics are
1582 * not obtained for a long time, these statistics may be reversed. This function
1583 * is used to update these hardware statistics in periodic task.
1584 */
void
hns3_update_hw_stats(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	rte_spinlock_lock(&hw->stats_lock);
	/*
	 * NOTE(review): the return value of hns3_update_mac_stats() is
	 * ignored here — presumably a deliberate best-effort refresh since
	 * this runs from a periodic task with no caller to report failure
	 * to; confirm before adding error handling.
	 */
	if (!hns->is_vf)
		hns3_update_mac_stats(hw);

	hns3_update_queues_stats(hw);
	rte_spinlock_unlock(&hw->stats_lock);
}
1597