/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <bus_pci_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>

#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"
#include "hns3_dcb.h"
#include "hns3_common.h"

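/*
 * Format the firmware version stored in hw->fw_version as
 * "byte3.byte2.byte1.byte0"; e.g. the (illustrative) value 0x01020304 would
 * format as "1.2.3.4". Per the ethdev .fw_version_get convention, returns 0
 * on success or the required buffer size (including the trailing '\0') when
 * fw_size is too small.
 */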
int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
		    size_t fw_size)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint32_t version = hw->fw_version;
	int ret;

	/* hns3_get_field() yields uint32_t values, so use "%u". */
	ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				      HNS3_FW_VERSION_BYTE3_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				      HNS3_FW_VERSION_BYTE2_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				      HNS3_FW_VERSION_BYTE1_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				      HNS3_FW_VERSION_BYTE0_S));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

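/*
 * The .dev_infos_get callback: report queue limits, offload capabilities,
 * descriptor limits and default queue configuration, based on what the
 * hardware advertised at initialization time.
 */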
int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCATTER |
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

	if (!hw->port_base_vlan_cfg.state)
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP |
			 RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP;
	if (hns3_dev_get_support(hw, INDEP_TXRX))
		info->dev_capa |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				  RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (hns3_dev_get_support(hw, PTP))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * If there are no available Rx buffer descriptors, incoming
		 * packets are always dropped by the hardware of the hns3
		 * network engine.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = hw->rss_key_size;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	/* The following fields differ between PF and VF. */
	if (!hns->is_vf) {
		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
		info->speed_capa = hns3_get_speed_capa(hw);
	} else {
		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	}

	info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE;

	return 0;
}

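/*
 * The hns3_parse_*() helpers below are rte_kvargs_process() callbacks. Most
 * of them return 0 even for an unrecognized value so that a bad devarg falls
 * back to its default instead of failing device probe.
 */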
static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
	uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	if (strcmp(value, "vec") == 0)
		hint = HNS3_IO_FUNC_HINT_VEC;
	else if (strcmp(value, "sve") == 0)
		hint = HNS3_IO_FUNC_HINT_SVE;
	else if (strcmp(value, "simple") == 0)
		hint = HNS3_IO_FUNC_HINT_SIMPLE;
	else if (strcmp(value, "common") == 0)
		hint = HNS3_IO_FUNC_HINT_COMMON;

	/* If the hint is valid then update output parameters */
	if (hint != HNS3_IO_FUNC_HINT_NONE)
		*(uint32_t *)extra_args = hint;

	return 0;
}

static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
	switch (hint) {
	case HNS3_IO_FUNC_HINT_VEC:
		return "vec";
	case HNS3_IO_FUNC_HINT_SVE:
		return "sve";
	case HNS3_IO_FUNC_HINT_SIMPLE:
		return "simple";
	case HNS3_IO_FUNC_HINT_COMMON:
		return "common";
	default:
		return "none";
	}
}

static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
	*(uint64_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

	/*
	 * 500ms is an empirical value for mailbox communication. If the
	 * time limit is set below this value, mailbox communication may
	 * fail.
	 */
	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
		*(uint16_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_vlan_match_mode(const char *key, const char *value, void *args)
{
	uint8_t mode;

	RTE_SET_USED(key);

	if (value == NULL) {
		PMD_INIT_LOG(WARNING, "no value for key:\"%s\"", key);
		return -1;
	}

	if (strcmp(value, "strict") == 0) {
		mode = HNS3_FDIR_VLAN_STRICT_MATCH;
	} else if (strcmp(value, "nostrict") == 0) {
		mode = HNS3_FDIR_VLAN_NOSTRICT_MATCH;
	} else {
		PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 'strict' or 'nostrict'",
			value, key);
		return -1;
	}

	*(uint8_t *)args = mode;

	return 0;
}

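/*
 * Parse the driver's runtime configuration from device arguments. A typical
 * invocation could look like the following (the PCI address is a
 * placeholder, and the key names assume the usual expansion of the
 * HNS3_DEVARG_* macros):
 *
 *   dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,mbx_time_limit_ms=600
 *
 * Values that fail to parse leave the corresponding default untouched.
 */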
void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
	uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	struct hns3_hw *hw = &hns->hw;
	uint64_t dev_caps_mask = 0;
	struct rte_kvargs *kvlist;

	/* Set default values of the runtime config parameters. */
	hns->rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->dev_caps_mask = 0;
	hns->mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	if (!hns->is_vf)
		hns->pf.fdir.vlan_match_mode = HNS3_FDIR_VLAN_STRICT_MATCH;

	if (dev->device->devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
	if (kvlist == NULL)
		return;

	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &rx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &tx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
			   &hns3_parse_dev_caps_mask, &dev_caps_mask);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
			   &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);
	if (!hns->is_vf)
		(void)rte_kvargs_process(kvlist,
					 HNS3_DEVARG_FDIR_VALN_MATCH_MODE,
					 &hns3_parse_vlan_match_mode,
					 &hns->pf.fdir.vlan_match_mode);

	rte_kvargs_free(kvlist);

	if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
			  hns3_get_io_hint_func_name(rx_func_hint));
	hns->rx_func_hint = rx_func_hint;
	if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
			  hns3_get_io_hint_func_name(tx_func_hint));
	hns->tx_func_hint = tx_func_hint;

	if (dev_caps_mask != 0)
		hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
			  HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
	hns->dev_caps_mask = dev_caps_mask;

	if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
		hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
				mbx_time_limit_ms);
	hns->mbx_time_limit_ms = mbx_time_limit_ms;
}

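/*
 * Monotonic time helpers. CLOCK_MONOTONIC_RAW is preferred when available
 * because it is not subject to NTP adjustments, which keeps timeout
 * measurements stable.
 */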
void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

	struct timespec spec;
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
		tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval tv;

	hns3_clock_gettime(&tv);
	return hns3_clock_calctime_ms(&tv);
}

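/*
 * Format a MAC address for logging; bytes 1~3 are masked with "**" so that
 * complete addresses do not leak into the logs.
 */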
void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	(void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
			ether_addr->addr_bytes[0],
			ether_addr->addr_bytes[4],
			ether_addr->addr_bytes[5]);
}

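/*
 * Validate a candidate multicast address list: enforce the
 * HNS3_MC_MACADDR_NUM limit and reject non-multicast entries, duplicates
 * within the list, and addresses already configured through the MAC add API.
 */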
static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	uint32_t i;
	uint32_t j;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -EINVAL;
	}

	/* Check if the input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}

		/* Check if there are duplicate addresses */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				hns3_ether_format_addr(mac_str,
						      RTE_ETHER_ADDR_FMT_SIZE,
						      addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. two same addrs(%s).",
					 mac_str);
				return -EINVAL;
			}
		}

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					      HNS3_UC_MACADDR_NUM;
		for (j = 0; j < mac_addrs_capa; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. addr(%s) has already "
					 "been configured by the mac_addr add "
					 "API", mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

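/*
 * The .set_mc_addr_list callback: under hw->lock, remove every currently
 * configured multicast address and then program the new list. On failure the
 * function returns early, so entries applied before the error remain in
 * effect.
 */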
int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hw->ops.del_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

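/*
 * Remove (del == true) or re-add (del == false) every tracked multicast
 * address in hw->mc_addrs, e.g. when the filter table has to be rebuilt
 * after a reset.
 */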
int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int ret = 0;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hw->ops.del_mc_mac_addr(hw, addr);
		else
			ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d",
				 del ? "remove" : "restore", mac_str, ret);
		}
	}
	return ret;
}

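/*
 * Same as above, but for the addresses tracked in hw->data->mac_addrs;
 * multicast entries that were added through the MAC add API are dispatched
 * to the MC ops as well.
 */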
int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	uint16_t i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%u ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}

static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, the same "
				 "addr(%s) has already been added by the "
				 "set_mc_mac_addr_list API", mac_str);
			return true;
		}
	}

	return false;
}

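/*
 * The .mac_addr_add callback: dispatch to the UC or MC firmware command
 * depending on the address type. idx and pool are unused because the entry
 * itself is tracked by the ethdev layer in dev->data->mac_addrs.
 */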
int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC mac addresses are added to
	 * firmware with different commands, so we must determine whether the
	 * input address is a UC or an MC address in order to call the right
	 * command. Note that it is recommended to set MC mac addresses with
	 * the rte_eth_dev_set_mc_addr_list API, because setting an MC mac
	 * address with the rte_eth_dev_mac_addr_add API may reduce the
	 * number of UC mac addresses that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* The index is checked by the upper-level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

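/*
 * Allocate dev->data->mac_addrs and seed entry 0 with the address provided
 * by firmware, falling back to a random address when firmware reports an
 * invalid unicast address (PF) or an all-zero address (VF).
 */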
int
hns3_init_mac_addrs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	const char *memory_name = hns->is_vf ? "hns3vf-mac" : "hns3-mac";
	uint16_t mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
						HNS3_UC_MACADDR_NUM;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *eth_addr;

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc(memory_name,
				sizeof(struct rte_ether_addr) * mac_addrs_capa,
				0);
	if (dev->data->mac_addrs == NULL) {
		hns3_err(hw, "failed to allocate %zx bytes needed to store MAC addresses",
			 sizeof(struct rte_ether_addr) * mac_addrs_capa);
		return -ENOMEM;
	}

	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!hns->is_vf) {
		if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
			rte_eth_random_addr(hw->mac.mac_addr);
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				(struct rte_ether_addr *)hw->mac.mac_addr);
			hns3_warn(hw, "default mac_addr from firmware is an invalid "
				  "unicast address, using random MAC address %s",
				  mac_str);
		}
	} else {
		/*
		 * The hns3 PF ethdev driver in the kernel supports setting
		 * the VF MAC address on the host with the "ip link set ..."
		 * command. To avoid incorrect behavior, for example, the
		 * hns3 VF PMD failing to receive and send packets after the
		 * user configures the MAC address with "ip link set ...",
		 * the hns3 VF PMD keeps the same MAC address strategy as the
		 * hns3 kernel ethdev driver during initialization: if the
		 * user has configured a MAC address for the VF device with
		 * the ip command, the hns3 VF PMD starts with it; otherwise
		 * it starts with a random MAC address.
		 */
		if (rte_is_zero_ether_addr(eth_addr))
			rte_eth_random_addr(hw->mac.mac_addr);
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &dev->data->mac_addrs[0]);

	return 0;
}

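/*
 * Program default interrupt coalescing parameters (GL/RL/QL) for every
 * queue-capable vector and unbind any stale queue/vector mappings left
 * behind by a previous driver instance.
 */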
int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	uint16_t i;
	int ret;

	/*
	 * In the hns3 network engine, vector 0 is always the misc interrupt
	 * of the function, and vectors 1~N can be used for its queues. Tx
	 * and Rx queues with the same number share an interrupt vector.
	 * During initialization, all hardware mappings between queues and
	 * interrupt vectors must be cleared, so that errors caused by
	 * residual configurations, such as unexpected Tx interrupts, can be
	 * avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalescing of the queue's
		 * interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL (quantity limiter) is not used currently, just set 0 to
		 * close it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind TX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind RX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}
	}

	return 0;
}

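/*
 * Bind each Rx queue to an interrupt vector when Rx interrupt mode is
 * enabled. Vector 0 remains reserved for the misc interrupt; queues beyond
 * the number of available event fds share the last vector.
 */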
int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt as the event interrupt, which
	 * cannot be shared with the task queue pairs, so the kernel driver
	 * needs to support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* creates event fd for each intr vector when MSIX is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    hw->used_rx_queues)) {
		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
			 hw->used_rx_queues);
		ret = -ENOMEM;
		goto alloc_intr_vec_error;
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hw->ops.bind_ring_with_vector(hw, vec, true,
						    HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;

		/* Capture the result so a failure is not reported as success. */
		ret = rte_intr_vec_list_index_set(intr_handle, q_id, vec);
		if (ret)
			goto bind_vector_error;
		/*
		 * If there are not enough efds (e.g. not enough interrupts),
		 * the remaining queues will be bound to the last interrupt.
		 */
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}

void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
							HNS3_RING_TYPE_RX,
							q_id);
			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
}

int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hw->ops.bind_ring_with_vector(hw,
				rte_intr_vec_list_index_get(intr_handle,
							    q_id),
				true, HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}

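/*
 * Read the one-byte revision id from PCI configuration space.
 * rte_pci_read_config() returns the number of bytes read, so any result
 * other than HNS3_PCI_REVISION_ID_LEN is treated as an I/O error.
 */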
int
hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id)
{
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		hns3_err(hw, "failed to read pci revision id, ret = %d", ret);
		return -EIO;
	}

	*revision_id = revision;

	return 0;
}

void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_dev_specs_0_cmd *req0;
	struct hns3_dev_specs_1_cmd *req1;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
	hw->min_tx_pkt_len = req1->min_tx_pkt_len;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
}

static int
hns3_check_dev_specifications(struct hns3_hw *hw)
{
	if (hw->rss_ind_tbl_size == 0 ||
	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
		hns3_err(hw, "the indirection table size obtained (%u) is invalid, it must not be zero or exceed the maximum (%u)",
			 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) {
		hns3_err(hw, "the RSS key size obtained (%u) is invalid, it must not be zero or exceed the maximum (%u)",
			 hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size > HNS3_RSS_KEY_SIZE)
		hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)",
			  hw->rss_key_size, HNS3_RSS_KEY_SIZE);

	return 0;
}

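/*
 * Query the device specifications from firmware with a multi-descriptor
 * command: every descriptor except the last carries HNS3_CMD_FLAG_NEXT, and
 * the reply is validated before it replaces the defaults.
 */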
int
hns3_query_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hns3_parse_dev_specifications(hw, desc);

	return hns3_check_dev_specifications(hw);
}
978