xref: /dpdk/drivers/net/hns3/hns3_common.c (revision b53d106d34b5c638f5a2cbdfee0da5bd42d4383f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>
#include <rte_pci.h>

#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"

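/*
 * Report the firmware version decoded byte-by-byte (BYTE3.BYTE2.BYTE1.BYTE0)
 * from hw->fw_version. Returns 0 on success, -EINVAL if formatting fails, or
 * the buffer length the caller must provide (including the terminating '\0')
 * when fw_size is too small to hold the string.
 */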
int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
		    size_t fw_size)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint32_t version = hw->fw_version;
	int ret;

	ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				      HNS3_FW_VERSION_BYTE3_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				      HNS3_FW_VERSION_BYTE2_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				      HNS3_FW_VERSION_BYTE1_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				      HNS3_FW_VERSION_BYTE0_S));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCATTER |
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

	if (!hw->port_base_vlan_cfg.state)
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	if (hns3_dev_get_support(hw, INDEP_TXRX))
		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	if (hns3_dev_get_support(hw, PTP))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * If there are no available Rx buffer descriptors, incoming
		 * packets are always dropped by the hns3 network engine
		 * hardware.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = HNS3_RSS_KEY_SIZE;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	/* The following settings differ between PF and VF. */
	if (!hns->is_vf) {
		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
		info->speed_capa = hns3_get_speed_capa(hw);
	} else {
		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	}

	return 0;
}

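/*
 * Parser callback for the HNS3_DEVARG_RX_FUNC_HINT and
 * HNS3_DEVARG_TX_FUNC_HINT devargs. Recognized values are "vec", "sve",
 * "simple" and "common"; any other value leaves the hint unchanged
 * (HNS3_IO_FUNC_HINT_NONE).
 */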
static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
	uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

	RTE_SET_USED(key);

	if (strcmp(value, "vec") == 0)
		hint = HNS3_IO_FUNC_HINT_VEC;
	else if (strcmp(value, "sve") == 0)
		hint = HNS3_IO_FUNC_HINT_SVE;
	else if (strcmp(value, "simple") == 0)
		hint = HNS3_IO_FUNC_HINT_SIMPLE;
	else if (strcmp(value, "common") == 0)
		hint = HNS3_IO_FUNC_HINT_COMMON;

	/* If the hint is valid, update the output parameter. */
	if (hint != HNS3_IO_FUNC_HINT_NONE)
		*(uint32_t *)extra_args = hint;

	return 0;
}

static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
	switch (hint) {
	case HNS3_IO_FUNC_HINT_VEC:
		return "vec";
	case HNS3_IO_FUNC_HINT_SVE:
		return "sve";
	case HNS3_IO_FUNC_HINT_SIMPLE:
		return "simple";
	case HNS3_IO_FUNC_HINT_COMMON:
		return "common";
	default:
		return "none";
	}
}

static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
	*(uint64_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	RTE_SET_USED(key);

	val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

	/*
	 * 500ms is an empirical value for the mailbox communication process.
	 * If the delay value is set lower than this empirical value, mailbox
	 * communication may fail.
	 */
	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
		*(uint16_t *)extra_args = val;

	return 0;
}

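/*
 * Parse the optional hns3 devargs (HNS3_DEVARG_RX_FUNC_HINT,
 * HNS3_DEVARG_TX_FUNC_HINT, HNS3_DEVARG_DEV_CAPS_MASK and
 * HNS3_DEVARG_MBX_TIME_LIMIT_MS) and store the results in the adapter.
 * Values that do not pass the parser checks keep their defaults.
 */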
void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
	uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	struct hns3_hw *hw = &hns->hw;
	uint64_t dev_caps_mask = 0;
	struct rte_kvargs *kvlist;

	if (dev->device->devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
	if (!kvlist)
		return;

	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &rx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &tx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
			   &hns3_parse_dev_caps_mask, &dev_caps_mask);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
			   &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);

	rte_kvargs_free(kvlist);

	if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
			  hns3_get_io_hint_func_name(rx_func_hint));
	hns->rx_func_hint = rx_func_hint;
	if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
			  hns3_get_io_hint_func_name(tx_func_hint));
	hns->tx_func_hint = tx_func_hint;

	if (dev_caps_mask != 0)
		hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
			  HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
	hns->dev_caps_mask = dev_caps_mask;

	if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
		hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
				mbx_time_limit_ms);
	hns->mbx_time_limit_ms = mbx_time_limit_ms;
}

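/*
 * Fill *tv with a monotonic timestamp. CLOCK_MONOTONIC_RAW is preferred when
 * available so the value is not affected by NTP/adjtime adjustments.
 */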
void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

	struct timespec spec;
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
		tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval tv;

	hns3_clock_gettime(&tv);
	return hns3_clock_calctime_ms(&tv);
}

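/*
 * Format a MAC address for logging with the middle bytes masked
 * ("XX:**:**:**:XX:XX") so complete addresses do not leak into logs.
 */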
void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	(void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
			ether_addr->addr_bytes[0],
			ether_addr->addr_bytes[4],
			ether_addr->addr_bytes[5]);
}

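/*
 * Validate a candidate multicast address list: the number of entries must not
 * exceed HNS3_MC_MACADDR_NUM, every entry must be a multicast address, and
 * there must be no duplicates within the list or with addresses already
 * present in hw->data->mac_addrs.
 */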
static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	uint32_t i;
	uint32_t j;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -EINVAL;
	}

	/* Check if input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}

		/* Check if there are duplicate addresses */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				hns3_ether_format_addr(mac_str,
						      RTE_ETHER_ADDR_FMT_SIZE,
						      addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. two same addrs(%s).",
					 mac_str);
				return -EINVAL;
			}
		}

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					      HNS3_UC_MACADDR_NUM;
		for (j = 0; j < mac_addrs_capa; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. addr(%s) has already "
					 "been configured in mac_addr add API",
					 mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

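/*
 * Replace the whole multicast address list: after validating the new set,
 * remove every currently configured multicast address and then add the new
 * entries, all under hw->lock. On failure the lock is released and the error
 * is returned with the list partially updated.
 */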
int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hw->ops.del_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

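/*
 * Remove (del == true) or re-add (del == false) every multicast address
 * recorded in hw->mc_addrs. Failures are only logged at debug level;
 * iteration continues and the result of the last attempted operation is
 * returned.
 */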
int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int ret = 0;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hw->ops.del_mc_mac_addr(hw, addr);
		else
			ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d",
				 del ? "Remove" : "Restore", mac_str, ret);
		}
	}
	return ret;
}

int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	int i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}

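/*
 * Return true if mc_addr is already present in hw->mc_addrs, i.e. it was
 * previously added through the set_mc_mac_addr_list path.
 */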
static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return true;
		}
	}

	return false;
}

int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * The hns3 network engine adds UC and MC MAC addresses with different
	 * firmware commands, so determine whether the input address is a UC
	 * or an MC address and call the corresponding command. Note that it
	 * is recommended to use the rte_eth_dev_set_mc_addr_list API to set
	 * MC MAC addresses, because adding an MC address through the
	 * rte_eth_dev_mac_addr_add API may affect the usable number of UC
	 * MAC addresses.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	int ret;
	int i;

	/*
	 * In the hns3 network engine, vector 0 is always the misc interrupt
	 * of this function, and vectors 1~N can be used for the queues of the
	 * function. Tx and Rx queues with the same number share an interrupt
	 * vector. During initialization, all hardware mappings between queues
	 * and interrupt vectors must be cleared so that errors caused by
	 * residual configurations, such as unexpected Tx interrupts, are
	 * avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set the gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalescing of the queue's
		 * interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL (quantity limiter) is not used currently; set it to 0 to
		 * disable it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind TX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind RX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}
	}

	return 0;
}

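/*
 * Bind each used Rx queue to an MSI-X vector and enable the per-queue Rx
 * interrupts. This is a no-op when Rx interrupt mode is not requested or the
 * kernel driver cannot provide multiple interrupt vectors.
 */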
int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt to be used as the event interrupt,
	 * which cannot be shared with the task queue pairs, so the kernel
	 * driver needs to support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* creates event fd for each intr vector when MSIX is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    hw->used_rx_queues)) {
		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
			 hw->used_rx_queues);
		ret = -ENOMEM;
		goto alloc_intr_vec_error;
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hw->ops.bind_ring_with_vector(hw, vec, true,
						    HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;

		if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
			goto bind_vector_error;
		/*
		 * If there are not enough efds (e.g. not enough interrupts),
		 * the remaining queues will be bound to the last interrupt.
		 */
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}

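/*
 * Undo the Rx queue to vector mapping established by hns3_map_rx_interrupt
 * and release the event fds and the interrupt vector list.
 */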
void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
							HNS3_RING_TYPE_RX,
							q_id);
			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
}

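/*
 * Re-establish the Rx queue to vector bindings recorded in the interrupt
 * handle's vector list, e.g. when the interrupt configuration has to be
 * restored after a reset.
 */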
int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hw->ops.bind_ring_with_vector(hw,
				rte_intr_vec_list_index_get(intr_handle,
								   q_id),
				true, HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}