xref: /dpdk/drivers/net/hns3/hns3_common.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <bus_pci_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>

#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"
#include "hns3_dcb.h"
#include "hns3_common.h"

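/*
 * Illustrative note: the 32-bit firmware version packs four byte-wide fields
 * (BYTE3..BYTE0). Assuming BYTE3 occupies the most significant byte, a
 * fw_version of 0x01020304 is rendered as "1.2.3.4".
 */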
int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
		    size_t fw_size)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint32_t version = hw->fw_version;
	int ret;

	ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				      HNS3_FW_VERSION_BYTE3_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				      HNS3_FW_VERSION_BYTE2_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				      HNS3_FW_VERSION_BYTE1_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				      HNS3_FW_VERSION_BYTE0_S));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCATTER |
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

	if (!hw->port_base_vlan_cfg.state)
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP |
			 RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP;
	if (hns3_dev_get_support(hw, INDEP_TXRX))
		info->dev_capa |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				  RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (hns3_dev_get_support(hw, PTP))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * If there are no available Rx buffer descriptors, incoming
		 * packets are always dropped by the hns3 hardware.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = hw->rss_key_size;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	/* The remaining settings differ between PF and VF devices. */
	if (!hns->is_vf) {
		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
		info->speed_capa = hns3_get_speed_capa(hw);
	} else {
		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	}

	info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE;

	return 0;
}

static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
	uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

	RTE_SET_USED(key);

	if (strcmp(value, "vec") == 0)
		hint = HNS3_IO_FUNC_HINT_VEC;
	else if (strcmp(value, "sve") == 0)
		hint = HNS3_IO_FUNC_HINT_SVE;
	else if (strcmp(value, "simple") == 0)
		hint = HNS3_IO_FUNC_HINT_SIMPLE;
	else if (strcmp(value, "common") == 0)
		hint = HNS3_IO_FUNC_HINT_COMMON;

	/* If the hint is valid then update output parameters */
	if (hint != HNS3_IO_FUNC_HINT_NONE)
		*(uint32_t *)extra_args = hint;

	return 0;
}

static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
	switch (hint) {
	case HNS3_IO_FUNC_HINT_VEC:
		return "vec";
	case HNS3_IO_FUNC_HINT_SVE:
		return "sve";
	case HNS3_IO_FUNC_HINT_SIMPLE:
		return "simple";
	case HNS3_IO_FUNC_HINT_COMMON:
		return "common";
	default:
		return "none";
	}
}

static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
	*(uint64_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	RTE_SET_USED(key);

	val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

	/*
	 * 500ms is an empirical value for mailbox communication. Setting the
	 * time limit below this value may cause mailbox communication to
	 * fail, so only larger values are accepted.
	 */
	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
		*(uint16_t *)extra_args = val;

	return 0;
}

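/*
 * Example devargs, assuming the key strings match the HNS3_DEVARG_* macro
 * names (the PCI address below is hypothetical):
 *   -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common,mbx_time_limit_ms=600
 * dev_caps_mask is parsed as a hexadecimal bitmask and mbx_time_limit_ms as
 * a decimal number of milliseconds.
 */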
void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
	uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	struct hns3_hw *hw = &hns->hw;
	uint64_t dev_caps_mask = 0;
	struct rte_kvargs *kvlist;

	/* Set default value of runtime config parameters. */
	hns->rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->dev_caps_mask = 0;
	hns->mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;

	if (dev->device->devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
	if (!kvlist)
		return;

	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
				 &hns3_parse_io_hint_func, &rx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
				 &hns3_parse_io_hint_func, &tx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
				 &hns3_parse_dev_caps_mask, &dev_caps_mask);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
				 &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);

	rte_kvargs_free(kvlist);

	if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
			  hns3_get_io_hint_func_name(rx_func_hint));
	hns->rx_func_hint = rx_func_hint;
	if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
			  hns3_get_io_hint_func_name(tx_func_hint));
	hns->tx_func_hint = tx_func_hint;

	if (dev_caps_mask != 0)
		hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
			  HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
	hns->dev_caps_mask = dev_caps_mask;

	if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
		hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
			  mbx_time_limit_ms);
	hns->mbx_time_limit_ms = mbx_time_limit_ms;
}

void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

	struct timespec spec;
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

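/*
 * Worked example, assuming the usual MSEC_PER_SEC = 1000 and
 * USEC_PER_MSEC = 1000: tv_sec = 1, tv_usec = 500000 yields
 * 1 * 1000 + 500000 / 1000 = 1500 ms.
 */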
uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
		tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval tv;

	hns3_clock_gettime(&tv);
	return hns3_clock_calctime_ms(&tv);
}

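/*
 * Format a MAC address for logging with the middle bytes masked, so the
 * full address is not exposed in log output.
 */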
void
hns3_ether_format_addr(char *buf, uint16_t size,
		       const struct rte_ether_addr *ether_addr)
{
	(void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		       ether_addr->addr_bytes[0],
		       ether_addr->addr_bytes[4],
		       ether_addr->addr_bytes[5]);
}

static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	uint32_t i;
	uint32_t j;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -EINVAL;
	}

	/* Check if input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}

		/* Check if there are duplicate addresses */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. two same addrs(%s).",
					 mac_str);
				return -EINVAL;
			}
		}

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					      HNS3_UC_MACADDR_NUM;
		for (j = 0; j < mac_addrs_capa; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. addr(%s) has already "
					 "been configured via the mac_addr add "
					 "API", mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

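/*
 * Replace the whole multicast address list: under hw->lock, remove every
 * currently configured MC address, then add the new set, keeping
 * hw->mc_addrs and hw->mc_addrs_num in sync with the hardware.
 */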
int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hw->ops.del_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}

int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int ret = 0;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hw->ops.del_mc_mac_addr(hw, addr);
		else
			ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_dbg(hw, "failed to %s mc mac addr: %s, ret = %d",
				 del ? "remove" : "restore", mac_str, ret);
		}
	}
	return ret;
}

int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	uint16_t i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%u ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}

static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, the same "
				 "addr(%s) has already been added by the "
				 "set_mc_mac_addr_list API", mac_str);
			return true;
		}
	}

	return false;
}

int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC MAC addresses are added to
	 * the firmware with different commands, so determine whether the
	 * input address is a UC or a MC address before choosing the command.
	 * Note that rte_eth_dev_set_mc_addr_list is the recommended API for
	 * setting MC MAC addresses: adding a MC address with
	 * rte_eth_dev_mac_addr_add may reduce the number of UC MAC addresses
	 * that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

int
hns3_init_mac_addrs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	const char *memory_name = hns->is_vf ? "hns3vf-mac" : "hns3-mac";
	uint16_t mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					       HNS3_UC_MACADDR_NUM;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *eth_addr;

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc(memory_name,
				sizeof(struct rte_ether_addr) * mac_addrs_capa,
				0);
	if (dev->data->mac_addrs == NULL) {
		hns3_err(hw, "failed to allocate %zu bytes needed to store MAC addresses",
			 sizeof(struct rte_ether_addr) * mac_addrs_capa);
		return -ENOMEM;
	}

	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!hns->is_vf) {
		if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
			rte_eth_random_addr(hw->mac.mac_addr);
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				(struct rte_ether_addr *)hw->mac.mac_addr);
			hns3_warn(hw, "default mac_addr from firmware is an invalid "
				  "unicast address, using random MAC address %s",
				  mac_str);
		}
	} else {
		/*
		 * The hns3 PF kernel ethdev driver supports setting the VF MAC
		 * address on the host with the "ip link set ..." command. To
		 * avoid inconsistencies, such as the hns3 VF PMD failing to
		 * receive and send packets after the user configures the MAC
		 * address this way, the VF PMD keeps the same MAC address
		 * strategy as the hns3 kernel ethdev driver during
		 * initialization: if the user has configured a MAC address for
		 * the VF device with the ip command, start with it; otherwise
		 * start with a random MAC address.
		 */
		if (rte_is_zero_ether_addr(eth_addr))
			rte_eth_random_addr(hw->mac.mac_addr);
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &dev->data->mac_addrs[0]);

	return 0;
}

int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	uint16_t i;
	int ret;

	/*
	 * In the hns3 network engine, vector 0 is always the misc interrupt
	 * of the function and vectors 1~N can be used for its queues. Tx and
	 * Rx queues with the same number share an interrupt vector. During
	 * initialization, all hardware mappings between queues and interrupt
	 * vectors must be cleared, so that errors caused by residual
	 * configurations, such as unexpected Tx interrupts, are avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalescing of the queue's
		 * interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL (quantity limiter) is not used currently, just set 0 to
		 * close it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to unbind TX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to unbind RX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}
	}

	return 0;
}

int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt as the event interrupt, which
	 * cannot be shared with the task queue pairs, so the kernel driver
	 * must support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* Create an event fd for each interrupt vector when MSI-X is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    hw->used_rx_queues)) {
		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
			 hw->used_rx_queues);
		ret = -ENOMEM;
		goto alloc_intr_vec_error;
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hw->ops.bind_ring_with_vector(hw, vec, true,
						    HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;

		ret = rte_intr_vec_list_index_set(intr_handle, q_id, vec);
		if (ret)
			goto bind_vector_error;
		/*
		 * If there are not enough efds (e.g. not enough interrupts),
		 * the remaining queues will be bound to the last interrupt.
		 */
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}

void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
							    HNS3_RING_TYPE_RX,
							    q_id);
			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vector mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
}

int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hw->ops.bind_ring_with_vector(hw,
				rte_intr_vec_list_index_get(intr_handle,
							    q_id),
				true, HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}

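/*
 * Read the one-byte PCI revision ID from config space. rte_pci_read_config()
 * returns the number of bytes read on success, hence the comparison against
 * HNS3_PCI_REVISION_ID_LEN below.
 */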
int
hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id)
{
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		hns3_err(hw, "failed to read pci revision id, ret = %d", ret);
		return -EIO;
	}

	*revision_id = revision;

	return 0;
}

void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_dev_specs_0_cmd *req0;
	struct hns3_dev_specs_1_cmd *req1;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
	hw->min_tx_pkt_len = req1->min_tx_pkt_len;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
}

static int
hns3_check_dev_specifications(struct hns3_hw *hw)
{
	if (hw->rss_ind_tbl_size == 0 ||
	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
		hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum (%u)",
			 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) {
		hns3_err(hw, "the RSS key size obtained (%u) is invalid, and should not be zero or exceed the maximum (%u)",
			 hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size > HNS3_RSS_KEY_SIZE)
		hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)",
			  hw->rss_key_size, HNS3_RSS_KEY_SIZE);

	return 0;
}

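/*
 * Device specifications are queried with a chain of
 * HNS3_QUERY_DEV_SPECS_BD_NUM command descriptors; every descriptor except
 * the last carries HNS3_CMD_FLAG_NEXT to tell the firmware that more BDs
 * follow.
 */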
int
hns3_query_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hns3_parse_dev_specifications(hw, desc);

	return hns3_check_dev_specifications(hw);
}
934