/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <bus_pci_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>

#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"
#include "hns3_dcb.h"
#include "hns3_common.h"

int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
		    size_t fw_size)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint32_t version = hw->fw_version;
	int ret;

	ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				      HNS3_FW_VERSION_BYTE3_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				      HNS3_FW_VERSION_BYTE2_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				      HNS3_FW_VERSION_BYTE1_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				      HNS3_FW_VERSION_BYTE0_S));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
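
/*
 * Usage sketch (an assumption for illustration, not part of this driver):
 * applications normally reach hns3_fw_version_get() through the generic
 * rte_eth_dev_fw_version_get() API and can retry with the size reported
 * back when the buffer was too small, e.g.
 *
 *	char fw[32];
 *	int rc = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (rc > 0)
 *		;	// rc is the required buffer size, including '\0'
 */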

int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_rx_bufsize = HNS3_MAX_BD_BUF_SIZE;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCATTER |
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				 RTE_ETH_RX_OFFLOAD_RSS_HASH);
	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

	if (!hns->is_vf && !hw->port_base_vlan_cfg.state)
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP |
			 RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP;
	if (hns3_dev_get_support(hw, INDEP_TXRX))
		info->dev_capa |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				  RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (hns3_dev_get_support(hw, PTP))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (hns3_dev_get_support(hw, GRO))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * On the hns3 network engine, incoming packets are always
		 * dropped by hardware when there are no available Rx buffer
		 * descriptors.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = hw->rss_key_size;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
	info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT) |
			      RTE_ETH_HASH_ALGO_CAPA_MASK(TOEPLITZ) |
			      RTE_ETH_HASH_ALGO_CAPA_MASK(SIMPLE_XOR) |
			      RTE_ETH_HASH_ALGO_CAPA_MASK(SYMMETRIC_TOEPLITZ);

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	/* The following fields differ between PF and VF devices. */
	if (!hns->is_vf) {
		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
		info->speed_capa = hns3_get_speed_capa(hw);
	} else {
		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	}

	info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE;

	return 0;
}
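
/*
 * Caller-side sketch (an assumption, not part of this file): the
 * capabilities reported above are consumed through the generic ethdev
 * query API, e.g.
 *
 *	struct rte_eth_dev_info dev_info;
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
 *	    (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH))
 *		;	// offload may be enabled in rte_eth_dev_configure()
 */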

static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
	uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	if (strcmp(value, "vec") == 0)
		hint = HNS3_IO_FUNC_HINT_VEC;
	else if (strcmp(value, "sve") == 0)
		hint = HNS3_IO_FUNC_HINT_SVE;
	else if (strcmp(value, "simple") == 0)
		hint = HNS3_IO_FUNC_HINT_SIMPLE;
	else if (strcmp(value, "common") == 0)
		hint = HNS3_IO_FUNC_HINT_COMMON;

	/* If the hint is valid then update output parameters */
	if (hint != HNS3_IO_FUNC_HINT_NONE)
		*(uint32_t *)extra_args = hint;

	return 0;
}

static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
	switch (hint) {
	case HNS3_IO_FUNC_HINT_VEC:
		return "vec";
	case HNS3_IO_FUNC_HINT_SVE:
		return "sve";
	case HNS3_IO_FUNC_HINT_SIMPLE:
		return "simple";
	case HNS3_IO_FUNC_HINT_COMMON:
		return "common";
	default:
		return "none";
	}
}

static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
	*(uint64_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

	/*
	 * 500ms is an empirical value for mailbox communication. If the
	 * time limit is set lower than this value, mailbox communication
	 * may fail.
	 */
	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
		*(uint16_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_vlan_match_mode(const char *key, const char *value, void *args)
{
	uint8_t mode;

	RTE_SET_USED(key);

	if (value == NULL) {
		PMD_INIT_LOG(WARNING, "no value for key:\"%s\"", key);
		return -1;
	}

	if (strcmp(value, "strict") == 0) {
		mode = HNS3_FDIR_VLAN_STRICT_MATCH;
	} else if (strcmp(value, "nostrict") == 0) {
		mode = HNS3_FDIR_VLAN_NOSTRICT_MATCH;
	} else {
		PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 'strict' or 'nostrict'",
			value, key);
		return -1;
	}

	*(uint8_t *)args = mode;

	return 0;
}

static int
hns3_parse_fdir_tuple_config(const char *key, const char *value, void *args)
{
	enum hns3_fdir_tuple_config tuple_cfg;

	tuple_cfg = hns3_parse_tuple_config(value);
	if (tuple_cfg == HNS3_FDIR_TUPLE_CONFIG_DEFAULT ||
	    tuple_cfg == HNS3_FDIR_TUPLE_CONFIG_BUTT) {
		PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\"",
			     value, key);
		return -1;
	}

	*(enum hns3_fdir_tuple_config *)args = tuple_cfg;

	return 0;
}

static int
hns3_parse_fdir_index_config(const char *key, const char *value, void *args)
{
	enum hns3_fdir_index_config cfg;

	if (strcmp(value, "hash") == 0) {
		cfg = HNS3_FDIR_INDEX_CONFIG_HASH;
	} else if (strcmp(value, "priority") == 0) {
		cfg = HNS3_FDIR_INDEX_CONFIG_PRIORITY;
	} else {
		PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 'hash' or 'priority'",
			value, key);
		return -1;
	}

	*(enum hns3_fdir_index_config *)args = cfg;

	return 0;
}

void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
	uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	struct hns3_hw *hw = &hns->hw;
	uint64_t dev_caps_mask = 0;
	struct rte_kvargs *kvlist;

	/* Set default value of runtime config parameters. */
	hns->rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->dev_caps_mask = 0;
	hns->mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	if (!hns->is_vf)
		hns->pf.fdir.vlan_match_mode = HNS3_FDIR_VLAN_STRICT_MATCH;

	if (dev->device->devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
	if (!kvlist)
		return;

	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &rx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &tx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
			   &hns3_parse_dev_caps_mask, &dev_caps_mask);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
			   &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);
	if (!hns->is_vf) {
		(void)rte_kvargs_process(kvlist,
					 HNS3_DEVARG_FDIR_VLAN_MATCH_MODE,
					 &hns3_parse_vlan_match_mode,
					 &hns->pf.fdir.vlan_match_mode);
		(void)rte_kvargs_process(kvlist,
					 HNS3_DEVARG_FDIR_TUPLE_CONFIG,
					 &hns3_parse_fdir_tuple_config,
					 &hns->pf.fdir.tuple_cfg);
		(void)rte_kvargs_process(kvlist,
					 HNS3_DEVARG_FDIR_INDEX_CONFIG,
					 &hns3_parse_fdir_index_config,
					 &hns->pf.fdir.index_cfg);
	}

	rte_kvargs_free(kvlist);

	if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
			  hns3_get_io_hint_func_name(rx_func_hint));
	hns->rx_func_hint = rx_func_hint;
	if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
			  hns3_get_io_hint_func_name(tx_func_hint));
	hns->tx_func_hint = tx_func_hint;

	if (dev_caps_mask != 0)
		hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
			  HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
	hns->dev_caps_mask = dev_caps_mask;

	if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
		hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
				mbx_time_limit_ms);
	hns->mbx_time_limit_ms = mbx_time_limit_ms;
}
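
/*
 * Example (illustrative, assuming the hns3 devarg key names declared by the
 * HNS3_DEVARG_* macros used above): these runtime options are passed per
 * device on the EAL command line, e.g.
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,tx_func_hint=common,\
 *		mbx_time_limit_ms=600 -- -i
 */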

void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

	struct timespec spec;
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
		tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval tv;

	hns3_clock_gettime(&tv);
	return hns3_clock_calctime_ms(&tv);
}
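
/*
 * Usage sketch (an assumption for illustration): these helpers provide a
 * monotonic millisecond clock for timeout and elapsed-time measurement, e.g.
 *
 *	uint64_t start_ms = hns3_clock_gettime_ms();
 *	// ... poll hardware ...
 *	if (hns3_clock_gettime_ms() - start_ms > timeout_ms)
 *		;	// give up waiting
 */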

void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	(void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
			ether_addr->addr_bytes[0],
			ether_addr->addr_bytes[4],
			ether_addr->addr_bytes[5]);
}
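
/*
 * For example, 08:9E:01:D2:E3:4F is rendered as "08:**:**:**:E3:4F":
 * bytes 1..3 are masked so that full MAC addresses do not leak into logs.
 */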

static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	uint32_t i;
	uint32_t j;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -ENOSPC;
	}

	/* Check if input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}

		/* Check if there are duplicate addresses in mc_addr_set */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. duplicate addr(%s).",
					 mac_str);
				return -EINVAL;
			}
		}

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					      HNS3_UC_MACADDR_NUM;
		for (j = 0; j < mac_addrs_capa; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addr(%s) has already been configured "
					 "by the mac_addr add API",
					 mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	if (mc_addr_set == NULL || nb_mc_addr == 0) {
		rte_spinlock_lock(&hw->lock);
		ret = hns3_configure_all_mc_mac_addr(hns, true);
		if (ret == 0)
			hw->mc_addrs_num = 0;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hw->ops.del_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
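
/*
 * Caller-side sketch (an assumption, not part of this driver): the MC list
 * is replaced as a whole through the generic ethdev API, e.g.
 *
 *	struct rte_ether_addr mc[2] = {
 *		{{0x01, 0x00, 0x5e, 0x00, 0x00, 0x01}},
 *		{{0x01, 0x00, 0x5e, 0x00, 0x00, 0x02}},
 *	};
 *	ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *
 * Passing NULL/0 clears every previously configured MC address.
 */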

int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int ret = 0;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hw->ops.del_mc_mac_addr(hw, addr);
		else
			ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_dbg(hw, "failed to %s mc mac addr: %s, ret = %d",
				 del ? "remove" : "restore", mac_str, ret);
		}
	}
	return ret;
}

int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	uint16_t i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%u ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}

static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, the same "
				 "addr(%s) has already been added by the "
				 "set_mc_mac_addr_list API", mac_str);
			return true;
		}
	}

	return false;
}

int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * In the hns3 network engine, UC and MC mac addresses are added with
	 * different firmware commands, so we must determine whether the input
	 * address is a UC or a MC address in order to issue the right command.
	 * Note: it is recommended to set MC mac addresses with the
	 * rte_eth_dev_set_mc_addr_list API; adding a MC mac address through
	 * the rte_eth_dev_mac_addr_add API may reduce the number of UC mac
	 * addresses that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

int
hns3_init_mac_addrs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	const char *memory_name = hns->is_vf ? "hns3vf-mac" : "hns3-mac";
	uint16_t mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
						HNS3_UC_MACADDR_NUM;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *eth_addr;

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc(memory_name,
				sizeof(struct rte_ether_addr) * mac_addrs_capa,
				0);
	if (dev->data->mac_addrs == NULL) {
		hns3_err(hw, "failed to allocate %zx bytes needed to store MAC addresses",
			 sizeof(struct rte_ether_addr) * mac_addrs_capa);
		return -ENOMEM;
	}

	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!hns->is_vf) {
		if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
			rte_eth_random_addr(hw->mac.mac_addr);
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				(struct rte_ether_addr *)hw->mac.mac_addr);
			hns3_warn(hw, "default mac_addr from firmware is an invalid "
				  "unicast address, using random MAC address %s",
				  mac_str);
		}
	} else {
		/*
		 * The hns3 PF kernel ethdev driver supports setting a VF MAC
		 * address on the host with the "ip link set ..." command. To
		 * avoid problems such as the hns3 VF PMD failing to receive
		 * and send packets after the user configures a MAC address
		 * this way, the VF PMD keeps the same MAC address strategy as
		 * the hns3 kernel ethdev driver during initialization: if the
		 * user has configured a MAC address for the VF device with
		 * the ip command, the VF PMD starts with it; otherwise it
		 * starts with a random MAC address.
		 */
		if (rte_is_zero_ether_addr(eth_addr))
			rte_eth_random_addr(hw->mac.mac_addr);
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &dev->data->mac_addrs[0]);

	return 0;
}

int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	uint16_t i;
	int ret;

	/*
	 * In the hns3 network engine, vector 0 is always the misc interrupt
	 * of the function and vectors 1~N can be used for its queues. Tx and
	 * Rx queues with the same index share one interrupt vector. During
	 * initialization, all hardware mappings between queues and interrupt
	 * vectors must be cleared so that errors caused by residual
	 * configurations, such as unexpected Tx interrupts, are avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalesce of queue's interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL(quantity limiter) is not used currently, just set 0 to
		 * close it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind TX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind RX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}
	}

	return 0;
}
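
/*
 * Interrupt vector layout assumed by the function above (restating the
 * comment inside hns3_init_ring_with_vector() for illustration):
 *
 *	vector 0    : misc interrupt of the function
 *	vector 1..N : Tx/Rx queue interrupts (Tx and Rx queues with the same
 *	              index share one vector)
 *	last vector : reserved when mapping_mode is
 *	              HNS3_INTR_MAPPING_VEC_RSV_ONE
 */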

int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt as the event interrupt, which
	 * cannot be shared with the task queue pairs, so the driver requires
	 * multiple interrupt vector support.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* creates event fd for each intr vector when MSIX is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    hw->used_rx_queues)) {
		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
			 hw->used_rx_queues);
		ret = -ENOMEM;
		goto alloc_intr_vec_error;
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hw->ops.bind_ring_with_vector(hw, vec, true,
						    HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;

		ret = rte_intr_vec_list_index_set(intr_handle, q_id, vec);
		if (ret)
			goto bind_vector_error;
		/*
		 * If there are not enough efds (e.g. not enough interrupts),
		 * the remaining queues are bound to the last interrupt.
		 */
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}
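
/*
 * Application-side sketch (an assumption for illustration): the Rx
 * interrupt mapping above only takes place when the application enables
 * rxq interrupts in its port configuration, e.g.
 *
 *	struct rte_eth_conf port_conf = {
 *		.intr_conf = { .rxq = 1 },
 *	};
 *	// after rte_eth_dev_configure() + rte_eth_dev_start():
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 */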

void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
							HNS3_RING_TYPE_RX,
							q_id);
			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
}

int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hw->ops.bind_ring_with_vector(hw,
				rte_intr_vec_list_index_get(intr_handle, q_id),
				true, HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int
hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id)
{
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		hns3_err(hw, "failed to read pci revision id, ret = %d", ret);
		return -EIO;
	}

	*revision_id = revision;

	return 0;
}

void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_dev_specs_0_cmd *req0;
	struct hns3_dev_specs_1_cmd *req1;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
	hw->min_tx_pkt_len = req1->min_tx_pkt_len;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
}

static int
hns3_check_dev_specifications(struct hns3_hw *hw)
{
	if (hw->rss_ind_tbl_size == 0 ||
	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
		hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
			 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) {
		hns3_err(hw, "the RSS key size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
			 hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size > HNS3_RSS_KEY_SIZE)
		hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)",
			  hw->rss_key_size, HNS3_RSS_KEY_SIZE);

	return 0;
}

int
hns3_query_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hns3_parse_dev_specifications(hw, desc);

	return hns3_check_dev_specifications(hw);
}
1042