/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <bus_pci_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>

#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"
#include "hns3_dcb.h"
#include "hns3_common.h"

int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
		    size_t fw_size)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint32_t version = hw->fw_version;
	int ret;

	ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				      HNS3_FW_VERSION_BYTE3_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				      HNS3_FW_VERSION_BYTE2_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				      HNS3_FW_VERSION_BYTE1_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				      HNS3_FW_VERSION_BYTE0_S));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
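
/*
 * Illustrative sketch (not part of the driver): the callback above is
 * reached through rte_eth_dev_fw_version_get(), which returns 0 on
 * success, the required buffer size (including the terminating '\0') when
 * the caller's buffer is too small, or a negative errno. The helper name
 * is hypothetical.
 */
static __rte_unused int
hns3_example_fw_version(uint16_t port_id, char *buf, size_t len)
{
	int ret = rte_eth_dev_fw_version_get(port_id, buf, len);

	if (ret > 0)	/* 'len' was too small; 'ret' bytes are required. */
		return -ENOSPC;
	return ret;
}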

int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_rx_bufsize = HNS3_MAX_BD_BUF_SIZE;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCATTER |
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				 RTE_ETH_RX_OFFLOAD_RSS_HASH);
	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

	if (!hns->is_vf && !hw->port_base_vlan_cfg.state)
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP |
			 RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP;
	if (hns3_dev_get_support(hw, INDEP_TXRX))
		info->dev_capa |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				  RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (hns3_dev_get_support(hw, PTP))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (hns3_dev_get_support(hw, GRO))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * If there are no available Rx buffer descriptors, incoming
		 * packets are always dropped by the hns3 network engine
		 * hardware.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = hw->rss_key_size;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;
	info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT) |
			      RTE_ETH_HASH_ALGO_CAPA_MASK(TOEPLITZ) |
			      RTE_ETH_HASH_ALGO_CAPA_MASK(SIMPLE_XOR) |
			      RTE_ETH_HASH_ALGO_CAPA_MASK(SYMMETRIC_TOEPLITZ);

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	/* Next is the PF/VF difference section. */
	if (!hns->is_vf) {
		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
		info->speed_capa = hns3_get_speed_capa(hw);
	} else {
		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	}

	info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE;

	return 0;
}
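
/*
 * Illustrative sketch (not part of the driver): applications read the
 * limits filled in above through rte_eth_dev_info_get(), e.g. to clamp a
 * requested Rx queue count. The helper name is hypothetical.
 */
static __rte_unused uint16_t
hns3_example_clamp_rxq_num(uint16_t port_id, uint16_t wanted)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return RTE_MIN(wanted, dev_info.max_rx_queues);
}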

static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
	uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	if (strcmp(value, "vec") == 0)
		hint = HNS3_IO_FUNC_HINT_VEC;
	else if (strcmp(value, "sve") == 0)
		hint = HNS3_IO_FUNC_HINT_SVE;
	else if (strcmp(value, "simple") == 0)
		hint = HNS3_IO_FUNC_HINT_SIMPLE;
	else if (strcmp(value, "common") == 0)
		hint = HNS3_IO_FUNC_HINT_COMMON;

	/* If the hint is valid then update output parameters */
	if (hint != HNS3_IO_FUNC_HINT_NONE)
		*(uint32_t *)extra_args = hint;

	return 0;
}

static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
	switch (hint) {
	case HNS3_IO_FUNC_HINT_VEC:
		return "vec";
	case HNS3_IO_FUNC_HINT_SVE:
		return "sve";
	case HNS3_IO_FUNC_HINT_SIMPLE:
		return "simple";
	case HNS3_IO_FUNC_HINT_COMMON:
		return "common";
	default:
		return "none";
	}
}

static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
	*(uint64_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	RTE_SET_USED(key);

	if (value == NULL || extra_args == NULL)
		return 0;

	val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

	/*
	 * 500 ms is an empirical value from the mailbox communication
	 * process. If the delay is set lower than this empirical value,
	 * mailbox communication may fail.
	 */
	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
		*(uint16_t *)extra_args = val;

	return 0;
}

static int
hns3_parse_vlan_match_mode(const char *key, const char *value, void *args)
{
	uint8_t mode;

	RTE_SET_USED(key);

	if (value == NULL) {
		PMD_INIT_LOG(WARNING, "no value for key:\"%s\"", key);
		return -1;
	}

	if (strcmp(value, "strict") == 0) {
		mode = HNS3_FDIR_VLAN_STRICT_MATCH;
	} else if (strcmp(value, "nostrict") == 0) {
		mode = HNS3_FDIR_VLAN_NOSTRICT_MATCH;
	} else {
		PMD_INIT_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			"value must be 'strict' or 'nostrict'",
			value, key);
		return -1;
	}

	*(uint8_t *)args = mode;

	return 0;
}

void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
	uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	struct hns3_hw *hw = &hns->hw;
	uint64_t dev_caps_mask = 0;
	struct rte_kvargs *kvlist;

	/* Set default value of runtime config parameters. */
	hns->rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	hns->dev_caps_mask = 0;
	hns->mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	if (!hns->is_vf)
		hns->pf.fdir.vlan_match_mode = HNS3_FDIR_VLAN_STRICT_MATCH;

	if (dev->device->devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
	if (!kvlist)
		return;

	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &rx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
			   &hns3_parse_io_hint_func, &tx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
			   &hns3_parse_dev_caps_mask, &dev_caps_mask);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
			   &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);
	if (!hns->is_vf)
		(void)rte_kvargs_process(kvlist,
					 HNS3_DEVARG_FDIR_VALN_MATCH_MODE,
					 &hns3_parse_vlan_match_mode,
					 &hns->pf.fdir.vlan_match_mode);

	rte_kvargs_free(kvlist);

	if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
			  hns3_get_io_hint_func_name(rx_func_hint));
	hns->rx_func_hint = rx_func_hint;
	if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
			  hns3_get_io_hint_func_name(tx_func_hint));
	hns->tx_func_hint = tx_func_hint;

	if (dev_caps_mask != 0)
		hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
			  HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
	hns->dev_caps_mask = dev_caps_mask;

	if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
		hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
				mbx_time_limit_ms);
	hns->mbx_time_limit_ms = mbx_time_limit_ms;
}
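
/*
 * Illustrative sketch (not part of the driver): the options parsed above
 * arrive as device arguments on the EAL command line, using the key names
 * documented for this PMD, e.g. (the PCI address is a placeholder):
 *
 *   dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,mbx_time_limit_ms=600 -- -i
 *
 * Unknown hint values are silently ignored and the driver keeps its
 * defaults.
 */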

void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000

	struct timespec spec;
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
		tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval tv;

	hns3_clock_gettime(&tv);
	return hns3_clock_calctime_ms(&tv);
}

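/*
 * Format a MAC address for logging: only bytes 0, 4 and 5 are printed and
 * bytes 1~3 are masked with "**", so complete addresses do not leak into
 * the logs.
 */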
void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	(void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
			ether_addr->addr_bytes[0],
			ether_addr->addr_bytes[4],
			ether_addr->addr_bytes[5]);
}

static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	uint32_t i;
	uint32_t j;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -ENOSPC;
	}

	/* Check if input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}

		/* Check if there are duplicate addresses */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				hns3_ether_format_addr(mac_str,
						      RTE_ETHER_ADDR_FMT_SIZE,
						      addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. two same addrs(%s).",
					 mac_str);
				return -EINVAL;
			}
		}

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					      HNS3_UC_MACADDR_NUM;
		for (j = 0; j < mac_addrs_capa; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. addr(%s) has already "
					 "been configured by the mac_addr add API",
					 mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}

int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	if (mc_addr_set == NULL || nb_mc_addr == 0) {
		rte_spinlock_lock(&hw->lock);
		ret = hns3_configure_all_mc_mac_addr(hns, true);
		if (ret == 0)
			hw->mc_addrs_num = 0;
		rte_spinlock_unlock(&hw->lock);
		return ret;
	}

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hw->ops.del_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
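
/*
 * Illustrative sketch (not part of the driver): applications replace the
 * whole multicast filter in one rte_eth_dev_set_mc_addr_list() call; an
 * empty list clears it. The addresses and helper name are placeholders.
 */
static __rte_unused int
hns3_example_set_mc_list(uint16_t port_id)
{
	struct rte_ether_addr mc[] = {
		{{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }},
		{{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }},
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}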

int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int ret = 0;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hw->ops.del_mc_mac_addr(hw, addr);
		else
			ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					      addr);
			hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d",
				 del ? "Remove" : "Restore", mac_str, ret);
		}
	}
	return ret;
}

int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	uint16_t i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%u ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}

static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return true;
		}
	}

	return false;
}

int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * The hns3 network engine uses different firmware commands to add
	 * UC and MC MAC addresses, so we must determine whether the input
	 * address is a UC or a MC address in order to issue the right
	 * command. Note that it is recommended to set MC MAC addresses via
	 * the rte_eth_dev_set_mc_addr_list API, because adding a MC address
	 * with the rte_eth_dev_mac_addr_add API may reduce the number of
	 * UC MAC addresses that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}
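
/*
 * Illustrative sketch (not part of the driver): the callback above is the
 * backend of rte_eth_dev_mac_addr_add(). Pool 0 is the default pool; the
 * address and helper name are placeholders.
 */
static __rte_unused int
hns3_example_add_uc_addr(uint16_t port_id)
{
	struct rte_ether_addr addr = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};

	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}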

void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				      mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}

int
hns3_init_mac_addrs(struct rte_eth_dev *dev)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	const char *memory_name = hns->is_vf ? "hns3vf-mac" : "hns3-mac";
	uint16_t mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
						HNS3_UC_MACADDR_NUM;
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *eth_addr;

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc(memory_name,
				sizeof(struct rte_ether_addr) * mac_addrs_capa,
				0);
	if (dev->data->mac_addrs == NULL) {
		hns3_err(hw, "failed to allocate %zx bytes needed to store MAC addresses",
			 sizeof(struct rte_ether_addr) * mac_addrs_capa);
		return -ENOMEM;
	}

	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
	if (!hns->is_vf) {
		if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
			rte_eth_random_addr(hw->mac.mac_addr);
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				(struct rte_ether_addr *)hw->mac.mac_addr);
			hns3_warn(hw, "default mac_addr from firmware is an invalid "
				  "unicast address, using random MAC address %s",
				  mac_str);
		}
	} else {
		/*
		 * The hns3 PF kernel ethdev driver supports setting VF MAC
		 * addresses on the host with the "ip link set ..." command.
		 * To avoid failure cases, for example, the hns3 VF PMD being
		 * unable to receive and send packets after the user sets the
		 * MAC address with "ip link set ...", the hns3 VF PMD keeps
		 * the same MAC address strategy as the hns3 kernel ethdev
		 * driver during initialization: if the user has configured a
		 * MAC address for the VF device with the ip command, the
		 * hns3 VF PMD starts with it; otherwise it starts with a
		 * random MAC address.
		 */
		if (rte_is_zero_ether_addr(eth_addr))
			rte_eth_random_addr(hw->mac.mac_addr);
	}

	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
			    &dev->data->mac_addrs[0]);

	return 0;
}

int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	uint16_t i;
	int ret;

	/*
	 * In the hns3 network engine, vector 0 is always the misc interrupt
	 * of this function and vectors 1~N can be used for the queues of the
	 * function. Tx and Rx queues with the same index share an interrupt
	 * vector. During initialization, all hardware mappings between
	 * queues and interrupt vectors must be cleared, so that errors
	 * caused by residual configuration, such as unexpected Tx
	 * interrupts, are avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set the GL (gap limiter), RL (rate limiter) and QL
		 * (quantity limiter) configuration for the queue's
		 * interrupt coalescing.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL (quantity limiter) is not used currently, so set 0 to
		 * disable it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind TX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind RX ring(%u) with vector: %u, ret=%d",
				     i, vec, ret);
			return ret;
		}
	}

	return 0;
}

int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a dedicated interrupt for events, which cannot be
	 * shared with the task queue pairs, so the kernel driver must
	 * support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* creates event fd for each intr vector when MSIX is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    hw->used_rx_queues)) {
		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
			 hw->used_rx_queues);
		ret = -ENOMEM;
		goto alloc_intr_vec_error;
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hw->ops.bind_ring_with_vector(hw, vec, true,
						    HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;

		if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
			goto bind_vector_error;
		/*
		 * If there are not enough efds (e.g. not enough interrupt
		 * vectors), the remaining queues are bound to the last
		 * interrupt.
		 */
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}
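
/*
 * Illustrative sketch (not part of the driver): the mapping above only
 * takes effect when the application enables Rx interrupt mode at configure
 * time and then arms each queue. The call below is a standard ethdev API;
 * the helper name is hypothetical.
 */
static __rte_unused int
hns3_example_enable_rxq_intr(uint16_t port_id, uint16_t queue_id)
{
	/* Requires dev_conf.intr_conf.rxq = 1 at rte_eth_dev_configure(). */
	return rte_eth_dev_rx_intr_enable(port_id, queue_id);
}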

void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
							HNS3_RING_TYPE_RX,
							q_id);
			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
}

int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hw->ops.bind_ring_with_vector(hw,
				rte_intr_vec_list_index_get(intr_handle, q_id),
				true, HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int
hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id)
{
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		hns3_err(hw, "failed to read pci revision id, ret = %d", ret);
		return -EIO;
	}

	*revision_id = revision;

	return 0;
}

void
hns3_set_default_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
	hw->intr.int_ql_max = HNS3_INTR_QL_NONE;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
}

static void
hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_dev_specs_0_cmd *req0;
	struct hns3_dev_specs_1_cmd *req1;

	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data;

	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
	hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max);
	hw->min_tx_pkt_len = req1->min_tx_pkt_len;

	if (hns->is_vf)
		return;

	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
}

static int
hns3_check_dev_specifications(struct hns3_hw *hw)
{
	if (hw->rss_ind_tbl_size == 0 ||
	    hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) {
		hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
			 hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) {
		hns3_err(hw, "the RSS key size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)",
			 hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX);
		return -EINVAL;
	}

	if (hw->rss_key_size > HNS3_RSS_KEY_SIZE)
		hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)",
			  hw->rss_key_size, HNS3_RSS_KEY_SIZE);

	return 0;
}

int
hns3_query_dev_specifications(struct hns3_hw *hw)
{
	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
					  true);
		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	}
	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);

	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hns3_parse_dev_specifications(hw, desc);

	return hns3_check_dev_specifications(hw);
}
994